commit
stringlengths
40
40
subject
stringlengths
1
1.49k
old_file
stringlengths
4
311
new_file
stringlengths
4
311
new_contents
stringlengths
1
29.8k
old_contents
stringlengths
0
9.9k
lang
stringclasses
3 values
proba
float64
0
1
0db094aba5095b63a8f9bfb066afb0048617f87e
add update_GeneAtlas_images.py
scheduled_bots/scripts/update_GeneAtlas_images.py
scheduled_bots/scripts/update_GeneAtlas_images.py
""" One off script to change GeneAtlas images to point to full-sized versions https://github.com/SuLab/GeneWikiCentral/issues/1 As described at https://www.wikidata.org/wiki/Property_talk:P692#How_about_using_full_size_image_instead_of_small_thumbnail.3F update all uses of the Gene Atlas Image property to use the full-sized version of the Gene Atlas image (e.g., https://www.wikidata.org/wiki/File:PBB_GE_ACTN3_206891_at_fs.png) instead of the thumbnail (e.g., https://www.wikidata.org/wiki/File:PBB_GE_ACTN3_206891_at_tn.png) SELECT ?item ?image WHERE { ?item wdt:P351 ?entrez . ?item wdt:P703 wd:Q15978631 . ?item wdt:P692 ?image } limit 1000 """ from collections import defaultdict from scheduled_bots.local import WDPASS, WDUSER from tqdm import tqdm from wikidataintegrator import wdi_core, wdi_login, wdi_helpers import urllib.request login = wdi_login.WDLogin(WDUSER, WDPASS) image_qid = wdi_helpers.id_mapper("P692", [("P703", "Q15978631")]) qid_images = defaultdict(list) for image, qid in image_qid.items(): qid_images[qid].append(image) qid_images = dict(qid_images) for qid, images in tqdm(qid_images.items()): images = [urllib.request.unquote(image.replace("http://commons.wikimedia.org/wiki/Special:FilePath/", "")) for image in images] images_proc = [image for image in images if image.startswith("PBB GE") and image.endswith("at tn.png")] if not images_proc: continue images_keep = [image for image in images if image.startswith("PBB GE") and image.endswith("at fs.png")] item = wdi_core.WDItemEngine(wd_item_id=qid) s = [] for image in images_proc: s.append(wdi_core.WDCommonsMedia(image.replace(" at tn.png", " at fs.png"), "P692")) for image in images_keep: s.append(wdi_core.WDCommonsMedia(image, "P692")) item.update(data=s) wdi_helpers.try_write(item, '', '', login, edit_summary="replace thumbnail gene atlas image with fs")
Python
0.000001
427a95f0c56facc138448cde7e7b9da1bcdc8ea4
Add super basic Hypothesis example
add_example.py
add_example.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # Unit Tests def test_add_zero(): assert 0 + 1 == 1 + 0 def test_add_single_digits(): assert 1 + 2 == 2 + 1 def test_add_double_digits(): assert 10 + 12 == 12 + 10 # Property-based Test from hypothesis import given import hypothesis.strategies as st @given(st.integers(), st.integers()) def test_add(x, y): assert x + y == y + x
Python
0.001464
3a4cb29e91008225c057feb3811e93b59f99d941
use flask-mail
application.py
application.py
from flask import Flask from flask.ext.mail import Mail, Message mail = Mail() app = Flask(__name__) app.config.update( MAIL_SERVER='smtp.gmail.com', MAIL_PORT='465', MAIL_USE_SSL=True, MAIL_USERNAME='nokbar@voltaire.sh', MAIL_PASSWORD='H3rpD3rpL0l') mail.init_app(app) @app.route("/") def index(): msg = Message("Hello", sender=("flask", "flask@voltaire.sh"), recipients=["sjchen@sjchen.net"]) msg.body = "testing" msg.html = "<b>testing</b>" mail.send(msg) return msg.html if __name__ == '__main__': app.run()
Python
0.000001
21a504dce25a1b22bda27cd74a443af98b24ad14
Add pseudo filter combining pypandoc and panflute
filters/extract_urls.py
filters/extract_urls.py
import io import pypandoc import panflute def prepare(doc): doc.images = [] doc.links = [] def action(elem, doc): if isinstance(elem, panflute.Image): doc.images.append(elem) elif isinstance(elem, panflute.Link): doc.links.append(elem) if __name__ == '__main__': data = pypandoc.convert_file('example.md', 'json') f = io.StringIO(data) doc = panflute.load(f) doc = panflute.run_filter(action, prepare=prepare, doc=doc) print("\nImages:") for image in doc.images: print(image.url) print("\nLinks:") for link in doc.links: print(link.url)
Python
0
b811bb9e9469a23921f841d4bfe3b52928a83e14
Create b.py
at/abc126/b.py
at/abc126/b.py
read = input s = read() a, b = map(int , [s[:2], s[2:]]) YYMM = False MMYY = False if 1 <= b and b <= 12: YYMM = True if 1 <= a and a <= 12: MMYY = True if YYMM and MMYY : print('AMBIGUOUS') elif YYMM and not MMYY: print('YYMM') elif not YYMM and MMYY: print('MMYY') else : print('NA')
Python
0.000018
32c025a217f7771be94976fda6ede2d80855b4b6
Move things to new units module
pyatmlab/units.py
pyatmlab/units.py
"""Various units-related things """ from pint import (UnitRegistry, Context) ureg = UnitRegistry() ureg.define("micro- = 1e-6 = µ-") # aid conversion between different radiance units sp2 = Context("radiance") sp2.add_transformation( "[length] * [mass] / [time] ** 3", "[mass] / [time] ** 2", lambda ureg, x: x / ureg.speed_of_light) sp2.add_transformation( "[mass] / [time] ** 2", "[length] * [mass] / [time] ** 3", lambda ureg, x: x * ureg.speed_of_light) ureg.add_context(sp2) radiance_units = { "si": ureg.W/(ureg.m**2*ureg.sr*ureg.Hz), "ir": ureg.mW/(ureg.m**2*ureg.sr*(1/ureg.cm))}
Python
0.000001
50494947bdf7fc8fce50cb5f589c84fd48db4b05
test perm using py.test #1150
login/tests/fixture.py
login/tests/fixture.py
# -*- encoding: utf-8 -*- import pytest from login.tests.factories import ( TEST_PASSWORD, UserFactory, ) class PermTest: def __init__(self, client): setup_users() self.client = client def anon(self, url): self.client.logout() response = self.client.get(url) assert 200 == response.status_code def staff(self, url): # check anon user cannot login self.client.logout() response = self.client.get(url) assert 302 == response.status_code assert 'accounts/login' in response['Location'] # check web user cannot login assert self.client.login(username='web', password=TEST_PASSWORD) assert 302 == response.status_code assert 'accounts/login' in response['Location'] # check staff user can login assert self.client.login(username='staff', password=TEST_PASSWORD) response = self.client.get(url) assert 200 == response.status_code @pytest.fixture def perm_check(client): """Check permissions on a URL. We use a clever trick to pass parameters to the fixture. For details: py.test: Pass a parameter to a fixture function http://stackoverflow.com/questions/18011902/py-test-pass-a-parameter-to-a-fixture-function """ return PermTest(client) def setup_users(): """Using factories - set-up users for permissions test cases.""" UserFactory( username='admin', email='admin@pkimber.net', is_staff=True, is_superuser=True ) UserFactory(username='staff', email='staff@pkimber.net', is_staff=True) UserFactory( username='web', email='web@pkimber.net', first_name='William', last_name='Webber' )
Python
0
1a98ccfbff406509d9290e76bbdf8edbb862fc1d
Solve orderred dict
python/py-collections-ordereddict.py
python/py-collections-ordereddict.py
from collections import OrderedDict d = OrderedDict() number_of_items = int(input().strip()) for i in range(number_of_items): item, delimeter, price = input().strip().rpartition(" ") price = int(price) if (item in d): previous_total_purchased = d.get(item) next_total_purchased = previous_total_purchased + price d[item] = next_total_purchased else: d[item] = price for item, price in d.items(): print (f'{item} {price}')
Python
0.999999
af278f4b5ac93c4dc8407e484b7563c6fdf7c6b5
add bin/autopac.py
bin/autopac.py
bin/autopac.py
#!/usr/bin/env python import ConfigParser import os import re import subprocess import sys CONF_PATH_LIST = [ '~/.autopac.ini', '~/.autopac/autopac.ini', '/etc/autopac.ini', '/etc/autopac/autopac.ini', '/autopac.ini', ] class CommandFailed(Exception): MESSAGE_TMPL = ( ">>> Error occurred while executing command: %(command)r\n" ">>> Return code: %(return_code)d\n" ">>> Outputs:\n" "%(output)s" ) def __init__(self, command, return_code, output): self.command = command self.return_code = return_code self.output = output def print_exc(self): sys.stderr.write(self.MESSAGE_TMPL % { 'command': self.command, 'return_code': self.return_code, 'output': self.output, }) class NetworkSetup(object): BASE_COMMAND = 'networksetup' DEFAULT_SECTION = 'AutopacDefaults' PAC_URL_COLUMN = 'pac_url' STATIC_ADDR_COLUMN = 'static_addr' RE_GET_AIRPORT_NETWORK = re.compile( r'Current\sWi-Fi\sNetwork:\s(?P<airport_network>[^\n]+)', re.I) RE_GET_STATUS = re.compile( r'Port:\s*(?P<network_service>[^\n]*)\nDevice:\s*(?P<device>[^\n]*)', re.I) def __init__(self, config_parser): self.cp = config_parser # set default values self.default_pac_url = None self.default_static_addr = None if self.cp.has_section(self.DEFAULT_SECTION): self.default_pac_url = self.cp.get(self.DEFAULT_SECTION, self.PAC_URL_COLUMN) self.default_static_addr = self.cp.get(self.DEFAULT_SECTION, self.STATIC_ADDR_COLUMN) def _execute(self, command, *args): cmd = [ self.BASE_COMMAND, '-%s' % command ] cmd.extend(args) p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) retval = p.wait() output = p.stdout.read() if retval == 0: return output else: raise CommandFailed(" ".join(cmd), retval, output) def _get_airport_network(self, device): output = self._execute('getairportnetwork', device) match_result = self.RE_GET_AIRPORT_NETWORK.match(output) if match_result: return match_result.groupdict().get('airport_network') def get_status(self): output = self._execute('listallhardwareports') for network_service, device 
in self.RE_GET_STATUS.findall(output): try: airport_network = self._get_airport_network(device) except CommandFailed: continue else: return network_service, device, airport_network return None, None, None def get_target_conf(self, airport_network): pac_url = self.default_pac_url static_addr = self.default_static_addr if self.cp.has_section(airport_network): pac_url = self.cp.get(airport_network, self.PAC_URL_COLUMN) static_addr = self.cp.get(airport_network, self.STATIC_ADDR_COLUMN) return pac_url, static_addr def set_autoproxy_state(self, network_service, state): """ networksetup -setautoproxystate $network_service $state """ self._execute('setautoproxystate', network_service, state) sys.stdout.write(">>> Set autoproxy state of %r to %r\n" % (network_service, state)) def setautoproxyurl(self, network_service, pac_url): """ networksetup -setautoproxyurl $network_service $pac_url """ self._execute('setautoproxyurl', network_service, pac_url) sys.stdout.write(">>> Set autoproxy url of %r to %r\n" % (network_service, pac_url)) def set_dhcp(self, network_service): """ networksetup -setdhcp $network_service """ self._execute('setdhcp', network_service) sys.stdout.write(">>> Set dhcp for %r\n" % network_service) def set_manual_with_dhcp_router(self, network_service, static_addr): """ networksetup -setmanualwithdhcprouter $network_service $static_addr """ self._execute('setmanualwithdhcprouter', network_service, static_addr) sys.stdout.write(">>> Set static addr for %r to %r\n" % (network_service, static_addr)) def auto_setup(self): network_service, _, airport_network = self.get_status() if airport_network is None: return False, 'Airport device not found.' 
else: pac_url, static_addr = self.get_target_conf(airport_network) if pac_url: self.setautoproxyurl(network_service, pac_url) else: self.set_autoproxy_state(network_service, 'off') if static_addr: self.set_manual_with_dhcp_router(network_service, static_addr) else: self.set_dhcp(network_service) return True, None if __name__ == '__main__': conf_abspath_list = map(lambda x: os.path.expanduser(x), CONF_PATH_LIST) cp = ConfigParser.ConfigParser() cp.read(conf_abspath_list) networksetup = NetworkSetup(cp) try: is_succeeded, message = networksetup.auto_setup() except CommandFailed as e: e.print_exc() sys.exit(1) else: if not is_succeeded: sys.stderr.write('>>> Error: %s\n' % message) sys.exit(1)
Python
0.000001
fa4155114304d1ebc9e3bb04f546ce7d4708c381
Add simple pipeline
pykit/pipeline.py
pykit/pipeline.py
# -*- coding: utf-8 -*- """ Pipeline that determines phase ordering and execution. """ from __future__ import print_function, division, absolute_import import types cpy = { 'lower_convert': lower_convert, } lower = { } # ______________________________________________________________________ # Execute pipeline def apply_transform(transform, func, env): if isinstance(transform, types.ModuleType): return transform.run(func, env) else: return transform(func, env) def run(transforms, order, func, env): for transform in order: if transform in transforms: func, env = apply_transform(transforms[transform], func, env)
Python
0.000001
141005c72b1686d73cdc581e9ee8313529e11e4c
Add health check script.
tools/health-check.py
tools/health-check.py
#!/usr/bin/python # Health check script that examines the /status/ URI and sends mail on any # condition other than 200/OK. # Configuration is via environment variables: # * POWERMON_STATUS - absolute URL to /status/ URI # * POWERMON_SMTPHOST - SMTP host name used to send mail # * POWERMON_MAILTO - email address where problem reports are sent from email.mime.text import MIMEText from httplib import HTTPConnection, HTTPSConnection from os import environ from os.path import basename from smtplib import SMTP from urlparse import urlparse import sys def getenvars(*vars): """Returns the values of one or more environment variables.""" values = [] for var in vars: if not var in environ: die('%s environment variable not defined' % var) values.append(environ[var]) return tuple(values) def die_err(e, message): """Displays exception details and a message then exits program.""" print message print e sys.exit(1) def die(message): """Displays a message then exits program.""" print message sys.exit(1) def http_get(url): """Returns the tuple (status, response body) for a GET request to the given URL.""" conn = None headers = { 'Accept': 'text/plain, text/html, text/xml', 'Content-Length': 0, 'User-Agent': 'Python/%s.%s.%s' % sys.version_info[0:3] } result = urlparse(url) try : if result.scheme == 'https': conn = HTTPSConnection(result.netloc) else: conn = HTTPConnection(result.netloc) conn.request('GET', url, "", headers) response = conn.getresponse() return (response.status, str(response.read())) except Exception, e: die_err(e, 'HTTP GET failed:') finally: if conn: conn.close() def send_mail(mfrom, mto, body, smtp_host): """Sends a health check failure notice to the designated recipient.""" msg = MIMEText(body) msg['Subject'] = 'Powermon Health Check Failure' msg['From'] = mfrom msg['To'] = mto s = SMTP(smtp_host) try: s.sendmail(mfrom, [mto], msg.as_string()) finally: s.quit (status_url, mailto, smtp_host) = getenvars( 'POWERMON_STATUS', 'POWERMON_MAILTO', 'POWERMON_SMTPHOST') 
hostname = 'localhost' if 'HOSTNAME' in environ: hostname = environ['HOSTNAME'] mailfrom = '%s@%s' % (environ['USER'], hostname) print 'Checking', status_url (status, body) = http_get(status_url) print body if status > 200: print 'Sending failure notice to', mailto send_mail(mailfrom, mailto, body, smtp_host)
Python
0
210eba35fc4473e626fc58a8e4ea3cdbb6abdc28
add undocumented function to display new messages.
rtv/docs.py
rtv/docs.py
from .__version__ import __version__ __all__ = ['AGENT', 'SUMMARY', 'AUTH', 'CONTROLS', 'HELP', 'COMMENT_FILE', 'SUBMISSION_FILE', 'COMMENT_EDIT_FILE'] AGENT = """\ desktop:https://github.com/michael-lazar/rtv:{} (by /u/civilization_phaze_3)\ """.format(__version__) SUMMARY = """ Reddit Terminal Viewer is a lightweight browser for www.reddit.com built into a terminal window. """ AUTH = """\ Authenticating is required to vote and leave comments. If only a username is given, the program will display a secure prompt to enter a password. """ CONTROLS = """ Controls -------- RTV currently supports browsing both subreddits and individual submissions. In each mode the controls are slightly different. In subreddit mode you can browse through the top submissions on either the front page or a specific subreddit. In submission mode you can view the self text for a submission and browse comments. """ HELP = """ Basic Commands `j/k` or `UP/DOWN` : Move the cursor up/down `m/n` or `PgUp/PgDn`: Jump to the previous/next page `o` or `ENTER` : Open the selected item as a webpage `r` or `F5` : Refresh page content `u` : Log in or switch accounts `i` : Display new messages prompt `?` : Show the help screen `q` : Quit Authenticated Commands `a/z` : Upvote/downvote `c` : Compose a new post or comment `e` : Edit an existing post or comment `d` : Delete an existing post or comment `s` : Open/close subscribed subreddits list Subreddit Mode `l` or `RIGHT` : Enter the selected submission `/` : Open a prompt to switch subreddits `f` : Open a prompt to search the current subreddit Submission Mode `h` or `LEFT` : Return to subreddit mode `SPACE` : Fold the selected comment, or load additional comments """ COMMENT_FILE = u""" # Please enter a comment. Lines starting with '#' will be ignored, # and an empty message aborts the comment. # # Replying to {author}'s {type} {content} """ COMMENT_EDIT_FILE = u"""{content} # Please enter a comment. 
Lines starting with '#' will be ignored, # and an empty message aborts the comment. # # Editing your comment """ SUBMISSION_FILE = u"""{content} # Please enter your submission. Lines starting with '#' will be ignored, # and an empty field aborts the submission. # # The first line will be interpreted as the title # The following lines will be interpreted as the content # # Posting to {name} """
from .__version__ import __version__ __all__ = ['AGENT', 'SUMMARY', 'AUTH', 'CONTROLS', 'HELP', 'COMMENT_FILE', 'SUBMISSION_FILE', 'COMMENT_EDIT_FILE'] AGENT = """\ desktop:https://github.com/michael-lazar/rtv:{} (by /u/civilization_phaze_3)\ """.format(__version__) SUMMARY = """ Reddit Terminal Viewer is a lightweight browser for www.reddit.com built into a terminal window. """ AUTH = """\ Authenticating is required to vote and leave comments. If only a username is given, the program will display a secure prompt to enter a password. """ CONTROLS = """ Controls -------- RTV currently supports browsing both subreddits and individual submissions. In each mode the controls are slightly different. In subreddit mode you can browse through the top submissions on either the front page or a specific subreddit. In submission mode you can view the self text for a submission and browse comments. """ HELP = """ Basic Commands `j/k` or `UP/DOWN` : Move the cursor up/down `m/n` or `PgUp/PgDn`: Jump to the previous/next page `o` or `ENTER` : Open the selected item as a webpage `r` or `F5` : Refresh page content `u` : Log in or switch accounts `?` : Show the help screen `q` : Quit Authenticated Commands `a/z` : Upvote/downvote `c` : Compose a new post or comment `e` : Edit an existing post or comment `d` : Delete an existing post or comment `s` : Open/close subscribed subreddits list Subreddit Mode `l` or `RIGHT` : Enter the selected submission `/` : Open a prompt to switch subreddits `f` : Open a prompt to search the current subreddit Submission Mode `h` or `LEFT` : Return to subreddit mode `SPACE` : Fold the selected comment, or load additional comments """ COMMENT_FILE = u""" # Please enter a comment. Lines starting with '#' will be ignored, # and an empty message aborts the comment. # # Replying to {author}'s {type} {content} """ COMMENT_EDIT_FILE = u"""{content} # Please enter a comment. Lines starting with '#' will be ignored, # and an empty message aborts the comment. 
# # Editing your comment """ SUBMISSION_FILE = u"""{content} # Please enter your submission. Lines starting with '#' will be ignored, # and an empty field aborts the submission. # # The first line will be interpreted as the title # The following lines will be interpreted as the content # # Posting to {name} """
Python
0
04287120372a6fdb906ed9f27ead4c5f91d5690e
Add a modified version of simple bot
tota/heroes/lenovo.py
tota/heroes/lenovo.py
from tota.utils import closest, distance, sort_by_distance, possible_moves from tota import settings __author__ = "angvp" def create(): def lenovo_hero_logic(self, things, t): # some useful data about the enemies I can see in the map enemy_team = settings.ENEMY_TEAMS[self.team] enemies = [thing for thing in things.values() if thing.team == enemy_team] closest_enemy = closest(self, enemies) closest_enemy_distance = distance(self, closest_enemy) real_life = (self.life / self.max_life) * 100 # now lets decide what to do if int(real_life) < 85 and self.can('heal', t): # if I'm hurt and can heal, heal if closest_enemy_distance <= settings.HEAL_DISTANCE and self.can('fireball', t): return 'fireball', closest_enemy.position elif closest_enemy_distance <= settings.HEAL_DISTANCE and self.can('stun', t): return 'stun', closest_enemy.position elif closest_enemy_distance <= settings.HEAL_DISTANCE and self.can('attack', t): return 'attack', closest_enemy.position else: return 'heal', self.position else: # else, try to attack if closest_enemy: # there is an enemy if closest_enemy_distance <= settings.STUN_DISTANCE and self.can('stun', t): # try to stun him return 'stun', closest_enemy.position if closest_enemy_distance <= settings.FIREBALL_DISTANCE and self.can('fireball', t) and closest_enemy_distance > settings.FIREBALL_RADIUS: # else try to fireball him, but only if I'm not in range return 'fireball', closest_enemy.position elif closest_enemy_distance <= settings.HERO_ATTACK_DISTANCE: # else try to attack him return 'attack', closest_enemy.position else: # of finally just move to him (if I have life > 50) moves = sort_by_distance(closest_enemy, possible_moves(self, things)) if len(moves) > 0: back_moves = moves[len(moves)-1] else: back_moves = self.position if moves and int(real_life) > 50: return 'move', moves[0] else: return 'move', back_moves # can't do the things I want. Do nothing. return None return lenovo_hero_logic
Python
0
2f7d5f30fd6b6cb430c55b21d7cab75800bcfe97
Add a little hacky highlighter
screencasts/hello-weave/highlight.py
screencasts/hello-weave/highlight.py
import json prompt = 'ilya@weave-01:~$ ' highlight = [ ('weave-01', 'red'), ('weave-02', 'red'), ('docker', 'red'), ('run', 'red'), ('--name', 'red'), ('hello', 'red'), ('netcat', 'red'), ('-lk', 'red'), ('1234', 'red'), ('sudo curl -s -L git.io/weave -o /usr/local/bin/weave', 'red'), ('b4e40e4b4665a1ffa23f90eb3ab57c83ef243e64151bedc1501235df6e532e09\r\n', 'red'), ('Hello, Weave!\r\n', 'red'), ] highlight_tokens = [t[0] for t in highlight] tokens = [] colours = { 'red': ('\033[91m', '\033[00m'), } for f in ['rec-weave-01.json', 'rec-weave-02.json']: with open(f) as json_data: d = json.load(json_data) json_data.close() commands = d['stdout'] word = '' word_start = 0 for i,x in enumerate(commands): curr = x[1] if curr == prompt: continue elif curr != '\r\n' and curr != ' ' and len(curr) == 1: if word_start == 0: word_start = i word = curr else: word += curr elif (curr == '\r\n' or curr == ' ') and word_start != 0: tokens.append((word, word_start, True)) word_start = 0 elif curr != '\r\n' and len(curr) > 1: tokens.append((curr, i, False)) offset = 0 for x in tokens: if x[0] in highlight_tokens: commands.insert(x[1] + offset, [0, colours['red'][0]]) offset += 1 l = len(x[0]) if x[2] else 1 commands.insert(x[1] + l + offset, [0, colours['red'][1]]) offset += 1 d['commands'] = commands print(json.dumps(d))
Python
0.000005
cb958aedf6678872eb21d0214f6d6b759d33deec
Copy in linear function approx from Easy21
hiora_cartpole/linfa.py
hiora_cartpole/linfa.py
# -*- coding: utf-8 -*- from __future__ import unicode_literals import itertools import numpy as np import pyrsistent import easy21.core as easy21 LinfaExperience = pyrsistent.immutable( 'feature, theta, E, N0, epsi, alpha, lmbda, p_obs, p_act') def init(lmbda, alpha, feature): """ Arguments: feature - NPArray mapping dealer sums (axis 0), player sums (axis 1) and actions (axis 2) to feature vectors """ return LinfaExperience(feature=feature, theta=np.zeros(feature.shape[3]), E=np.zeros(feature.shape[3]), N0=100, epsi=0.05, alpha=alpha, lmbda=lmbda, p_obs=None, # p … previous p_act=None) #### Make a feature lookup table # Note: This is not what the exercise specifies, I think, but I don't understand # how that what the exercise specifies makes sense. As I understand it, the # features vectors in the the exercise are 36-element vectors in which exactly # one element is 1 . The 1 indicates that the state is in certain dealer card # intervals, certain player card intervals and that we've chose a certain # action. Why not encode these separately? This is what I've done here. I'll see # if it works or not. # # Note about note: I was wrong stating that exactly one element is 1. For # example, dealer sum 4, player sum 6, action STICK turns on the ([1, 4], [1, # 6], hit), ([1, 4], [4, 9], hit), ([4, 7], [1, 6], hit) and ([4, 7], [4, 9], # hit) features. def feature_slow(o, a): d = o.dealer_sum p = o.player_sum booleans = [1 <= d <= 4, 4 <= d <= 7, 7 <= d <= 10, 1 <= p <= 6, 4 <= p <= 9, 7 <= p <= 12, 10 <= p <= 15, 13 <= p <= 18, 16 <= p <= 21, a == easy21.Action.HIT] # Then 1/True indicates HIT. return np.array(booleans, np.bool) def is_in_interval(n, iv): return iv[0] <= n <= iv[1] # Note: This is what the exercise specifies. Actually it works much better than # what I used. The encodings are equivalent, i.e. if you have a feature vector # in their format, you can convert it into one in my format and vice versa. 
The # difference is that their feature vector is longer, so that we can use more # weights. Is this the only reason why it works much better? def ex_feature_slow(o, a): dealer_intervals = [[1, 4], [4, 7], [7, 10]] player_intervals = [[1, 6], [4, 9], [7, 12], [10, 15], [13, 18], [16, 21]] actions = [easy21.Action.STICK, easy21.Action.HIT] d = o.dealer_sum p = o.player_sum return np.array([is_in_interval(d, div) and is_in_interval(p, piv) and a == this_a for (div, piv, this_a) in itertools.product(dealer_intervals, player_intervals, actions)], np.bool) def prepare_feature(ao_to_feature): return np.array([ [ [ao_to_feature(easy21.Observation(d, p), a) for a in [easy21.Action.STICK, easy21.Action.HIT] ] for p in xrange(1, 22) ] for d in xrange(1, 11) ]) def true_with_prob(p): return np.random.choice(2, p=[1-p, p]) def choose_action(e, o): if true_with_prob(e.epsi): return easy21.rand_action() else: stick_return = e.feature[o.dealer_sum - 1, o.player_sum - 1, easy21.Action.STICK].dot(e.theta) hit_return = e.feature[o.dealer_sum - 1, o.player_sum - 1, easy21.Action.HIT].dot(e.theta) return easy21.Action.STICK if stick_return > hit_return \ else easy21.Action.HIT # Python has not built-in readable argmax. numpy would be overkill. def think(e, o, r, done=False): """ Args: e … experience o … observation r … reward """ if not done: a = choose_action(e, o) # action feat = e.feature[o.dealer_sum - 1, o.player_sum - 1, a] Qnext = feat.dot(e.theta) # expected Q of next action else: a = None Qnext = 0 if e.p_obs: # Except for first timestep. p_feat = e.feature[e.p_obs.dealer_sum - 1, e.p_obs.player_sum - 1, e.p_act] Qcur = p_feat.dot(e.theta) delta = Qcur - (r + Qnext) # Yes, in the gradient it's inverted. e.E.__iadd__(p_feat) # Note: Eligibility traces could be done slightly more succinctly by # scaling the feature vectors themselves. See Silver slides. 
e.theta.__isub__(e.alpha * delta * e.E) e.E.__imul__(e.lmbda) return e.set(p_obs=o, p_act=a), a def wrapup(e, o, r): e, _ = think(e, o, r, done=True) return e.set(p_obs=None, p_act=None, E=np.zeros((36))) def Q(e): return e.feature.dot(e.theta) # Matrix of products of each feature vector # with weights.
Python
0
6b4733c213046c7a16bf255cfbc92408e2f01423
Add test for registry model hash
tests/models/test_authenticated_registry_model.py
tests/models/test_authenticated_registry_model.py
import pytest from dockci.models.auth import AuthenticatedRegistry BASE_AUTHENTICATED_REGISTRY = dict( id=1, display_name='Display name', base_name='Base name', username='Username', password='Password', email='Email', insecure=False, ) class TestHash(object): """ Test ``AuthenticatedRegistry.__hash__`` """ def test_hash_eq(self): """ Test when hash should be equal """ left = AuthenticatedRegistry(**BASE_AUTHENTICATED_REGISTRY) right = AuthenticatedRegistry(**BASE_AUTHENTICATED_REGISTRY) assert hash(left) == hash(right) @pytest.mark.parametrize('attr_name,attr_value', [ ('id', 7), ('display_name', 'different'), ('base_name', 'different'), ('username', 'different'), ('password', 'different'), ('email', 'different'), ('insecure', True), ]) def test_hash_ne(self, attr_name, attr_value): """ Test when hash should be not equal """ left = AuthenticatedRegistry(**BASE_AUTHENTICATED_REGISTRY) right = AuthenticatedRegistry(**BASE_AUTHENTICATED_REGISTRY) setattr(right, attr_name, attr_value) assert hash(left) != hash(right)
Python
0
984b8ecd043986877349c6de789842155b8a9fa1
Add own version of compare script
scr_compare_chunks_MK.py
scr_compare_chunks_MK.py
import csv import string from nltk import word_tokenize from sfsf import training_data_factory #from sfsf import sfsf_config from collections import defaultdict, Counter def read_chunk_scores( score_file ): top_chunk_scores = defaultdict(list) bottom_chunk_scores = defaultdict(list) with open(score_file, 'rt') as fh: csv_reader = csv.reader(fh, delimiter=",") headers = next(csv_reader) for row in csv_reader: if row[0][:8] == "training": continue isbn = row[2] score = float(row[6]) if row[0] == "testing_bottom": bottom_chunk_scores[isbn].append(score) else: top_chunk_scores[isbn].append(score) return top_chunk_scores, bottom_chunk_scores def compute_doc_freq( top_text_chunks ): doc_freq = defaultdict( list ) for isbn, text_chunks in top_text_chunks: terms = set([term for text_chunk in text_chunks for term in tokenize_chunk( text_chunk )]) for term in terms: doc_freq[term] += [isbn] return doc_freq def get_isbn_title(isbn, isbn_data): for isbn_row in isbn_data: if isbn_row[1] == isbn: return isbn_row[2] def do_sample( top_chunk_scores, bottom_chunk_scores, wpg_data_file ): training_factory = training_data_factory.TrainingDataFactory() isbn_data = training_factory.get_isbn_data( wpg_data_file ) # returns data sorted by sales top_isbn_data = [isbn_row for isbn_row in isbn_data if isbn_row[1] in top_chunk_scores] bottom_isbn_data = [isbn_row for isbn_row in isbn_data if isbn_row[1] in bottom_chunk_scores] return top_isbn_data, bottom_isbn_data def get_text_chunks( sample_data ): training_factory = training_data_factory.TrainingDataFactory() return training_factory.sample_txts( sample_data, sample_size=5000 ) def filter_chunks( chunk_tuples, chunk_scores, threshold, bigger_than ): if bigger_than: print("filtering bigger than") return [ chunk for isbn, chunks in chunk_tuples for chunk, score in zip(chunks, chunk_scores[isbn]) if score >= threshold ] else: print("filtering smaller than") return [ chunk for isbn, chunks in chunk_tuples for chunk, score in zip(chunks, 
chunk_scores[isbn]) if score < threshold ] def tokenize_chunk( chunk_as_string ): more_punctuation = string.punctuation + '“”‘’«»' return word_tokenize( chunk_as_string.lower().translate( str.maketrans( "", "", more_punctuation ) ) ) def make_dist( chunks, doc_freq ): return Counter([term for chunk in chunks for term in tokenize_chunk( chunk ) if len(doc_freq[term]) > 1]) def get_most_frequent_terms( isbn_data, chunk_scores, threshold, bigger_than=True ): text_chunks = get_text_chunks( isbn_data ) doc_freq = compute_doc_freq( text_chunks ) chunks = filter_chunks( text_chunks, chunk_scores, threshold, bigger_than ) fdist = make_dist( chunks, doc_freq ) top_terms = [term for term, freq in fdist.most_common(10000)] return top_terms, fdist, doc_freq if __name__ == "__main__": wpg_data_file = "wpg_data.csv" score_file = "../docker_volume/report-deeplearning-total-120-train-20-test-20-iteration-3-date-20171030_1030.csv" #score_file = "./data/non_disclosed/remote_volume_20170406/report_20170404_0951.csv" total_size = 120 top_chunk_scores, bottom_chunk_scores = read_chunk_scores( score_file ) top_isbn_data, bottom_isbn_data = do_sample( top_chunk_scores, bottom_chunk_scores, wpg_data_file ) bottom_terms, bottom_fdist, bottom_doc_freq = get_most_frequent_terms( bottom_isbn_data, bottom_chunk_scores, 0.5, bigger_than=False ) top_terms, top_fdist, top_doc_freq = get_most_frequent_terms( top_isbn_data, top_chunk_scores, 0.8, bigger_than=True ) print(bottom_fdist.most_common(100)) print(top_fdist.most_common(100)) top_only = [term for term in top_terms[:200] if term not in bottom_terms] bottom_only = [term for term in bottom_terms[:200] if term not in top_terms] for index, term in enumerate(top_terms[:1000]): top_rank = index + 1 bottom_rank = "NA" if term in bottom_terms: bottom_rank = bottom_terms.index(term) + 1 if bottom_rank == "NA" or bottom_rank / top_rank > 3: print(term, top_rank, bottom_rank) for term in top_only: titles = [get_isbn_title(isbn, top_isbn_data) for isbn 
in top_doc_freq[term]] print("top only:", term, top_fdist[term], titles) for term in bottom_only: titles = [get_isbn_title(isbn, bottom_isbn_data) for isbn in bottom_doc_freq[term]] print("bottom only:", term, bottom_fdist[term], titles)
Python
0
57dc7e58dcfd101c29026c8c07763cba2eb7dd14
add helper script to inspect comments on released content
scripts/show_comments.py
scripts/show_comments.py
#!/usr/bin/env python from __future__ import print_function import sys def main(): fs = open(sys.argv[1]).read().splitlines() fs = map(lambda f: {'name':f, 'contents':open(f).readlines()},fs) for f in fs: buffer = '' multiline = 0 is_first = True for i,line in enumerate(f['contents'],start=1): multiline += line.count('(*') if (line.count('//') > 0 or multiline > 0) and not is_first: buffer += '{}: {}'.format(i,line) closed = line.count('*)') if closed > 0 and is_first: is_first = False multiline -= closed if buffer: print ('*** {}:'.format(f['name'])) print (buffer) print () if __name__ == '__main__': main()
Python
0
4b83b7a3d286f60454c96ae609ce18c731339877
add a stub fuse-based fs component
src/fs/nomadfs.py
src/fs/nomadfs.py
#!/usr/bin/env python # # Copyright (c) 2015 Josef 'Jeff' Sipek <jeffpc@josefsipek.net> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # import errno import fuse fuse.fuse_python_api = (0, 2) class Nomad(fuse.Fuse): def __init__(self, *args, **kw): fuse.Fuse.__init__(self, *args, **kw) def getattr(self, path): return -errno.ENOSYS def getdir(self, path): return -errno.ENOSYS if __name__ == "__main__": fs = Nomad() fs.flags = 0 fs.multithreaded = 0 fs.parse(errex=1) fs.main()
Python
0
4efc50f91d2b141270739ea9f8bef9685cc86e7f
add houdini/shelf/fitcam
houdini/shelf/fitcam.py
houdini/shelf/fitcam.py
# -*- coding: utf-8 -*- import hou import toolutils def setfit(oldCam, resx, resy): oldCam.setDisplayFlag(False) oldCam.parm(oldCam.path() + "/resx").set(resx) oldCam.parm(oldCam.path() + "/resy").set(resy) camups = oldCam.inputAncestors() if camups == (): camup = oldCam else: camup = camups = oldCam.inputAncestors()[-1] null = hou.node('obj').createNode('null', 'ScaleWorld') blend = hou.node('obj').createNode('blend', 'Blend_position') fetch = hou.node('obj').createNode('fetch', 'Fetch_NewCam') newCam = hou.node('obj').createNode('cam', 'Render_Camera') null.move(camup.position() + hou.Vector2(0, 1)) blend.move(oldCam.position() + hou.Vector2(0, -1)) fetch.move(oldCam.position() + hou.Vector2(0, -2)) newCam.move(oldCam.position() + hou.Vector2(0, -3)) camup.setNextInput(null) blend.setNextInput(oldCam) fetch.setNextInput(blend) newCam.setNextInput(fetch) null.setDisplayFlag(False) blend.setDisplayFlag(False) fetch.setDisplayFlag(False) blend.parm(blend.path() + "/blendm1").set(63) fetch.parm(fetch.path() + "/useinputoffetched").set(1) oldCamPath = oldCam.path() relativePath = newCam.relativePathTo(oldCam) resx = " ch(\"" + relativePath + "/resx\")" resy = " ch(\"" + relativePath + "/resy\")" focal = " ch(\"" + relativePath + "/focal\")" aperture = " ch(\"" + relativePath + "/aperture\")" vm_background = " ch(\"" + relativePath + "/vm_background\")" newCam.setParmExpressions(dict(resx=resx, resy=resy, focal=focal, aperture=aperture, vm_background=vm_background)) newCam.parm("vm_bgenable").set(0) newCam.parm("vm_bgenable").set(0) newCam.parm("vm_bgenable").lock(True) def main(): view = toolutils.sceneViewer() sel = view.selectObjects('请选择一个相机') if len(sel) > 0: if sel[0].type().name()=='cam': resolution = hou.ui.readInput('set Resolution',buttons = ('Set','close'),title = 'set Resolution',initial_contents = '1920-1080',close_choice = 1,default_choice = 0) resx = resolution[1].split('-')[0] resy = resolution[1].split('-')[1] oldCam = sel[0] if resolution[0] == 0: 
setfit(oldCam, resx, resy)
Python
0
1886af3e8c96108a8f7bdb320969373e66299bf4
Create __init__.py
python/django_standalone_orm/__init__.py
python/django_standalone_orm/__init__.py
Python
0.000429
f7046ba07a3ec41d26df0b0bce67c6ab8013bfd8
Fix for the activity whose transcripts are stunted
doc/release-scripts/Fix_Transcript_Stunted.py
doc/release-scripts/Fix_Transcript_Stunted.py
''' Issue :Transcript for model comversations stunted Fix : The CSS used for the transcript part is not same as that of the others (which used the toggler CSS). Have made the required changes for the transcripts related to the audio and model conversations which come up on click of answer this in Unit 0 :English Beginner Lesson 8 : Let's Talk ''' import re from gnowsys_ndf.ndf.models import node_collection from bs4 import BeautifulSoup '''Extracting the let's talk activity having the issue''' actnd = node_collection.find({'_type':'GSystem','_id':ObjectId('59425d1c4975ac013cccbba3')}) soup = BeautifulSoup(actnd.content) mrkup2 = '<form class="trans-form"><input align="right" id="toggler09" type="checkbox" /> <label class="toggle-me" for="toggler09">Transcript</label><div class="transcript"><object data="/media/b/0/b/3537c6b9800766bde84555191d5b510c5d760afc72a8fea888b765258369f.txt" style="width:99%!important; height:auto!important;word-wrap: break-word;" type="text/html"></object></div></form>' mrkup3 = '<form class="trans-form"><input align="right" id="toggler08" type="checkbox" /> <label class="toggle-me" for="toggler08">Transcript</label><div class="transcript"><object data="/media/d/0/c/94657554e663a44dc3dfa309454108a4ba5bbc620131bb7a1a1e1d089cb88.txt" style="width:99%!important; height:auto!important;word-wrap: break-word;" type="text/html"></object></div></form>' mrkup4 = '<form class="trans-form"><input align="right" id="toggler07" type="checkbox" /> <label class="toggle-me" for="toggler07">Transcript</label><div class="transcript"><object data="/media/2/a/3/7868f3d837d326586fe59f6b1f1abdde16b3bfcbcb1e239511877d6963583.txt" style="width:99%!important; height:auto!important;word-wrap: break-word;" type="text/html"></object></div></form>' '''Replace the transcript related tags with the required''' for each in soup.find_all('input',{"class":"small radius transcript-toggler"}): #print each['class'],each.attrs,each.attrs.keys() stylflg = each.has_attr('style') if 
stylflg: #for child in each.parent.children: # print each.parent.children prnt_div = each.parent inner_divtag = prnt_div.find('div',{"class":"transcript-data hide"}) print inner_divtag trnscrpt_file = inner_divtag.find('object')['data'] print trnscrpt_file if trnscrpt_file.split('/')[-1] == '3537c6b9800766bde84555191d5b510c5d760afc72a8fea888b765258369f.txt': inner_divtag.decompose() each.replaceWith(BeautifulSoup(mrkup2,'html.parser')) if trnscrpt_file.split('/')[-1] == '94657554e663a44dc3dfa309454108a4ba5bbc620131bb7a1a1e1d089cb88.txt': inner_divtag.decompose() each.replaceWith(BeautifulSoup(mrkup3,'html.parser')) if trnscrpt_file.split('/')[-1] == '7868f3d837d326586fe59f6b1f1abdde16b3bfcbcb1e239511877d6963583.txt': inner_divtag.decompose() each.replaceWith(BeautifulSoup(mrkup4,'html.parser')) #print prnt_div #print "*"*30 actnd.content = soup actnd.content = actnd.content.decode("utf-8") actnd.save()
Python
0.000002
861b7742fd4954e8cf3a1242e8818b9e380acf62
Add Hetzner Cloud Inventory Plugin (#52553)
lib/ansible/plugins/inventory/hcloud.py
lib/ansible/plugins/inventory/hcloud.py
# Copyright (c) 2019 Hetzner Cloud GmbH <info@hetzner-cloud.de> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = r""" name: hcloud plugin_type: inventory authors: - Lukas Kaemmerling (@lkaemmerling) short_description: Ansible dynamic inventory plugin for the Hetzner Cloud. version_added: "2.8" requirements: - python >= 2.7 - hcloud-python >= 1.0.0 description: - Reads inventories from the Hetzner Cloud API. - Uses a YAML configuration file that ends with hcloud.(yml|yaml). options: plugin: description: marks this as an instance of the "hcloud" plugin required: true choices: ["hcloud"] token: description: The Hetzner Cloud API Token. required: true env: - name: HCLOUD_TOKEN connect_with: description: Connect to the server using the value from this field. default: public_ipv4 type: str choices: - public_ipv4 - hostname - ipv4_dns_ptr locations: description: Populate inventory with instances in this location. default: [] type: list required: false types: description: Populate inventory with instances with this type. default: [] type: list required: false images: description: Populate inventory with instances with this image name, only available for system images. default: [] type: list required: false label_selector: description: Populate inventory with instances with this label. default: "" type: str required: false """ EXAMPLES = r""" # Minimal example. `HCLOUD_TOKEN` is exposed in environment. 
plugin: hcloud # Example with locations, types, groups and token plugin: hcloud token: foobar locations: - nbg1 types: - cx11 """ import os from ansible.errors import AnsibleError, AnsibleParserError from ansible.module_utils._text import to_native from ansible.plugins.inventory import BaseInventoryPlugin from ansible.release import __version__ try: from hcloud import hcloud except ImportError: raise AnsibleError("The Hetzner Cloud dynamic inventory plugin requires hcloud-python.") class InventoryModule(BaseInventoryPlugin): NAME = "hcloud" def _configure_hcloud_client(self): self.api_token = self.get_option("token") if self.api_token is None: raise AnsibleError( "Please specify a token, via the option token or via environment variable HCLOUD_TOKEN") self.endpoint = os.getenv("HCLOUD_ENDPOINT") or "https://api.hetzner.cloud/v1" self.client = hcloud.Client(token=self.api_token, api_endpoint=self.endpoint, application_name="ansible-inventory", application_version=__version__) def _test_hcloud_token(self): try: # We test the API Token against the location API, because this is the API with the smallest result # and not controllable from the customer. 
self.client.locations.get_all() except hcloud.APIException: raise AnsibleError("Invalid Hetzner Cloud API Token.") def _add_groups(self): locations = self.client.locations.get_all() for location in locations: self.inventory.add_group(to_native("location_" + location.name)) images = self.client.images.get_all(type="system") for image in images: self.inventory.add_group(to_native("image_" + image.os_flavor)) server_types = self.client.server_types.get_all() for server_type in server_types: self.inventory.add_group(to_native("server_type_" + server_type.name)) def _get_servers(self): if len(self.get_option("label_selector")) > 0: self.servers = self.client.servers.get_all(label_selector=self.get_option("label_selector")) else: self.servers = self.client.servers.get_all() def _filter_servers(self): if self.get_option("locations"): tmp = [] for server in self.servers: if server.datacenter.location.name in self.get_option("locations"): tmp.append(server) self.servers = tmp if self.get_option("types"): tmp = [] for server in self.servers: if server.server_type.name in self.get_option("types"): tmp.append(server) self.servers = tmp if self.get_option("images"): tmp = [] for server in self.servers: if server.image is not None and server.image.os_flavor in self.get_option("images"): tmp.append(server) self.servers = tmp def _set_server_attributes(self, server): self.inventory.set_variable(server.name, "id", to_native(server.id)) self.inventory.set_variable(server.name, "name", to_native(server.name)) self.inventory.set_variable(server.name, "status", to_native(server.status)) # Network self.inventory.set_variable(server.name, "ipv4", to_native(server.public_net.ipv4.ip)) self.inventory.set_variable(server.name, "ipv6_network", to_native(server.public_net.ipv6.network)) self.inventory.set_variable(server.name, "ipv6_network_mask", to_native(server.public_net.ipv6.network_mask)) if self.get_option("connect_with") == "public_ipv4": self.inventory.set_variable(server.name, 
"ansible_host", to_native(server.public_net.ipv4.ip)) elif self.get_option("connect_with") == "hostname": self.inventory.set_variable(server.name, "ansible_host", to_native(server.name)) elif self.get_option("connect_with") == "ipv4_dns_ptr": self.inventory.set_variable(server.name, "ansible_host", to_native(server.public_net.ipv4.dns_ptr)) # Server Type self.inventory.set_variable(server.name, "server_type", to_native(server.image.name)) # Datacenter self.inventory.set_variable(server.name, "datacenter", to_native(server.datacenter.name)) self.inventory.set_variable(server.name, "location", to_native(server.datacenter.location.name)) # Image self.inventory.set_variable(server.name, "image_id", to_native(server.image.id)) self.inventory.set_variable(server.name, "image_name", to_native(server.image.name)) def verify_file(self, path): """Return the possibly of a file being consumable by this plugin.""" return ( super(InventoryModule, self).verify_file(path) and path.endswith((self.NAME + ".yaml", self.NAME + ".yml")) ) def parse(self, inventory, loader, path, cache=True): super(InventoryModule, self).parse(inventory, loader, path, cache) self._read_config_data(path) self._configure_hcloud_client() self._test_hcloud_token() self._add_groups() self._get_servers() self._filter_servers() for server in self.servers: self.inventory.add_host(server.name) self.inventory.add_host(server.name, group="location_" + server.datacenter.location.name) self.inventory.add_host(server.name, group="image_" + server.image.os_flavor) self.inventory.add_host(server.name, group="server_type_" + server.server_type.name) self._set_server_attributes(server)
Python
0.000005
88548319d8a7c44d039ce269621f0a9ff4ee8af6
refactor leslie matrix; add leslie_exe.py
poptox/leslie/leslie_exe.py
poptox/leslie/leslie_exe.py
import numpy as np import os.path import pandas as pd import sys #find parent directory and import base (travis) parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)) sys.path.append(parentddir) from base.uber_model import UberModel, ModelSharedInputs # print(sys.path) # print(os.path) class LeslieInputs(ModelSharedInputs): """ Input class for Leslie. """ def __init__(self): """Class representing the inputs for Leslie""" super(LeslieInputs, self).__init__() self.init_pop_size = pd.Series([], dtype="float") self.stages = pd.Series([], dtype="float") self.l_m = pd.Series([], dtype="float") self.time_steps = pd.Series([], dtype="float") class LeslieOutputs(object): """ Output class for Leslie. """ def __init__(self): """Class representing the outputs for Leslie""" super(LeslieOutputs, self).__init__() self.out_pop_matrix = pd.Series(name="out_pop_matrix") class Leslie(UberModel, LeslieInputs, LeslieOutputs): """ Leslie model for population growth. """ def __init__(self, pd_obj, pd_obj_exp): """Class representing the Leslie model and containing all its methods""" super(Leslie, self).__init__() self.pd_obj = pd_obj self.pd_obj_exp = pd_obj_exp self.pd_obj_out = None def execute_model(self): """ Callable to execute the running of the model: 1) Populate input parameters 2) Create output DataFrame to hold the model outputs 3) Run the model's methods to generate outputs 4) Fill the output DataFrame with the generated model outputs """ self.populate_inputs(self.pd_obj, self) self.pd_obj_out = self.populate_outputs(self) self.run_methods() self.fill_output_dataframe(self) # Begin model methods def run_methods(self): """ Execute all algorithm methods for model logic """ try: self.leslie_grow() except Exception as e: print(str(e)) def leslie_grow(self): self.out_pop_matrix = np.zeros(shape=(self.stages, self.time_steps)) self.out_pop_matrix[:, 0] = self.init_pop_size for i in range(1, self.time_steps): n = np.dot(self.l_m, self.out_pop_matrix[:, 
i-1]) self.out_pop_matrix[:, i] = n.squeeze() return self.out_pop_matrix.tolist()
Python
0.000011
d3d6a6018d55581bf081c93386f6676c8bb105ce
Add module for running the main simulation
simulate.py
simulate.py
import genetic import sys output = sys.stdout def setOutput(out): output = out genetic.setOutput(output) # Test data for a XOR gate testData = ( (0.1, 0.1, 0.9), (0.1, 0.9, 0.9), (0.9, 0.1, 0.9), (0.9, 0.9, 0.1) ) def simulate(): sim = genetic.Simulation(2, 1, testData, 100) sim.simulate(100)
Python
0
2cd1e7fcdf53c312c3db8e6f1d257084a87cccbb
Add migration to update action implementation hashes.
recipe-server/normandy/recipes/migrations/0045_update_action_hashes.py
recipe-server/normandy/recipes/migrations/0045_update_action_hashes.py
# -*- coding: utf-8 -*- from __future__ import unicode_literals import hashlib from base64 import b64encode, urlsafe_b64encode from django.db import migrations def make_hashes_urlsafe_sri(apps, schema_editor): Action = apps.get_model('recipes', 'Action') for action in Action.objects.all(): data = action.implementation.encode() digest = hashlib.sha384(data).digest() data_hash = urlsafe_b64encode(digest) action.implementation_hash = 'sha384-' + data_hash.decode() action.save() def make_hashes_sha1(apps, schema_editor): Action = apps.get_model('recipes', 'Action') for action in Action.objects.all(): data = action.implementation.encode() data_hash = hashlib.sha1(data).hexdigest() action.implementation_hash = data_hash action.save() class Migration(migrations.Migration): dependencies = [ ('recipes', '0044_auto_20170801_0010'), ] operations = [ migrations.RunPython(make_hashes_urlsafe_sri, make_hashes_sha1), ]
Python
0
37a181a987e4974d21c3e043d66e0d65468785aa
Check in io module
contones/io.py
contones/io.py
import multiprocessing import os import uuid from osgeo import gdal import contones.raster def _run_encoder(path, encoder_cls, geom=None): encoder = encoder_cls() with contones.raster.Raster(path) as r: if geom: with r.crop(geom) as cropped: cropped.save(encoder) else: r.save(encoder) buff = encoder.read() # Remove the dataset from memory encoder.unlink() return buff # TODO: Generalize and replace _run_encoder() def convert(inpath, outpath=None): if outpath is None: outpath = get_imageio_for(outpath)() with contones.raster.Raster(path) as r: r.save(outpath) return outpath def run_encoderpool(encoder_cls, pathnames, geom=None, multicore=True): """Run an encoder job using a pool of workers. Arguments: path -- path to a GDAL dataset encoder_cls -- encoder class to use, not an instance Keyword args: geom -- geometry used to crop raster as a geos.Polygon or None multicore -- true/false, process in parallel by default """ encoder = encoder_cls() if not multicore: return [_run_encoder(path, encoder_cls, geom) for path in pathnames] num_workers = multiprocessing.cpu_count() num_workers = num_workers / 2 if num_workers > 4 else num_workers pool = multiprocessing.Pool(num_workers) results = [pool.apply(_run_encoder, (path, encoder_cls, geom,)) for path in pathnames] pool.close() return results def get_imageio_for(path): """Returns the io class from a file path or gdal.Driver ShortName.""" extsep = os.path.extsep ext = path.rsplit(extsep, 1)[-1] if extsep in path else path #ext = os.path.splitext(path)[-1] if extsep in path else path for cls in BaseImageIO.__subclasses__(): if ext in [cls.ext, cls.driver_name]: return cls raise Exception('No IO class for {}'.format(path)) # TODO: These not strictly encoders as they have filepaths, etc. Rename to # Transformer, Converter, Driver? Or, FileStore, ImageFile, ImageFileStore? 
#class BaseEncoder(object): #class BaseImageStore(object): class BaseImageIO(object): """Base encoder for GDAL Datasets derived from GDAL.Driver, used mainly for raster image encoding. New raster formats should subclass this. """ _vsimem = '/vsimem' # Specify this in subclass driver_name = None driver_opts = [] ext = None def __init__(self, path=None): self.driver = gdal.GetDriverByName(self.driver_name) self.path = path or self.get_tmpname() def __getattr__(self, attr): return getattr(self.driver, attr) def create(self, nx, ny, bandcount, datatype): #self._check_exists() ds = self.Create(self.path, nx, ny, bandcount, datatype, self.driver_opts) return contones.raster.Raster(ds) #def vsipath(self): def get_tmpname(self): basename = '{}.{}'.format(str(uuid.uuid4()), self.ext) return os.path.join(self._vsimem, basename) def _check_exists(self): if os.path.exists(self.path): raise IOError('{} already exists'.format(self.path)) def copy_from(self, dataset): #self._check_exists() ds = self.CreateCopy(self.path, dataset.ds, options=self.driver_opts) return contones.raster.Raster(ds) def read(self, size=0): """Returns the raster data buffer as str.""" f = gdal.VSIFOpenL(self.path, 'rb') if f is None: raise IOError('Could not read from {}'.format(self.path)) fstat = gdal.VSIStatL(self.path) data = gdal.VSIFReadL(1, fstat.size, f) gdal.VSIFCloseL(f) return data def unlink(self): gdal.Unlink(self.path) class GeoTIFFEncoder(BaseImageIO): """GeoTIFF raster encoder.""" driver_name = 'GTiff' driver_opts = ['COMPRESS=PACKBITS'] ext = 'tif' class HFAEncoder(BaseImageIO): """Erdas Imagine raster encoder.""" driver_name = 'HFA' driver_opts = ['COMPRESSED=YES'] ext = 'img'
Python
0
fa349a967f9f1149dc9aae1bab168f7be7436320
Use HTTP/1.1 for memory cache http server.
tools/telemetry/telemetry/core/memory_cache_http_server.py
tools/telemetry/telemetry/core/memory_cache_http_server.py
# Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import BaseHTTPServer import mimetypes import os import SimpleHTTPServer import SocketServer import sys import zlib class MemoryCacheHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): def do_GET(self): """Serve a GET request.""" resource = self.SendHead() if resource: self.wfile.write(resource['response']) def do_HEAD(self): """Serve a HEAD request.""" self.SendHead() def SendHead(self): path = self.translate_path(self.path) ctype = self.guess_type(path) if path not in self.server.resource_map: self.send_error(404, 'File not found') return None resource = self.server.resource_map[path] self.send_response(200) self.send_header('Content-Type', ctype) self.send_header('Content-Length', str(resource['content-length'])) self.send_header('Last-Modified', self.date_time_string(resource['last-modified'])) if resource['zipped']: self.send_header('Content-Encoding', 'deflate') self.end_headers() return resource class MemoryCacheHTTPServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer): # Increase the request queue size. The default value, 5, is set in # SocketServer.TCPServer (the parent of BaseHTTPServer.HTTPServer). # Since we're intercepting many domains through this single server, # it is quite possible to get more than 5 concurrent requests. request_queue_size = 128 def __init__(self, host_port, handler, directories): BaseHTTPServer.HTTPServer.__init__(self, host_port, handler) self.resource_map = {} for path in directories: self.LoadResourceMap(path) def LoadResourceMap(self, cwd): """Loads all files in cwd into the in-memory resource map.""" for root, dirs, files in os.walk(cwd): # Skip hidden files and folders (like .svn and .git). 
files = [f for f in files if f[0] != '.'] dirs[:] = [d for d in dirs if d[0] != '.'] for f in files: file_path = os.path.join(root, f) if not os.path.exists(file_path): # Allow for '.#' files continue with open(file_path, 'rb') as fd: response = fd.read() fs = os.fstat(fd.fileno()) content_type = mimetypes.guess_type(file_path)[0] zipped = False if content_type in ['text/html', 'text/css', 'application/javascript']: zipped = True response = zlib.compress(response, 9) self.resource_map[file_path] = { 'content-length': len(response), 'last-modified': fs.st_mtime, 'response': response, 'zipped': zipped } def Main(): assert len(sys.argv) > 2, 'usage: %prog <port> [<path1>, <path2>, ...]' port = int(sys.argv[1]) directories = sys.argv[2:] server_address = ('127.0.0.1', port) MemoryCacheHTTPRequestHandler.protocol_version = 'HTTP/1.1' httpd = MemoryCacheHTTPServer(server_address, MemoryCacheHTTPRequestHandler, directories) httpd.serve_forever() if __name__ == '__main__': Main()
# Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import BaseHTTPServer import mimetypes import os import SimpleHTTPServer import SocketServer import sys import zlib class MemoryCacheHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): def do_GET(self): """Serve a GET request.""" resource = self.SendHead() if resource: self.wfile.write(resource['response']) def do_HEAD(self): """Serve a HEAD request.""" self.SendHead() def SendHead(self): path = self.translate_path(self.path) ctype = self.guess_type(path) if path not in self.server.resource_map: self.send_error(404, 'File not found') return None resource = self.server.resource_map[path] self.send_response(200) self.send_header('Content-Type', ctype) self.send_header('Content-Length', str(resource['content-length'])) self.send_header('Last-Modified', self.date_time_string(resource['last-modified'])) if resource['zipped']: self.send_header('Content-Encoding', 'deflate') self.end_headers() return resource class MemoryCacheHTTPServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer): # Increase the request queue size. The default value, 5, is set in # SocketServer.TCPServer (the parent of BaseHTTPServer.HTTPServer). # Since we're intercepting many domains through this single server, # it is quite possible to get more than 5 concurrent requests. request_queue_size = 128 def __init__(self, host_port, handler, directories): BaseHTTPServer.HTTPServer.__init__(self, host_port, handler) self.resource_map = {} for path in directories: self.LoadResourceMap(path) def LoadResourceMap(self, cwd): """Loads all files in cwd into the in-memory resource map.""" for root, dirs, files in os.walk(cwd): # Skip hidden files and folders (like .svn and .git). 
files = [f for f in files if f[0] != '.'] dirs[:] = [d for d in dirs if d[0] != '.'] for f in files: file_path = os.path.join(root, f) if not os.path.exists(file_path): # Allow for '.#' files continue with open(file_path, 'rb') as fd: response = fd.read() fs = os.fstat(fd.fileno()) content_type = mimetypes.guess_type(file_path)[0] zipped = False if content_type in ['text/html', 'text/css', 'application/javascript']: zipped = True response = zlib.compress(response, 9) self.resource_map[file_path] = { 'content-length': len(response), 'last-modified': fs.st_mtime, 'response': response, 'zipped': zipped } def Main(): assert len(sys.argv) > 2, 'usage: %prog <port> [<path1>, <path2>, ...]' port = int(sys.argv[1]) directories = sys.argv[2:] server_address = ('127.0.0.1', port) MemoryCacheHTTPRequestHandler.protocol_version = 'HTTP/1.0' httpd = MemoryCacheHTTPServer(server_address, MemoryCacheHTTPRequestHandler, directories) httpd.serve_forever() if __name__ == '__main__': Main()
Python
0.000076
1253cf2773b510f88b4391e22f0e98b4ef3cdf52
Create serializers.py
templates/root/main/serializers.py
templates/root/main/serializers.py
from django.contrib.auth.models import User from rest_framework import serializers from <%= appName %>.models import Sample class SampleSerializer(serializers.HyperlinkedModelSerializer): owner = serializers.ReadOnlyField(source='owner.username') class Meta: model = Sample fields = ('id', 'created', 'name', 'img_name', 'url', 'owner', 'info') class UserSerializer(serializers.HyperlinkedModelSerializer): clownfish = serializers.HyperlinkedRelatedField(many=True, view_name='sample-detail', read_only=True) class Meta: model = User fields = ('url', 'username', 'sample')
Python
0.000002
c6015e049ab1ce059298af9147851f9a6a1c1e46
Replace NotImplemented singleton with NotImplementedError exceptin
src/ggrc_workflows/services/workflow_cycle_calculator/one_time_cycle_calculator.py
src/ggrc_workflows/services/workflow_cycle_calculator/one_time_cycle_calculator.py
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file> # Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> # Created By: urban@reciprocitylabs.com # Maintained By: urban@reciprocitylabs.com import datetime from ggrc_workflows.services.workflow_cycle_calculator import cycle_calculator class OneTimeCycleCalculator(cycle_calculator.CycleCalculator): """CycleCalculator implementation for one-time workflows Because one-time workflows have concrete start and end dates already specified for tasks, we don't have to implement relative_day_to_date function and we can return all values in their raw format (we don't need to adjust for holidays). """ def __init__(self, workflow, base_date=None): super(OneTimeCycleCalculator, self).__init__(workflow) def relative_day_to_date(self, relative_day, relative_month=None, base_date=None): raise NotImplementedError("Relative days are not applicable " "for one-time workflows.") def sort_tasks(self): self.tasks.sort(key=lambda t: self._date_normalizer(t.start_date)) @staticmethod def get_relative_start(task): raise NotImplementedError("Relative days are not applicable " "for one-time workflows.") @staticmethod def get_relative_end(task): raise NotImplementedError("Relative days are not applicable " "for one-time workflows.") @staticmethod def task_date_range(task, base_date=None): return task.start_date, task.end_date @staticmethod def _date_normalizer(d): if type(d) is datetime.datetime: return d.date() return d def workflow_date_range(self): tasks_start_dates = [ self._date_normalizer(task.start_date) for task in self.tasks] tasks_end_dates = [ self._date_normalizer(task.end_date) for task in self.tasks] return min(tasks_start_dates), max(tasks_end_dates) def next_cycle_start_date(self, base_date=None): return None
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file> # Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> # Created By: urban@reciprocitylabs.com # Maintained By: urban@reciprocitylabs.com import datetime from ggrc_workflows.services.workflow_cycle_calculator import cycle_calculator class OneTimeCycleCalculator(cycle_calculator.CycleCalculator): """CycleCalculator implementation for one-time workflows Because one-time workflows have concrete start and end dates already specified for tasks, we don't have to implement relative_day_to_date function and we can return all values in their raw format (we don't need to adjust for holidays). """ def __init__(self, workflow, base_date=None): super(OneTimeCycleCalculator, self).__init__(workflow) def relative_day_to_date(self, relative_day, relative_month=None, base_date=None): raise NotImplemented("Relative days are not applicable " "for one-time workflows.") def sort_tasks(self): self.tasks.sort(key=lambda t: self._date_normalizer(t.start_date)) @staticmethod def get_relative_start(task): raise NotImplemented("Relative days are not applicable " "for one-time workflows.") @staticmethod def get_relative_end(task): raise NotImplemented("Relative days are not applicable " "for one-time workflows.") @staticmethod def task_date_range(task, base_date=None): return task.start_date, task.end_date @staticmethod def _date_normalizer(d): if type(d) is datetime.datetime: return d.date() return d def workflow_date_range(self): tasks_start_dates = [ self._date_normalizer(task.start_date) for task in self.tasks] tasks_end_dates = [ self._date_normalizer(task.end_date) for task in self.tasks] return min(tasks_start_dates), max(tasks_end_dates) def next_cycle_start_date(self, base_date=None): return None
Python
0.999768
5dc2f523473f4921c3b7f1915966c0ac22b09474
Create package and metadatas
mots_vides/__init__.py
mots_vides/__init__.py
""" Mots-vides """ __version__ = '2015.1.21.dev0' __author__ = 'Fantomas42' __email__ = 'fantomas42@gmail.com' __url__ = 'https://github.com/Fantomas42/mots-vides'
Python
0
02e9602a5723aa3cbe9395290e4c18e439065007
Remove redundant code
numpy/distutils/tests/test_fcompiler.py
numpy/distutils/tests/test_fcompiler.py
from __future__ import division, absolute_import, print_function from numpy.testing import assert_ import numpy.distutils.fcompiler customizable_flags = [ ('f77', 'F77FLAGS'), ('f90', 'F90FLAGS'), ('free', 'FREEFLAGS'), ('arch', 'FARCH'), ('debug', 'FDEBUG'), ('flags', 'FFLAGS'), ('linker_so', 'LDFLAGS'), ] def test_fcompiler_flags(monkeypatch): monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '0') fc = numpy.distutils.fcompiler.new_fcompiler(compiler='none') flag_vars = fc.flag_vars.clone(lambda *args, **kwargs: None) for opt, envvar in customizable_flags: new_flag = '-dummy-{}-flag'.format(opt) prev_flags = getattr(flag_vars, opt) monkeypatch.setenv(envvar, new_flag) new_flags = getattr(flag_vars, opt) monkeypatch.delenv(envvar) assert_(new_flags == [new_flag]) monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '1') for opt, envvar in customizable_flags: new_flag = '-dummy-{}-flag'.format(opt) prev_flags = getattr(flag_vars, opt) monkeypatch.setenv(envvar, new_flag) new_flags = getattr(flag_vars, opt) monkeypatch.delenv(envvar) if prev_flags is None: assert_(new_flags == [new_flag]) else: assert_(new_flags == prev_flags + [new_flag])
from __future__ import division, absolute_import, print_function from numpy.testing import assert_ import numpy.distutils.fcompiler customizable_flags = [ ('f77', 'F77FLAGS'), ('f90', 'F90FLAGS'), ('free', 'FREEFLAGS'), ('arch', 'FARCH'), ('debug', 'FDEBUG'), ('flags', 'FFLAGS'), ('linker_so', 'LDFLAGS'), ] def test_fcompiler_flags(monkeypatch): monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '0') fc = numpy.distutils.fcompiler.new_fcompiler(compiler='none') flag_vars = fc.flag_vars.clone(lambda *args, **kwargs: None) for opt, envvar in customizable_flags: new_flag = '-dummy-{}-flag'.format(opt) prev_flags = getattr(flag_vars, opt) monkeypatch.setenv(envvar, new_flag) new_flags = getattr(flag_vars, opt) monkeypatch.delenv(envvar) assert_(new_flags == [new_flag]) monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '1') fc = numpy.distutils.fcompiler.new_fcompiler(compiler='none') flag_vars = fc.flag_vars.clone(lambda *args, **kwargs: None) for opt, envvar in customizable_flags: new_flag = '-dummy-{}-flag'.format(opt) prev_flags = getattr(flag_vars, opt) monkeypatch.setenv(envvar, new_flag) new_flags = getattr(flag_vars, opt) monkeypatch.delenv(envvar) if prev_flags is None: assert_(new_flags == [new_flag]) else: assert_(new_flags == prev_flags + [new_flag])
Python
0.999999
b9bb7e36977b757a63015ac3af8b538f0c67f16c
add manage.py
manage.py
manage.py
from argparse import ArgumentParser def apply_migrates(args): print('migrate') def make_parser(): parser = ArgumentParser() subparsers = parser.add_subparsers() migrate = subparsers.add_parser('migrate') migrate.set_defaults(func=apply_migrates) return parser if __name__ == '__main__': parser = make_parser() args = parser.parse_args() if vars(args): args.func(args) else: parser.print_help()
Python
0.000001
67a3a0050c90c500c0c08a638436799df441c326
Add markov implementation
markov.py
markov.py
from nltk import word_tokenize, pos_tag import numpy import random from copy import deepcopy def compute_transitions(tokens, precondition=lambda token, last_token: True, order=1): last_tokens = [tokens[0]] transitions = dict() # count the occurences of "present | past" for token in tokens[1:]: past = tuple(last_tokens) if precondition(token, past[-1]): suffixes = [past[i:] for i in range(len(past))] for suffix in suffixes: if suffix not in transitions: transitions[suffix] = {token : 1} else: if token not in transitions[suffix]: transitions[suffix][token] = 1 else: transitions[suffix][token] += 1 last_tokens = last_tokens[1 if len(last_tokens) == order else 0:] last_tokens.append(token) # compute probabilities for transition_counts in transitions.values(): summed_occurences = sum(transition_counts.values()) for token in transition_counts.keys(): transition_counts[token] /= summed_occurences # ensure there is a probability for token in tokens: if (token,) not in transitions: transitions[(token,)] = {token: 1} return transitions def compute_token_probabilities(pos_tagged_tokens): token_probabilities = dict() for item in pos_tagged_tokens: if item[1] not in token_probabilities: token_probabilities[item[1]] = {item[0]: 1} else: if item[0] not in token_probabilities[item[1]]: token_probabilities[item[1]][item[0]] = 1 else: token_probabilities[item[1]][item[0]] += 1 for probabilities in token_probabilities.values(): summed_occurences = sum(probabilities.values()) for token in probabilities.keys(): probabilities[token] /= summed_occurences return token_probabilities def _weighted_choice(item_probabilities, value_to_probability=lambda x:x, probability_sum=1): """ Expects a list of (item, probability)-tuples and the sum of all probabilities and returns one entry weighted at random """ random_value = random.random()*probability_sum summed_probability = 0 for item, value in item_probabilities: summed_probability += value_to_probability(value) if summed_probability > 
random_value: return item def generate_text(transitions, start_symbol, count, symbol_to_token=lambda x:x, precondition=lambda x: True, order=1): last_symbols = [start_symbol] generated_tokens = [] for i in range(1, count): new_symbol = generate_next_token(transitions, tuple(last_symbols[-i if i < order else -order:]), precondition) last_symbols = last_symbols[1 if len(last_symbols) == order else 0:] last_symbols.append(new_symbol) generated_tokens.append(symbol_to_token(new_symbol)) return generated_tokens def generate_next_token(transitions, past, precondition=lambda x: True): for key in [past[i:] for i in range(len(past))]: if key in transitions: possible_transitions = deepcopy(transitions[key]) for key in transitions[key].keys(): if not precondition(key): del possible_transitions[key] return _weighted_choice(possible_transitions.items(), probability_sum=sum(possible_transitions.values())) def lexicographic_markov(input, count, order=1): tokens = word_tokenize(input) pos_tagged_tokens = pos_tag(tokens) symbol_transitions = compute_transitions([x[1] for x in pos_tagged_tokens]) token_probabilities = compute_token_probabilities(pos_tagged_tokens) return generate_text(symbol_transitions, random.choice([x[1] for x in pos_tagged_tokens]), count, lambda symbol: _weighted_choice(token_probabilities[symbol].items()), order)
Python
0
58cb5bde9c658e7b5fc7a7c946951e8abaade5e4
Check against sixtrack in different file
examples/python/test_workflow_footprint/001_checks_against_sixtrack.py
examples/python/test_workflow_footprint/001_checks_against_sixtrack.py
import pickle import numpy as np import pysixtrack import sixtracktools # Load machine with open('line.pkl', 'rb') as fid: pbline = pickle.load(fid) line = pysixtrack.Line.fromline(pbline) # Load particle on CO with open('particle_on_CO.pkl', 'rb') as fid: part_on_CO = pysixtrack.Particles.from_dict( pickle._load(fid)) # Load iconv with open('iconv.pkl', 'rb') as fid: iconv = pickle.load(fid) # Load sixtrack tracking data sixdump_all = sixtracktools.SixDump101('res/dump3.dat') # Assume first particle to be on the closed orbit Nele_st = len(iconv) sixdump_CO = sixdump_all[::2][:Nele_st] # Compute closed orbit using tracking closed_orbit = line.track_elem_by_elem(part_on_CO) # Check that closed orbit is closed pstart = closed_orbit[0].copy() pstart_st = pysixtrack.Particles(**sixdump_CO[0].get_minimal_beam()) print('STsigma, Sigma, Stdelta, delta, Stpx, px') for iturn in range(10): line.track(pstart) line.track(pstart_st) print('%e, %e, %e, %e, %e, %e' % (pstart_st.sigma, pstart.sigma, pstart_st.delta, pstart.delta, pstart_st.px, pstart.px)) # Compare closed orbit against sixtrack for att in 'x px y py delta sigma'.split(): att_CO = np.array([getattr(pp, att) for pp in closed_orbit]) att_CO_at_st_ele = att_CO[iconv] print('Max C.O. 
discrepancy in %s %.2e' % (att, np.max(np.abs(att_CO_at_st_ele-getattr(sixdump_CO, att))))) # Compare tracking results sixdump = sixdump_all[1::2] # Particle with deviation from CO # sixdump = sixdump_all[::2] # Particle on CO p_in_st = pysixtrack.Particles(**sixdump[0].get_minimal_beam()) p_out_st = pysixtrack.Particles(**sixdump[1].get_minimal_beam()) p_in_pyst = p_in_st.copy() p_out_pyst = p_in_pyst.copy() for att in 'x px y py delta sigma'.split(): attin = getattr(p_in_st, att) attout = getattr(p_out_st, att) print('SxTr: Change in '+att+': %e' % (attout-attin)) attin_pyst = getattr(p_in_pyst, att) attout_pyst = getattr(p_out_pyst, att) print('PyST: Change in '+att+': %e' % (attout_pyst-attin_pyst)) def compare(prun, pbench, pbench_prev): out = [] out_rel = [] error = False for att in 'x px y py delta sigma'.split(): vrun = getattr(prun, att) vbench = getattr(pbench, att) vbench_prev = getattr(pbench_prev, att) diff = vrun-vbench diffrel = abs(1.-abs(vrun-vbench_prev)/abs(vbench-vbench_prev)) out.append(abs(diff)) out_rel.append(diffrel) print(f"{att:<5} {vrun:22.13e} {vbench:22.13e} {diff:22.13g} {diffrel:22.13g}") if diffrel > 1e-8 or np.isnan(diffrel): if diff > 1e-11: print('Too large discrepancy!') error = True print(f"\nmax {max(out):21.12e} maxrel {max(out_rel):22.12e}") return error print("") for ii in range(1, len(iconv)): jja = iconv[ii-1] jjb = iconv[ii] prun = pysixtrack.Particles(**sixdump[ii-1].get_minimal_beam()) pbench_prev = prun.copy() print(f"\n-----sixtrack={ii} sixtracklib={jja} --------------") #print(f"pysixtr {jja}, x={prun.x}, px={prun.px}") for jj in range(jja+1, jjb+1): label = line.element_names[jj] elem = line.elements[jj] pin = prun.copy() elem.track(prun) print(f"{jj} {label},{str(elem)[:50]}") pbench = pysixtrack.Particles(**sixdump[ii].get_minimal_beam()) #print(f"sixdump {ii}, x={pbench.x}, px={pbench.px}") print("-----------------------") error = compare(prun, pbench, pbench_prev) print("-----------------------\n\n") if error: 
print('Error detected') break
Python
0
8fa7120606e206d08acbad198e253ea428eef584
Add tests for inline list compilation
tests/compiler/test_inline_list_compilation.py
tests/compiler/test_inline_list_compilation.py
import pytest from tests.compiler import compile_snippet, internal_call, STATIC_START, LOCAL_START from thinglang.compiler.errors import NoMatchingOverload, InvalidReference from thinglang.compiler.opcodes import OpcodePopLocal, OpcodePushStatic def test_inline_list_compilation(): assert compile_snippet('list<number> numbers = [1, 2, 3]') == [ OpcodePushStatic(STATIC_START), # Push the values OpcodePushStatic(STATIC_START + 1), OpcodePushStatic(STATIC_START + 2), internal_call('list.__constructor__'), # Create the list internal_call('list.append'), # Compile 3 append calls internal_call('list.append'), internal_call('list.append'), OpcodePopLocal(LOCAL_START) ] def test_inline_list_type_homogeneity(): with pytest.raises(NoMatchingOverload): assert compile_snippet('list<number> numbers = [1, Container(), 3]') def test_inline_list_declaration_type_match(): with pytest.raises(InvalidReference): assert compile_snippet('list<number> numbers = [Container(), Container(), Container()]')
Python
0
142cb17be1c024839cd972071b2f9665c87ed5f1
Update downloadable clang to r338452
third_party/clang_toolchain/download_clang.bzl
third_party/clang_toolchain/download_clang.bzl
""" Helpers to download a recent clang release.""" def _get_platform_folder(os_name): os_name = os_name.lower() if os_name.startswith("windows"): return "Win" if os_name.startswith("mac os"): return "Mac" if not os_name.startswith("linux"): fail("Unknown platform") return "Linux_x64" def _download_chromium_clang( repo_ctx, platform_folder, package_version, sha256, out_folder): cds_url = "https://commondatastorage.googleapis.com/chromium-browser-clang" cds_file = "clang-%s.tgz" % package_version cds_full_url = "{0}/{1}/{2}".format(cds_url, platform_folder, cds_file) repo_ctx.download_and_extract(cds_full_url, output = out_folder, sha256 = sha256) def download_clang(repo_ctx, out_folder): """ Download a fresh clang release and put it into out_folder. Clang itself will be located in 'out_folder/bin/clang'. We currently download one of the latest releases of clang by the Chromium project (see https://chromium.googlesource.com/chromium/src/+/master/docs/clang.md). Args: repo_ctx: An instance of repository_context object. out_folder: A folder to extract the compiler into. """ # TODO(ibiryukov): we currently download and extract some extra tools in the # clang release (e.g., sanitizers). We should probably remove the ones # we don't need and document the ones we want provide in addition to clang. 
# Latest CLANG_REVISION and CLANG_SUB_REVISION of the Chromiums's release # can be found in https://chromium.googlesource.com/chromium/src/tools/clang/+/master/scripts/update.py CLANG_REVISION = "338452" CLANG_SUB_REVISION = 1 package_version = "%s-%s" % (CLANG_REVISION, CLANG_SUB_REVISION) checksums = { "Linux_x64": "213ba23a0a9855ede5041f66661caa9c5c59a573ec60b82a31839f9a97f397bf", "Mac": "4267774201f8cb50c25e081375e87038d58db80064a20a0d9d7fe57ea4357ece", "Win": "a8a5d5b25443c099e2c20d1a0cdce2f1d17e2dba84de66a6dc6a239ce3e78c34", } platform_folder = _get_platform_folder(repo_ctx.os.name) _download_chromium_clang( repo_ctx, platform_folder, package_version, checksums[platform_folder], out_folder, )
""" Helpers to download a recent clang release.""" def _get_platform_folder(os_name): os_name = os_name.lower() if os_name.startswith("windows"): return "Win" if os_name.startswith("mac os"): return "Mac" if not os_name.startswith("linux"): fail("Unknown platform") return "Linux_x64" def _download_chromium_clang( repo_ctx, platform_folder, package_version, sha256, out_folder): cds_url = "https://commondatastorage.googleapis.com/chromium-browser-clang" cds_file = "clang-%s.tgz" % package_version cds_full_url = "{0}/{1}/{2}".format(cds_url, platform_folder, cds_file) repo_ctx.download_and_extract(cds_full_url, output = out_folder, sha256 = sha256) def download_clang(repo_ctx, out_folder): """ Download a fresh clang release and put it into out_folder. Clang itself will be located in 'out_folder/bin/clang'. We currently download one of the latest releases of clang by the Chromium project (see https://chromium.googlesource.com/chromium/src/+/master/docs/clang.md). Args: repo_ctx: An instance of repository_context object. out_folder: A folder to extract the compiler into. """ # TODO(ibiryukov): we currently download and extract some extra tools in the # clang release (e.g., sanitizers). We should probably remove the ones # we don't need and document the ones we want provide in addition to clang. 
# Latest CLANG_REVISION and CLANG_SUB_REVISION of the Chromiums's release # can be found in https://chromium.googlesource.com/chromium/src/tools/clang/+/master/scripts/update.py CLANG_REVISION = "336424" CLANG_SUB_REVISION = 1 package_version = "%s-%s" % (CLANG_REVISION, CLANG_SUB_REVISION) checksums = { "Linux_x64": "2ea97e047470da648f5d078af008bce6891287592382cee3d53a1187d996da94", "Mac": "c6e28909cce63ee35e0d51284d9f0f6e8838f7fb8b7a0dc9536c2ea900552df0", "Win": "1299fda7c4378bfb81337f7e5f351c8a1f953f51e0744e2170454b8d722f3db7", } platform_folder = _get_platform_folder(repo_ctx.os.name) _download_chromium_clang( repo_ctx, platform_folder, package_version, checksums[platform_folder], out_folder, )
Python
0
3ddf0f0fead6018b5c313253a0df2165452cfb6e
Add shared babel init code
src/eduid_common/api/translation.py
src/eduid_common/api/translation.py
# -*- coding: utf-8 -*- from flask import request from flask_babel import Babel __author__ = 'lundberg' def init_babel(app): babel = Babel(app) app.babel = babel @babel.localeselector def get_locale(): # if a user is logged in, use the locale from the user settings # XXX: TODO # otherwise try to guess the language from the user accept # header the browser transmits. The best match wins. return request.accept_languages.best_match(app.config.get('SUPPORTED_LANGUAGES')) return app
Python
0
7db0b2ea9830ef2dad74b34ab0fd4cd9f5a5eca1
add test/lorenz_example.py: solving the Lorenz system using a custom Python kernel
test/lorenz_example.py
test/lorenz_example.py
# -*- coding: utf-8 -*- # # Usage example: solve the Lorenz system using a custom Python kernel. from __future__ import division, print_function import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d.axes3d import Axes3D from pydgq.solver.types import DTYPE from pydgq.solver.galerkin import init from pydgq.solver.kernel_interface import PythonKernel import pydgq.solver.odesolve from pydgq.utils.discontify import discontify # for plotting dG results ##################### # config ##################### q = 2 # degree of basis for dG and cG # How many visualization (interpolation) points to use within each timestep for Galerkin methods. # # Note that the dG solution has the best accuracy at the endpoint of the timestep; # to compare apples-to-apples with classical integrators, this should be set to 1. # # Larger values (e.g. 11) are useful for visualizing the behavior of the dG solution inside # the timestep (something the classical integrators do not model at all). # nt_vis_galerkin = 11 nt = 3500 # number of timesteps dt = 0.1 # timestep size save_from = 0 # see pydgq.solver.odesolve.ivp() ##################### # custom kernel ##################### # A kernel for the Lorenz system. # # The custom kernel only needs to override callback(); even __init__ is not strictly needed, # unless adding some custom parameters (like here). 
# class LorenzKernel(PythonKernel): def __init__(self, rho, sigma, beta): # super PythonKernel.__init__(self, n=3) # custom init self.rho = rho self.sigma = sigma self.beta = beta def callback(self, t): # dxdt = sigma (y - x) # dydt = x (rho - z) - y # dzdt = x y - beta z self.out[0] = self.sigma * (self.w[1] - self.w[0]) self.out[1] = self.w[0] * (self.rho - self.w[2]) - self.w[1] self.out[2] = self.w[0] * self.w[1] - self.beta * self.w[2] # this is nonlinear, so we can't use a built-in linear kernel ##################### # main program ##################### def test(integrator, nt_vis): n_saved_timesteps = pydgq.solver.odesolve.n_saved_timesteps( nt, save_from ) result_len = pydgq.solver.odesolve.result_len( nt, save_from, interp=nt_vis ) startj,endj = pydgq.solver.odesolve.timestep_boundaries( nt, save_from, interp=nt_vis ) n = 3 # the Lorenz system has 3 DOFs # we use the same values as in the example at https://en.wikipedia.org/wiki/Lorenz_system # rho = 28. sigma = 10. beta = 8./3. # set IC # w0 = np.empty( (n,), dtype=DTYPE, order="C" ) w0[0] = 0. w0[1] = 2. w0[2] = 20. 
# instantiate kernel rhs = LorenzKernel(rho=rho, sigma=sigma, beta=beta) # create output arrays ww = None #np.empty( (result_len,n), dtype=DTYPE, order="C" ) # result array for w; if None, will be created by ivp() ff = np.empty( (result_len,n), dtype=DTYPE, order="C" ) # optional, result array for w', could be None fail = np.empty( (n_saved_timesteps,), dtype=np.intc, order="C" ) # optional, fail flag for each timestep, could be None # solve problem ww,tt = pydgq.solver.odesolve.ivp( integrator=integrator, allow_denormals=False, w0=w0, dt=dt, nt=nt, save_from=save_from, interp=nt_vis, rhs=rhs, ww=ww, ff=ff, fail=fail, maxit=10 ) # visualize # # http://matplotlib.org/mpl_toolkits/mplot3d/tutorial.html # print( "** Plotting solution **" ) fig = plt.figure(1) plt.clf() # Axes3D has a tendency to underestimate how much space it needs; it draws its labels # outside the window area in certain orientations. # # This causes the labels to be clipped, which looks bad. We prevent this by creating the axes # in a slightly smaller rect (leaving a margin). This way the labels will show - outside the Axes3D, # but still inside the figure window. # # The final touch is to set the window background to a matching white, so that the # background of the figure appears uniform. 
# fig.patch.set_color( (1,1,1) ) fig.patch.set_alpha( 1.0 ) x0y0wh = [ 0.02, 0.02, 0.96, 0.96 ] # left, bottom, width, height (here as fraction of subplot area) ax = Axes3D(fig, rect=x0y0wh) # show the discontinuities at timestep boundaries if using dG (and actually have something to draw within each timestep) if integrator == "dG" and nt_vis > 1: tt = discontify( tt, endj - 1, fill="nan" ) wtmp = np.empty( (tt.shape[0],n), dtype=DTYPE, order="C" ) for j in range(n): # we need the copy() to get memory-contiguous data for discontify() to process wtmp[:,j] = discontify( ww[:,j].copy(), endj - 1, fill="nan" ) ax.plot( wtmp[:,0], wtmp[:,1], wtmp[:,2], linewidth=0.5 ) else: ax.plot( ww[:,0], ww[:,1], ww[:,2], linewidth=0.5 ) plt.grid(b=True, which="both") plt.axis("tight") ax.set_xlabel(r"$x$") ax.set_ylabel(r"$y$") ax.set_zlabel(r"$z$") plt.suptitle(r"Trajectory: $\rho = %g$, $\sigma = %g$, $\beta = %g$, $x_0 = %g$, $y_0 = %g$, $z_0 = %g$" % (rho, sigma, beta, w0[0], w0[1], w0[2])) if __name__ == '__main__': print("** Solving the Lorenz system **") nt_vis = nt_vis_galerkin init(q=q, method="dG", nt_vis=nt_vis, rule=None) test(integrator="dG", nt_vis=nt_vis) plt.show()
Python
0.000001
30bca45e1ac9fc6953728950695135b491403215
Add test for logical constant folding.
tests/basics/logic_constfolding.py
tests/basics/logic_constfolding.py
# tests logical constant folding in parser def f_true(): print('f_true') return True def f_false(): print('f_false') return False print(0 or False) print(1 or foo) print(f_false() or 1 or foo) print(f_false() or 1 or f_true()) print(0 and foo) print(1 and True) print(f_true() and 0 and foo) print(f_true() and 1 and f_false()) print(not 0) print(not False) print(not 1) print(not True) print(not not 0) print(not not 1)
Python
0
5b3863c90d4bc07bbc170fc213b4a4c46b3ddc01
Test setting selinux context on lost+found (#1038146)
tests/formats_test/selinux_test.py
tests/formats_test/selinux_test.py
#!/usr/bin/python import os import selinux import tempfile import unittest from devicelibs_test import baseclass from blivet.formats import device_formats import blivet.formats.fs as fs class SELinuxContextTestCase(baseclass.DevicelibsTestCase): """Testing SELinux contexts. """ @unittest.skipUnless(os.geteuid() == 0, "requires root privileges") def testMountingExt2FS(self): _LOOP_DEV0 = self._loopMap[self._LOOP_DEVICES[0]] an_fs = fs.Ext2FS(device=_LOOP_DEV0, label="test") self.assertIsNone(an_fs.create()) mountpoint = tempfile.mkdtemp("test.selinux") an_fs.mount(mountpoint=mountpoint) root_selinux_context = selinux.getfilecon(mountpoint) lost_and_found = os.path.join(mountpoint, "lost+found") self.assertTrue(os.path.exists(lost_and_found)) lost_and_found_selinux_context = selinux.getfilecon(lost_and_found) an_fs.unmount() os.rmdir(mountpoint) self.assertEqual(root_selinux_context[1], 'system_u:object_r:file_t:s0') self.assertEqual(lost_and_found_selinux_context[1], 'system_u:object_r:lost_found_t:s0') @unittest.skipUnless(os.geteuid() == 0, "requires root privileges") def testMountingXFS(self): _LOOP_DEV0 = self._loopMap[self._LOOP_DEVICES[0]] an_fs = fs.XFS(device=_LOOP_DEV0, label="test") self.assertIsNone(an_fs.create()) mountpoint = tempfile.mkdtemp("test.selinux") an_fs.mount(mountpoint=mountpoint) root_selinux_context = selinux.getfilecon(mountpoint) lost_and_found = os.path.join(mountpoint, "lost+found") self.assertFalse(os.path.exists(lost_and_found)) an_fs.unmount() os.rmdir(mountpoint) self.assertEqual(root_selinux_context[1], 'system_u:object_r:file_t:s0') def suite(): suite1 = unittest.TestLoader().loadTestsFromTestCase(SELinuxContextTestCase) return unittest.TestSuite([suite1]) if __name__ == "__main__": unittest.main()
Python
0
68d620d56625c4c1bd30a30f31840d9bd440b29e
Add find_objects test module
tests/plantcv/test_find_objects.py
tests/plantcv/test_find_objects.py
import cv2 import numpy as np from plantcv.plantcv import find_objects def test_find_objects(test_data): # Read in test data img = cv2.imread(test_data.small_rgb_img) mask = cv2.imread(test_data.small_bin_img, -1) cnt, _ = test_data.load_contours(test_data.small_contours_file) contours, _ = find_objects(img=img, mask=mask) # Assert contours match test data assert np.all(cnt) == np.all(contours) def test_find_objects_grayscale_input(test_data): # Read in test data img = cv2.imread(test_data.small_gray_img, -1) mask = cv2.imread(test_data.small_bin_img, -1) cnt, _ = test_data.load_contours(test_data.small_contours_file) contours, _ = find_objects(img=img, mask=mask) # Assert contours match test data assert np.all(cnt) == np.all(contours)
Python
0.000002
36033be962fcc3e97d14dd06b42bcd3be52a97c5
Add floting_point.py
parser/sample/floting_point.py
parser/sample/floting_point.py
import logging from lex_tokens import LexToken from ply.yacc import yacc class FloatingPointParser(object): class FloatingPointSyntaxError(Exception): pass def __init__(self, debug=False): if debug: self._log = logging.getLogger('PhysicalDivideCharParser') else: self._log = yacc.NullLogger() self._lex = LexToken(debug) self.tokens = self._lex.tokens self._parser = yacc.yacc(module=self, debug=debug, debuglog=self._log) def p_floating_point(self, p): 'expression : floating' p[0] = p[1] def p_floating_1(self, p): 'floating : single_num DOT single_num' p[0] = p[1] + p[2] + p[3] def p_floating_2(self, p): 'floating : single_num dot_char single_num' p[0] = p[1] + p[2] + p[3] def p_floating_3(self, p): 'floating : single_num' p[0] = p[1] def p_divid_dot(self, p): 'dot_char : DOT' p[0] = p[1] def p_sign1(self, p): 'single_num : NUMBER' p[0] = str(p[1]) def p_sign2(self, p): 'single_num : MINUS NUMBER' p[0] = p[1] + str(p[2]) def p_error(self, p): if p is None: # End-of-file raise self.FloatingPointSyntaxError('Parsing error (%s)' % self.__expr_text) err_msg = 'token type: {}, value: {}'.format(p.type, p.value) raise self.FloatingPointSyntaxError(err_msg) def parse(self, s): self.__expr_text = s try: return self._parser.parse(s, lexer=self._lex.lexer()) except self.FloatingPointSyntaxError: print "NOT Matched" return None if __name__ == '__main__': header_parser = FloatingPointParser() data = '5.6' data = '- 5.6' data = 'VERSION 5.6 ;' data = '5' data = '-5' print header_parser.parse(data)
Python
0.000001
be17cf90b06a118d579c0211dd3bc2d45433fb2d
Write unit tests for _handle_long_response
tests/test_handle_long_response.py
tests/test_handle_long_response.py
import context class TestHandleLongResponse(context.slouch.testing.CommandTestCase): bot_class = context.TimerBot config = {'start_fmt': '{:%Y}', 'stop_fmt': '{.days}'} normal_text = "@genericmention: this is generic mention message contains a URL <http://foo.com/>\n@genericmention: this generic mention message contains a :fast_parrot: and :nyancat_big:\n" over_limit_text = normal_text * 50 # 8550 chars def test_handle_long_message_api(self): _res = { 'type': 'message', 'text': self.normal_text, 'channel': None, } responses = self.bot._handle_long_response(_res) self.assertEqual(len(responses), 1) self.assertEqual(responses, [{ 'type': 'message', 'text': self.normal_text, 'channel': None }]) def test_handle_long_message_over_limit_api(self): _res = { 'type': 'message', 'text': self.over_limit_text, 'channel': None, } responses = self.bot._handle_long_response(_res) self.assertEqual([len(r['text']) for r in responses], [3932, 3933, 685]) self.assertEqual(len(responses), 3) def test_handle_long_message_rtm(self): responses = self.bot._handle_long_response(self.normal_text) self.assertEqual(responses, [self.normal_text]) self.assertEqual(len(responses), 1) def test_handle_long_message_over_limit_rtm(self): responses = self.bot._handle_long_response(self.over_limit_text) self.assertEqual([len(r) for r in responses], [3932, 3933, 685]) self.assertEqual(len(responses), 3)
Python
0.000064
feafe480d651ee6b58a1631f4eb4533f63ea6ad4
Add user tests
tests/api/test_user.py
tests/api/test_user.py
from unittest import mock from groupy.api import user from .base import get_fake_response from .base import TestCase class UserTests(TestCase): def setUp(self): self.m_session = mock.Mock() self.m_session.get.return_value = get_fake_response(data={'id': 'foo'}) self.user = user.User(self.m_session) def test_id_is_foo(self): self.assertEqual(self.user.me['id'], 'foo') @mock.patch('groupy.api.user.blocks') def test_blocks_uses_id(self, m_blocks): self.user.blocks (__, id_), __ = m_blocks.Blocks.call_args self.assertEqual(id_, 'foo') def test_update(self): data = {'bar': 'foo'} self.m_session.post.return_value = get_fake_response(data=data) result = self.user.update(foo='bar') self.assertEqual(result, data) class SmsModeTests(TestCase): def setUp(self): self.m_session = mock.Mock() self.sms_mode = user.SmsMode(self.m_session) self.m_session.post.return_value = mock.Mock(ok=True) class EnableSmsModeTests(SmsModeTests): def setUp(self): super().setUp() self.result = self.sms_mode.enable(duration=42) def test_result_is_True(self): self.assertTrue(self.result) def test_payload_is_correct(self): self.assert_kwargs(self.m_session.post, json={'duration': 42}) class EnableSmsModeWithRegistrationTests(SmsModeTests): def setUp(self): super().setUp() self.result = self.sms_mode.enable(duration=42, registration_id=420) def test_result_is_True(self): self.assertTrue(self.result) def test_payload_is_correct(self): payload = {'duration': 42, 'registration_id': 420} self.assert_kwargs(self.m_session.post, json=payload) class DisableSmsModeTests(SmsModeTests): def setUp(self): super().setUp() self.result = self.sms_mode.disable() def test_result_is_True(self): self.assertTrue(self.result)
Python
0.000001
063899021158fe872745b335595b3094db9834d8
Add a test for 'version.
pycket/test/test_version.py
pycket/test/test_version.py
#! /usr/bin/env python # -*- coding: utf-8 -*- # # Test the version here. # import pytest from pycket.test.testhelper import check_equal EXPECTED_VERSION='6.1.1.8' def test_version(): check_equal('(version)', '"%s"' % EXPECTED_VERSION) # EOF
Python
0.000055
e940963a6372a4de1a4a28eff1854716f47471e5
Add deploy script
conda-recipe/deploy.py
conda-recipe/deploy.py
#!/usr/bin/env python """ Deploy dbcollection to pypi and conda. """ import os import shutil import subprocess # PyPi print('PyPi: Upload sdist...') msg1 = subprocess.run(["python", 'setup.py', 'sdist', 'upload'], stdout=subprocess.PIPE) print('PyPi: Upload bdist_wheel...') msg2 = subprocess.run(["python", 'setup.py', 'bdist_wheel', 'upload'], stdout=subprocess.PIPE) # Conda python_versions = ["2.7", "3.5", "3.6"] for i, pyver in enumerate(python_versions): print('\nAnaconda: Start build {}/{}'.format(i+1, len(python_versions))) print(' > Python version: {}'.format(pyver)) temp_output_dir = 'output_build' print(' > Saving artifacts to dir: {}'.format(temp_output_dir)) if os.path.exists(temp_output_dir): shutil.rmtree(temp_output_dir, ignore_errors=True) # build conda print(' > Build conda recipe...') cmd = ["conda", 'build', '--python={}'.format(pyver), '--no-anaconda-upload', 'conda-recipe'] msg = subprocess.run(cmd, stdout=subprocess.PIPE) # parse string message print(' > Parse conda artifact file name + path...') msg_s = str(msg) str_ini = "If you want to upload package(s) to anaconda.org later, type:\\n\\nanaconda upload " str_end = "\\n\\n# To have conda build upload to anaconda.org automatically" ini_id = msg_s.find(str_ini) + len(str_ini) end_id = msg_s.find(str_end) artifact_fname = msg_s[ini_id:end_id] print(' > Artifact name: {}'.format(artifact_fname)) # convert to all platforms print(' > Convert artifact to all platforms...') msg = subprocess.run(["conda", 'convert', "-p", "all", artifact_fname, "-o", temp_output_dir], stdout=subprocess.PIPE) # upload to anaconda print(' > Upload all artifact to all platforms...') print(' -- Uploading artifact: {}'.format(artifact_fname)) msg_upload = subprocess.run(["anaconda", "upload", artifact_fname], stdout=subprocess.PIPE) for root, dirs, files in os.walk(temp_output_dir): if any(files): for fname in files: if fname.endswith('.tar.bz2'): print(' -- Uploading artifact: {} '.format(root + '/' + fname)) msg = 
subprocess.run(["anaconda", 'upload', root + '/' + fname], stdout=subprocess.PIPE) print('\nRemoving temp dir: {}'.format(temp_output_dir)) if os.path.exists(temp_output_dir): shutil.rmtree(temp_output_dir, ignore_errors=True)
Python
0.000001
b52ba28a8315a0cdeda7593d087607f582f77f18
Create __init__.py
model/__init__.py
model/__init__.py
__version__='0.0.0'
Python
0.000429
721720b1f4d63f1368714f764794c8d406e4982d
Add to_data test
tests/test_firebase.py
tests/test_firebase.py
import pytest import linkatos.firebase as fb def test_to_data(): url = 'https://foo.com' data = {'url': 'https://foo.com'} assert fb.to_data(url) == data
Python
0.000004
6bf9fce3bcc3e13ec252f5f858d70e177577c453
add Level 05
pythonchallenge/level_05.py
pythonchallenge/level_05.py
import unittest import requests import logging import re import pickle # Default is warning, it's to suppress requests INFO log logging.basicConfig(format='%(message)s') def solution(): url = "http://www.pythonchallenge.com/pc/def/banner.p" banner = pickle.loads(requests.get(url).text) ret = [] for g in banner: line = '' for c, count_c in g: line += c * count_c ret.append(line) return ret class SolutionTest(unittest.TestCase): def setUp(self): self.prefix = "http://www.pythonchallenge.com/pc/def/" self.suffix = ".html" def test_solution(self): actual = solution() # It would be identified by pep8, but this is ascii art, who cares! expected = [' ', ' ##### ##### ', ' #### #### ', ' #### #### ', ' #### #### ', ' #### #### ', ' #### #### ', ' #### #### ', ' #### #### ', ' ### #### ### ### ##### ### ##### ### ### #### ', ' ### ## #### ####### ## ### #### ####### #### ####### ### ### #### ', ' ### ### ##### #### ### #### ##### #### ##### #### ### ### #### ', ' ### #### #### ### ### #### #### #### #### ### #### #### ', ' ### #### #### ### #### #### #### #### ### ### #### ', '#### #### #### ## ### #### #### #### #### #### ### #### ', '#### #### #### ########## #### #### #### #### ############## #### ', '#### #### #### ### #### #### #### #### #### #### #### ', '#### #### #### #### ### #### #### #### #### #### #### ', ' ### #### #### #### ### #### #### #### #### ### #### ', ' ### ## #### #### ### #### #### #### #### #### ### ## #### ', ' ### ## #### #### ########### #### #### #### #### ### ## #### ', ' ### ###### ##### ## #### ###### ########### ##### ### ######', ' '] self.assertEquals(actual, expected) origin_url = ''.join([self.prefix, 'channel', self.suffix]) try: r = requests.get(origin_url) except: raise self.assertTrue(r.ok) next_entry = [re.sub(r'(.*)URL=(.*)\.html\"\>', r'\2', line) for line in r.iter_lines() if re.match(r'.*URL.*', line)] r.close() if len(next_entry) != 0: r = requests.get( ''.join([self.prefix, next_entry[0], self.suffix])) logging.warn('Level 06 
is %s' % r.url) else: logging.warn('Level 06 is %s' % origin_url) if __name__ == "__main__": unittest.main(failfast=True)
Python
0
4b2df28a979312875d5a72a1713f535b0e34a1e6
fix config mocking.
solr/test_solr.py
solr/test_solr.py
# (C) Datadog, Inc. 2010-2016 # All rights reserved # Licensed under Simplified BSD License (see LICENSE) # stdlib import threading import time from types import ListType import unittest import mock # 3p from nose.plugins.attrib import attr import logging # project from aggregator import MetricsAggregator from tests.checks.common import Fixtures LOG_INFO = { 'log_level': None, 'log_to_event_viewer': False, 'log_to_syslog': False, 'syslog_host': None, 'syslog_port': None, 'log_level': logging.INFO, 'disable_file_logging': True, 'collector_log_file': '/var/log/datadog/collector.log', 'forwarder_log_file': '/var/log/datadog/forwarder.log', 'dogstatsd_log_file': '/var/log/datadog/dogstatsd.log', 'jmxfetch_log_file': '/var/log/datadog/jmxfetch.log', 'go-metro_log_file': '/var/log/datadog/go-metro.log', } with mock.patch('config.get_logging_config', return_value=LOG_INFO): from dogstatsd import Server from jmxfetch import JMXFetch STATSD_PORT = 8127 class DummyReporter(threading.Thread): def __init__(self, metrics_aggregator): threading.Thread.__init__(self) self.finished = threading.Event() self.metrics_aggregator = metrics_aggregator self.interval = 10 self.metrics = None self.finished = False self.start() def run(self): while not self.finished: time.sleep(self.interval) self.flush() def flush(self): metrics = self.metrics_aggregator.flush() if metrics: self.metrics = metrics @attr(requires='solr') class JMXTestCase(unittest.TestCase): def setUp(self): aggregator = MetricsAggregator("test_host") self.server = Server(aggregator, "localhost", STATSD_PORT) self.reporter = DummyReporter(aggregator) self.t1 = threading.Thread(target=self.server.start) self.t1.start() confd_path = Fixtures.directory() self.jmx_daemon = JMXFetch(confd_path, {'dogstatsd_port': STATSD_PORT}) self.t2 = threading.Thread(target=self.jmx_daemon.run) self.t2.start() def tearDown(self): self.server.stop() self.reporter.finished = True self.jmx_daemon.terminate() def testTomcatMetrics(self): count = 0 
while self.reporter.metrics is None: time.sleep(1) count += 1 if count > 25: raise Exception("No metrics were received in 25 seconds") metrics = self.reporter.metrics self.assertTrue(isinstance(metrics, ListType)) self.assertTrue(len(metrics) > 8, metrics) self.assertEquals(len([t for t in metrics if 'instance:solr_instance' in t['tags'] and t['metric'] == "jvm.thread_count"]), 1, metrics) self.assertTrue(len([t for t in metrics if "jvm." in t['metric'] and 'instance:solr_instance' in t['tags']]) > 4, metrics) self.assertTrue(len([t for t in metrics if "solr." in t['metric'] and 'instance:solr_instance' in t['tags']]) > 4, metrics)
# (C) Datadog, Inc. 2010-2016 # All rights reserved # Licensed under Simplified BSD License (see LICENSE) # stdlib import threading import time from types import ListType import unittest # 3p from nose.plugins.attrib import attr # project from aggregator import MetricsAggregator from dogstatsd import Server from jmxfetch import JMXFetch from shared.test.common import Fixtures STATSD_PORT = 8127 class DummyReporter(threading.Thread): def __init__(self, metrics_aggregator): threading.Thread.__init__(self) self.finished = threading.Event() self.metrics_aggregator = metrics_aggregator self.interval = 10 self.metrics = None self.finished = False self.start() def run(self): while not self.finished: time.sleep(self.interval) self.flush() def flush(self): metrics = self.metrics_aggregator.flush() if metrics: self.metrics = metrics @attr(requires='solr') class JMXTestCase(unittest.TestCase): def setUp(self): aggregator = MetricsAggregator("test_host") self.server = Server(aggregator, "localhost", STATSD_PORT) self.reporter = DummyReporter(aggregator) self.t1 = threading.Thread(target=self.server.start) self.t1.start() confd_path = Fixtures.directory() self.jmx_daemon = JMXFetch(confd_path, {'dogstatsd_port': STATSD_PORT}) self.t2 = threading.Thread(target=self.jmx_daemon.run) self.t2.start() def tearDown(self): self.server.stop() self.reporter.finished = True self.jmx_daemon.terminate() def testTomcatMetrics(self): count = 0 while self.reporter.metrics is None: time.sleep(1) count += 1 if count > 25: raise Exception("No metrics were received in 25 seconds") metrics = self.reporter.metrics self.assertTrue(isinstance(metrics, ListType)) self.assertTrue(len(metrics) > 8, metrics) self.assertEquals(len([t for t in metrics if 'instance:solr_instance' in t['tags'] and t['metric'] == "jvm.thread_count"]), 1, metrics) self.assertTrue(len([t for t in metrics if "jvm." 
in t['metric'] and 'instance:solr_instance' in t['tags']]) > 4, metrics) self.assertTrue(len([t for t in metrics if "solr." in t['metric'] and 'instance:solr_instance' in t['tags']]) > 4, metrics)
Python
0
fe37335645993ad10c9902aaaaf0ca2c53912d49
Create Average Movies rating etl
movies_avg_etl.py
movies_avg_etl.py
import pyspark spark = ( pyspark.sql.SparkSession.builder.appName("FromDatabase") .config("spark.driver.extraClassPath", "<driver_location>/postgresql-42.2.18.jar") .getOrCreate() ) # Read table from db using Spark JDBC def extract_movies_to_df(): movies_df = ( spark.read.format("jdbc") .option("url", "jdbc:postgresql://localhost:5432/etl_pipeline") .option("dbtable", "movies") .option("user", "<username") .option("password", "<password>") .option("driver", "org.postgresql.Driver") .load() ) return movies_df # Read users table from db using Spark JDBC def extract_users_to_df(): users_df = ( spark.read.format("jdbc") .option("url", "jdbc:postgresql://localhost:5432/etl_pipeline") .option("dbtable", "users") .option("user", "<username") .option("password", "<password>") .option("driver", "org.postgresql.Driver") .load() ) return users_df # transforming tables def transform_avg_ratings(movies_df, users_df): avg_rating = users_df.groupby("movie_id").mean("rating") # join movies_df and avg_rating table on id df = movies_df.join(avg_rating, movies_df.id == avg_rating.movies_id) df = df.drop("movie_id") return df # Write the result into avg_ratings table in db def load_df_to_db(df): mode = "overwrite" url = "jdbc:postgresql://localhost:5432/etl_pipeline" spark.write() properties = { "user": "<username>", "password": "<password>", "driver": "org.postgresql.Driver", } df.write.jdbc(url=url, table="avg_ratings", mode=mode, properties=properties) if __name__ == "__main__": movies_df = extract_movies_to_df() users_df = extract_users_to_df() ratings_df = transform_avg_ratings(movies_df, users_df) load_df_to_db(ratings_df)
Python
0
c95bfb10f87bd0a637d0ad790d484b7957441371
Add WSGI support.
pypi.wsgi
pypi.wsgi
#!/usr/bin/python import sys,os prefix = os.path.dirname(__file__) sys.path.insert(0, prefix) import cStringIO, webui, store, config store.keep_conn = True class Request: def __init__(self, environ, start_response): self.start_response = start_response self.rfile = cStringIO.StringIO(environ['wsgi.input'].read()) self.wfile = cStringIO.StringIO() self.config = config.Config(prefix+'/config.ini', 'webui') def send_response(self, code, message=''): self.status = '%s %s' % (code, message) self.headers = [] def send_header(self, keyword, value): self.headers.append((keyword, value)) def set_content_type(self, content_type): self.send_header('Content-Type', content_type) def end_headers(self): self.start_response(self.status, self.headers) def debug(environ, start_response): if environ['PATH_INFO'].startswith("/auth") and \ "HTTP_AUTHORIZATION" not in environ: start_response("401 login", [('WWW-Authenticate', 'Basic realm="foo"')]) return start_response("200 ok", [('Content-type', 'text/plain')]) environ = environ.items() environ.sort() for k,v in environ: yield "%s=%s\n" % (k, v) return def application(environ, start_response): if "HTTP_AUTHORIZATION" in environ: environ["HTTP_CGI_AUTHORIZATION"] = environ["HTTP_AUTHORIZATION"] r = Request(environ, start_response) webui.WebUI(r, environ).run() return [r.wfile.getvalue()]
Python
0
39313cd933e0038b9a9bfa8b6b4cb50e3707d455
add k_min.py
Algo-1/week2/7-K-Min/k_min.py
Algo-1/week2/7-K-Min/k_min.py
class KMin: # Quick sort @staticmethod def swap(numbers, i, j): temp = numbers[i] numbers[i] = numbers[j] numbers[j] = temp # The last element is a pivot, all smaller elements are to left of it # and greater elements to right @staticmethod def partition(numbers, l, r): x = numbers[r] i = l for j in range(l, r): if numbers[j] <= x: KMin.swap(numbers, i, j) i += 1 KMin.swap(numbers, i, r) return i @staticmethod def kthSmallest(numbers, l, r, k): if k > 0 and k <= r - l + 1: pos = KMin.partition(numbers, l, r) if pos - l == k - 1: return numbers[pos] if pos - l > k - 1: return KMin.kthSmallest(numbers, l, pos - 1, k) return KMin.kthSmallest(numbers, pos + 1, r, k - pos + l - 1) # Finds the k-th minimum element in an unsorted collection. # numbers - [int] # k - int @staticmethod def kthMinimum(numbers, k): return KMin.kthSmallest(numbers, 0, len(numbers) - 1, k) def main(): numbers = [33, 8, 5, 2, 3, 6, 1, 4, 9, 99] for i in range(1, len(numbers) + 1): print(KMin.kthMinimum(numbers, i)) if __name__ == '__main__': main()
Python
0.000104
f44fd9df7ac7fa5e553e99d98c1376439a33ffc8
Change device pull to handle root,and renamed local file as well history.db from results.db
wlauto/workloads/androbench/__init__.py
wlauto/workloads/androbench/__init__.py
# Copyright 2013-2015 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import sqlite3 from wlauto import AndroidUiAutoBenchmark class Androbench(AndroidUiAutoBenchmark): name = 'androbench' description = """Androbench measures the storage performance of device""" package = 'com.andromeda.androbench2' activity = '.main' run_timeout = 10 * 60 def update_result(self, context): super(Androbench, self).update_result(context) dbn = 'databases/history.db' db = self.device.path.join(self.device.package_data_directory, self.package, dbn) host_results = os.path.join(context.output_directory, 'history.db') self.device.pull_file(db, host_results, as_root=True) qs = 'select * from history' conn = sqlite3.connect(host_results) c = conn.cursor() c.execute(qs) results = c.fetchone() context.result.add_metric('Sequential Read ', results[8], 'MB/s') context.result.add_metric('Sequential Write ', results[9], 'MB/s') context.result.add_metric('Random Read ', results[10], 'MB/s') context.result.add_metric('Random Write ', results[12], 'MB/s')
# Copyright 2013-2015 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import sqlite3 from wlauto import AndroidUiAutoBenchmark class Androbench(AndroidUiAutoBenchmark): name = 'androbench' description = """Androbench measures the storage performance of device""" package = 'com.andromeda.androbench2' activity = '.main' run_timeout = 10 * 60 def update_result(self, context): super(Androbench, self).update_result(context) dbn = 'databases/history.db' db = self.device.path.join(self.device.package_data_directory, self.package, dbn) host_results = os.path.join(context.output_directory, 'results.db') self.device.pull_file(db, host_results) qs = 'select * from history' conn = sqlite3.connect(host_results) c = conn.cursor() c.execute(qs) results = c.fetchone() context.result.add_metric('Sequential Read ', results[8], 'MB/s') context.result.add_metric('Sequential Write ', results[9], 'MB/s') context.result.add_metric('Random Read ', results[10], 'MB/s') context.result.add_metric('Random Write ', results[12], 'MB/s')
Python
0
f700ca39535c5eb14015dd84f4bc0dad2b086d23
Add ex_fzf.py
examples/ex_fzf.py
examples/ex_fzf.py
#!/usr/bin/env python import string import textwrap import pprint from dynmen import Menu fzf = Menu(command=('fzf',)) exampl_inp_dict = vars(string) exampl_inp_dict = {k:v for k,v in exampl_inp_dict.items() if not k.startswith('_')} def print_obj(obj, prefix=' '): txt = pprint.pformat(obj) lines = [] for line in txt.splitlines(): line = textwrap.indent(line, prefix) lines.append(line) print('\n'.join(lines)) def run_n_print(entries, fn_str): fn = globals()[fn_str.split('.')[0]] for attr in fn_str.split('.')[1:]: fn = getattr(fn, attr) print("\nLAUNCHING '{}' with -".format(fn_str)) print_obj(entries) output = fn(entries) print('OUTPUT IS -') print_obj(output) return output run_n_print(exampl_inp_dict, 'fzf') run_n_print(exampl_inp_dict, 'fzf.sort') run_n_print(list(exampl_inp_dict), 'fzf')
Python
0.000005
45edceb65a9cac9f61215ad77e9c048d092c0b57
add examples/roster.py
examples/roster.py
examples/roster.py
import dbus import dbus.glib import gobject import sys from account import read_account, connect from telepathy.client.channel import Channel from telepathy.constants import ( CONNECTION_HANDLE_TYPE_CONTACT, CONNECTION_HANDLE_TYPE_LIST, CONNECTION_STATUS_CONNECTED, CONNECTION_STATUS_DISCONNECTED) from telepathy.errors import NotAvailable from telepathy.interfaces import ( CHANNEL_INTERFACE_GROUP, CHANNEL_TYPE_CONTACT_LIST, CONN_INTERFACE) def print_members(conn, chan): current, local_pending, remote_pending = ( chan[CHANNEL_INTERFACE_GROUP].GetAllMembers()) for member in current: print ' - %s' % ( conn[CONN_INTERFACE].InspectHandles( CONNECTION_HANDLE_TYPE_CONTACT, [member])[0]) if not current: print ' (none)' class RosterClient: def __init__(self, conn): self.conn = conn conn[CONN_INTERFACE].connect_to_signal( 'StatusChanged', self.status_changed_cb) def _request_list_channel(self, name): handle = self.conn[CONN_INTERFACE].RequestHandles( CONNECTION_HANDLE_TYPE_LIST, [name])[0] chan_path = self.conn[CONN_INTERFACE].RequestChannel( CHANNEL_TYPE_CONTACT_LIST, CONNECTION_HANDLE_TYPE_LIST, handle, True) return Channel(self.conn._dbus_object._named_service, chan_path) def status_changed_cb(self, state, reason): if state == CONNECTION_STATUS_DISCONNECTED: print 'disconnected: %s' % reason self.quit() return if state != CONNECTION_STATUS_CONNECTED: return print 'connected' for name in ('subscribe', 'publish', 'hide', 'allow', 'deny', 'known'): try: chan = self._request_list_channel(name) except dbus.DBusException: print "'%s' channel is not available" % name continue # hack chan._valid_interfaces.add(CHANNEL_INTERFACE_GROUP) print '%s: members' % name print_members(self.conn, chan) chan[CHANNEL_INTERFACE_GROUP].connect_to_signal('MembersChanged', lambda *args: self.members_changed_cb(name, *args)) print 'waiting for changes' def members_changed_cb(self, name, message, added, removed, local_pending, remote_pending, actor, reason): if added: for handle in added: print '%s: 
added: %d' % (name, added) if removed: for handle in removed: print '%s: removed: %d' % (name, added) def run(self): self.loop = gobject.MainLoop() try: self.loop.run() except KeyboardInterrupt: print 'interrupted' def quit(self): self.loop.quit() if __name__ == '__main__': assert len(sys.argv) == 2 account_file = sys.argv[1] manager, protocol, account = read_account(account_file) conn = connect(manager, protocol, account) client = RosterClient(conn) print "connecting" conn[CONN_INTERFACE].Connect() client.run() print "disconnecting" try: conn[CONN_INTERFACE].Disconnect() except dbus.dbus_bindings.DBusException: pass
Python
0
7d198f3eaca6a91b731b3e25c0285cd46e72935a
Remove duplicates in authorized origins table
swh/web/common/migrations/0005_remove_duplicated_authorized_origins.py
swh/web/common/migrations/0005_remove_duplicated_authorized_origins.py
# Copyright (C) 2019 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU Affero General Public License version 3, or any later version # See top-level LICENSE file for more information from __future__ import unicode_literals from django.db import migrations from swh.web.common.models import SaveAuthorizedOrigin def _remove_duplicated_urls_in_authorized_list(apps, schema_editor): sao = SaveAuthorizedOrigin.objects for url in sao.values_list('url', flat=True).distinct(): sao.filter(pk__in=sao.filter( url=url).values_list('id', flat=True)[1:]).delete() class Migration(migrations.Migration): dependencies = [ ('swh.web.common', '0004_auto_20190204_1324'), ] operations = [ migrations.RunPython(_remove_duplicated_urls_in_authorized_list) ]
Python
0.000001
91541cf82f435cb261d9debc85a2a8ae6dd74ab1
Add a function to initialize the logging.
xutils/init_logging.py
xutils/init_logging.py
# encoding: utf-8 from __future__ import print_function, absolute_import, unicode_literals, division import logging def init_logging(logger=None, level="DEBUG", log_file="", file_config=None, dict_config=None): # Initialize the argument logger with the arguments, level and log_file. if logger: fmt = "%(asctime)s - %(pathname)s - %(funcName)s - %(lineno)d - %(levelname)s - %(message)s" datefmt = "%Y-%m-%d %H:%M:%S" formatter = logging.Formatter(fmt=fmt, datefmt=datefmt) level = getattr(logging, level.upper()) logger.setLevel(level) if log_file: from logging.handlers import TimedRotatingFileHandler handler = TimedRotatingFileHandler(log_file, when="midnight", interval=1, backupCount=30) else: handler = logging.StreamHandler() handler.setLevel(level) handler.setFormatter(formatter) logger.addHandler(handler) # Initialize logging by the configuration file, file_config. if file_config: logging.config.fileConfig(file_config, disable_existing_loggers=False) # Initialize logging by the dict configuration, dict_config. if dict_config and hasattr(logging.config, "dictConfig"): logging.config.dictConfig(dict_config)
Python
0
507e3bad4e877330eea29675dafb8210ab6bada5
Add tests for file agent
tests/test_agent.py
tests/test_agent.py
""" Tests for a agent. """ import io import os import pytest from onirim import action from onirim import agent from onirim import component def file_agent(in_str): return agent.File(io.StringIO(in_str), open(os.devnull, "w")) def content(): return component.Content([]) @pytest.mark.parametrize( "in_str, expected", [ ("play\n0\n", (action.Phase1.play, 0)), ("discard\n4\n", (action.Phase1.discard, 4)), ] ) def test_file_phase_1_action(in_str, expected): """ Test input parsing of phase_1_action. """ assert file_agent(in_str).phase_1_action(content()) == expected @pytest.mark.parametrize( "in_str, expected", [ ("key\n2\n", (action.Nightmare.by_key, {"idx": 2})), ("door\n3\n", (action.Nightmare.by_door, {"idx": 3})), ("hand\n", (action.Nightmare.by_hand, {})), ("deck\n", (action.Nightmare.by_deck, {})), ] ) def test_file_nightmare_action(in_str, expected): """ Test input parsing of nightmare action. """ assert file_agent(in_str).nightmare_action(content()) == expected @pytest.mark.parametrize( "in_str, expected", [ ("yes\n", True), ("no\n", False), ] ) def test_file_open_door(in_str, expected): """ Test input parsing of open door. """ assert file_agent(in_str).open_door(content(), None) == expected #def test_file_key_discard_react(in_str, expected): #TODO
Python
0
c67e1af4f765f143cb1b8420e053c1a9f00edd05
Add migrations for new statuses.
course_discovery/apps/course_metadata/migrations/0168_auto_20190404_1733.py
course_discovery/apps/course_metadata/migrations/0168_auto_20190404_1733.py
# -*- coding: utf-8 -*- # Generated by Django 1.11.15 on 2019-04-04 17:33 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.manager import djchoices.choices class Migration(migrations.Migration): dependencies = [ ('course_metadata', '0167_auto_20190403_1606'), ] operations = [ migrations.AlterModelManagers( name='course', managers=[ ('everything', django.db.models.manager.Manager()), ], ), migrations.AlterModelManagers( name='courseentitlement', managers=[ ('everything', django.db.models.manager.Manager()), ], ), migrations.AlterModelManagers( name='courserun', managers=[ ('everything', django.db.models.manager.Manager()), ], ), migrations.AlterModelManagers( name='seat', managers=[ ('everything', django.db.models.manager.Manager()), ], ), migrations.AlterField( model_name='courserun', name='status', field=models.CharField(choices=[('published', 'Published'), ('unpublished', 'Unpublished'), ('reviewed', 'Reviewed'), ('review_by_legal', 'Awaiting Review from Legal'), ('review_by_internal', 'Awaiting Internal Review')], db_index=True, default='unpublished', max_length=255, validators=[djchoices.choices.ChoicesValidator({'published': 'Published', 'review_by_internal': 'Awaiting Internal Review', 'review_by_legal': 'Awaiting Review from Legal', 'reviewed': 'Reviewed', 'unpublished': 'Unpublished'})]), ), ]
Python
0
d308874989667f36da1638f22d6b2d7e823b5ebd
Add script to extract reads or alignments matching a barcode.
extract-barcode.py
extract-barcode.py
""" code to extract a single cell from a set of alignments or reads marked via Valentine's umis repository: https://github.com/vals/umis """ import regex as re import sys from argparse import ArgumentParser from pysam import AlignmentFile def extract_barcode(sam, barcode): parser_re = re.compile('.*:CELL_(?P<CB>.*):UMI_(?P<MB>.*)') sam_file = AlignmentFile(sam, mode='r') filter_file = AlignmentFile("-", mode='wh', template=sam_file) track = sam_file.fetch(until_eof=True) for i, aln in enumerate(track): if aln.is_unmapped: continue match = parser_re.match(aln.qname) CB = match.group('CB') if CB == barcode: filter_file.write(aln) def stream_fastq(file_handler): ''' Generator which gives all four lines if a fastq read as one string ''' next_element = '' for i, line in enumerate(file_handler): next_element += line if i % 4 == 3: yield next_element next_element = '' def extract_barcode_fastq(fastq, barcode): parser_re = re.compile('.*:CELL_(?P<CB>.*):UMI_(?P<MB>.*)') fastq_file = stream_fastq(open(fastq)) for read in fastq_file: match = parser_re.match(read) CB = match.group('CB') if CB == barcode: sys.stdout.write(read) if __name__ == "__main__": parser = ArgumentParser("extract reads/alignments from a single cell") parser.add_argument("file", help="A SAM or FASTQ file") parser.add_argument("barcode", help="barcode of the cell to extract") args = parser.parse_args() extract_fn = extract_barcode_sam if args.file.endswith(".sam") else extract_barcode_fastq extract_fn(args.file, args.barcode)
Python
0
048d0d7ce30b66af8bf48bcb0cb7f8bfb90fff0c
Add tests for Part, Pin, Bus and Net iterators.
tests/test_iters.py
tests/test_iters.py
import pytest

from skidl import *

from .setup_teardown import *


def test_iters_1():
    """Bus iterator: attach an LED across every ordered pair of lines."""
    b_size = 4
    bus = Bus('chplx', b_size)
    for anode_line in bus:
        for cathode_line in bus:
            if anode_line == cathode_line:
                continue
            led = Part('device', 'LED')
            anode_line += led['A']
            cathode_line += led['K']
    # Each line ends up with one anode and one cathode per other line.
    for line in bus:
        assert len(line) == 2 * (b_size - 1)


def test_iters_2():
    """Pin iterator: count ordered pairs of distinct pins."""
    transistor = Part('device', 'Q_NPN_CEB')
    pair_count = sum(
        1 for p1 in transistor for p2 in transistor if p1 != p2)
    assert pair_count == len(transistor) * (len(transistor) - 1)


def test_iters_3():
    """Net iterator: iterating an empty net attaches nothing."""
    net = Net()
    for hi_line in net:
        for lo_line in net:
            if hi_line == lo_line:
                continue
            led = Part('device', 'LED')
            hi_line += led['A']
            lo_line += led['K']
    for line in net:
        assert len(line) == 0
Python
0
60fbfa0b440a762fd25f19148313f5ba27d619aa
add a testing file
DataStructures/Trees/main.py
DataStructures/Trees/main.py
import BST

# Environment for manually testing the BST module.


def main():
    # Parenthesized single-argument print works under both Python 2 and 3.
    print('Testing')


if __name__ == '__main__':
    # Guarded entry point: importing this module no longer runs main().
    main()
Python
0.000001
00aad4a302518400dbb936c7e2ce1d7560c5762f
Add files via upload
src/que_.py
src/que_.py
class our_queue(object):
    """A FIFO queue built from singly linked nodes.

    The class doubles as its own node type: the instance the caller
    creates is the handle, and each ``enqueue`` links a fresh
    ``our_queue`` node in at the tail.
    """

    def __init__(self):
        """Initialize an empty queue."""
        self.head = self
        self.tail = self
        self.next_node = None
        self.data = None
        self.size = 0

    def enqueue(self, val):
        """Add ``val`` to the back of the queue and return the head node.

        BUG FIX: emptiness is now tracked via ``size`` instead of the
        ``data is None`` sentinel, so enqueuing after fully draining the
        queue no longer dereferences a None head, and ``None`` itself is
        a storable value.
        """
        new_node = our_queue()
        new_node.data = val
        if self.size == 0:
            # Empty (or fully drained) queue: the new node becomes the head.
            self.head = new_node
        else:
            self.tail.next_node = new_node
        self.tail = new_node
        self.size += 1
        return self.head

    def dequeue(self):
        """Remove and return the value at the front of the queue.

        Raises:
            IndexError: if the queue is empty.
        """
        if self.size == 0:
            raise IndexError('que is empty')
        current = self.head
        self.head = current.next_node
        self.size -= 1
        return current.data

    def peek(self):
        """Return (without removing) the value at the front of the queue.

        Raises:
            IndexError: if the queue is empty.
        """
        if self.size == 0:
            raise IndexError('que is empty')
        return self.head.data

    def __len__(self):
        """Return the number of items currently in the queue."""
        return self.size


temp = our_queue()
temp.enqueue(4)
temp.enqueue(3)
print(len(temp))
Python
0
ccc663b3a96268dcdf2256d461a11d845a1044a1
Add the original test case of bug #1469629, formatted according to local conventions.
Lib/test/leakers/test_dictself.py
Lib/test/leakers/test_dictself.py
'''Test case for "self.__dict__ = self" circular reference bug (#1469629)'''

import gc


class LeakyDict(dict):
    # dict subclass: instances get a __dict__ we can point back at them.
    pass


def leak():
    leaky = LeakyDict()
    leaky.__dict__ = leaky
    del leaky
    for _ in range(3):
        gc.collect()
Python
0
994a956486ff94ea777aa300270ae065d2ea62c6
Add a script to send the contents of newly created files to STDOUT or a TCP/IP socket
samson/scripts/file_monitor.py
samson/scripts/file_monitor.py
#!/usr/bin/python
# -*- encoding: utf-8 -*-
# Telefónica Digital - Product Development and Innovation
#
# THIS CODE AND INFORMATION ARE PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
#
# Copyright (c) 2012 Telefónica Investigación y Desarrollo S.A.U.
# All rights reserved.

"""
A script to detect newly created files in a given directory and send the
contents to STDOUT or a TCP/IP socket.
"""

import os
import pyinotify
import socket
import sys

from optparse import OptionParser


class DataFileReader():
    """Read a given file"""

    def __init__(self, filename):
        # BUG FIX: the filename argument used to be discarded
        # (self.filename was always set to ""); store what was passed.
        self.filename = filename

    def read_file(self):
        """Return the contents of ``self.filename``, or None when the
        file is unreadable or empty."""
        sys.stderr.write(".")
        try:
            # BUG FIX: the old try/finally closed ``data_file`` even when
            # open() itself raised (NameError), and the error message
            # referenced the undefined name ``data_file_str``.
            with open(self.filename, "r") as data_file:
                data_file_contents = data_file.read()
        except IOError:
            sys.stderr.write(
                "Error: Unable to find file (%s) or read its data\n"
                % self.filename)
            return None
        return data_file_contents or None


class SocketModHandler(pyinotify.ProcessEvent, DataFileReader):
    """Handle inotify events to be sent to a TCP/IP socket"""

    sock = None

    def __init__(self, host, port):
        self.host = host
        self.port = port
        # socket does DNS checks and will fail if the specified socket is
        # not open
        # TODO: Add some logic handling failures
        if self.sock is None:
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.sock.connect((host, int(port)))

    def __del__(self):
        """Clean up"""
        self.sock.shutdown(socket.SHUT_WR)
        self.sock.close()

    def process_IN_CLOSE_WRITE(self, evt):
        """Act on files that were opened for writing and have been closed"""
        self.filename = os.path.join(evt.path, evt.name)
        data_file_contents = self.read_file()
        if data_file_contents is None:
            # BUG FIX: a failed/empty read used to crash on len(None).
            return
        total_sent = 0
        msglen = len(data_file_contents)
        while total_sent < msglen:
            sent = self.sock.send(data_file_contents[total_sent:])
            if sent == 0:
                raise RuntimeError("socket died")
            total_sent = total_sent + sent


class StdOutModHandler(pyinotify.ProcessEvent, DataFileReader):
    """Handle inotify events to be sent to STDOUT"""

    def process_IN_CLOSE_WRITE(self, evt):
        """Act on files that were opened for writing and have been closed"""
        self.filename = os.path.join(evt.path, evt.name)
        data = self.read_file()
        if data is None:
            # Unreadable or empty file: nothing to emit.
            return
        sys.stdout.write(data)


def main():
    # Executing with -h shows all the available args
    # optparse is deprecated in Python 2.7 however it's likely this script
    # will be deployed with earlier releases.
    parser = OptionParser()
    parser.add_option("-d", "--dir", dest="data_dir",
                      help="Look for files in DIR")
    parser.add_option("-t", "--tcp", dest="socket",
                      help="Send the data to host:port")
    parser.add_option("-s", action="store_true", default=False,
                      dest="stdout", help="Send the data to stdout")
    (options, args) = parser.parse_args()

    if options.stdout and options.socket:
        # We're not that chatty and only write to one of STDOUT or a TCP
        # socket
        parser.error("Only one of -t/--tcp or -s can be specified")

    if not options.data_dir:
        options.data_dir = os.getcwd()

    if os.path.exists(options.data_dir):
        sys.stderr.write("Monitoring %s\n" % options.data_dir)
        if options.socket:
            (host, port) = options.socket.split(":")
            sys.stderr.write("Sending data to %s on %s\n" % (host, port))
            handler = SocketModHandler(host, port)
        else:
            # Send the output to STDOUT
            sys.stderr.write("Writing to STDOUT\n")
            handler = StdOutModHandler()

        wm = pyinotify.WatchManager()
        notifier = pyinotify.Notifier(wm, handler)
        wdd = wm.add_watch(options.data_dir, pyinotify.IN_CLOSE_WRITE)
        notifier.loop()
    else:
        sys.stderr.write("No such directory, %s\n" % options.data_dir)
        sys.stderr.write("Exiting\n")


if __name__ == "__main__":
    main()
Python
0
ceaabf80649a8a83c6ddfc548a3fa369c973e5c6
Complete alg fizzbuzz
alg_fizzbuzz.py
alg_fizzbuzz.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function


def fizzbuzz(n):
    """Return the fizzbuzz sequence for 1..n as a list.

    Multiples of 3 map to 'fizz', multiples of 5 to 'buzz', multiples of
    both to 'fizzbuzz'; every other number is kept as an int.
    """
    result = []
    for value in range(1, n + 1):
        word = ''
        if value % 3 == 0:
            word += 'fizz'
        if value % 5 == 0:
            word += 'buzz'
        result.append(word if word else value)
    return result


def main():
    print(fizzbuzz(100))


if __name__ == '__main__':
    main()
Python
0.99999
e670901ebaf7422f7a71f78a3dc94730eba5605b
Add a module full of hinting helpers.
fmn/lib/hinting.py
fmn/lib/hinting.py
""" Helpers for "datanommer hints" for rules. Rules can optionally define a "hint" for a datanommer query. For instance, if a rule has to do with filtering for bodhi messages, then a provided hint could be {'category': 'bodhi'}. This simply speeds up the process of looking for potential message matches in the history by letting the database server do some of the work for us. Without this, we have to comb through literally every message ever and then try to see what matches and what doesn't in python-land: Slow! Rules define their hints with the @hint decorator defined here. When querying datanommer, the ``gather_hinting`` helper here can be used to construct the hint dict for ``datanommer.grep(..., **hints)``. """ import collections import functools import fedmsg.config def hint(invertible=True, **hints): """ A decorator that can optionally hang datanommer hints on a rule. """ def wrapper(fn): @functools.wraps(fn) def replacement(*args, **kwargs): return fn(*args, **kwargs) # Hang hints on the function. replacement.hints = hints replacement.hinting_invertible = invertible return replacement return wrapper def prefixed(topic, prefix='org.fedoraproject'): config = fedmsg.config.load_config() # This is memoized for us. return '.'.join([prefix, config['environment'], topic]) def gather_hinting(filter, valid_paths): """ Construct hint arguments for datanommer from a filter. """ hinting = collections.defaultdict(list) for rule in filter.rules: root, name = rule.code_path.split(':', 1) info = valid_paths[root][name] for key, value in info['datanommer-hints'].items(): # If the rule is inverted, but the hint is not invertible, then # there is no hinting we can provide. Carry on. if rule.negated and not info['hints-invertible']: continue # Otherwise, construct the inverse hint if necessary if rule.negated: key = 'not_' + key # And tack it on. hinting[key] += value return hinting
Python
0
1d31feb4fadadc377fbb3cf0f18c38f5a8d39aca
disable tray icon when fail
launcher/1.2.0/start.py
launcher/1.2.0/start.py
#!/usr/bin/env python # coding:utf-8 import os, sys current_path = os.path.dirname(os.path.abspath(__file__)) python_path = os.path.abspath( os.path.join(current_path, os.pardir, os.pardir, 'python27', '1.0')) noarch_lib = os.path.abspath( os.path.join(python_path, 'lib', 'noarch')) sys.path.append(noarch_lib) if sys.platform == "linux" or sys.platform == "linux2": from gtk_tray import sys_tray elif sys.platform == "win32": current_path = os.path.dirname(os.path.abspath(__file__)) sys.path.append(current_path) from win_tray import sys_tray elif sys.platform == "darwin": darwin_lib = os.path.abspath( os.path.join(python_path, 'lib', 'darwin')) sys.path.append(darwin_lib) extra_lib = "/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python" sys.path.append(extra_lib) osx_lib = os.path.join(python_path, 'lib', 'osx') sys.path.append(osx_lib) try: from mac_tray import sys_tray except: from non_tray import sys_tray else: from non_tray import sys_tray import atexit import logging import webbrowser import web_control import module_init import update import config import setup_win_python def exit_handler(): print 'Stopping all modules before exit!' 
module_init.stop_all() web_control.stop() atexit.register(exit_handler) def main(): # change path to launcher global __file__ __file__ = os.path.abspath(__file__) if os.path.islink(__file__): __file__ = getattr(os, 'readlink', lambda x: x)(__file__) os.chdir(os.path.dirname(os.path.abspath(__file__))) web_control.confirm_xxnet_exit() setup_win_python.check_setup() module_init.start_all_auto() web_control.start() #config.load() if config.get(["modules", "launcher", "popup_webui"], 1) == 1: webbrowser.open("http://127.0.0.1:8085/") update.start() sys_tray.serve_forever() module_init.stop_all() sys.exit() if __name__ == '__main__': current_path = os.path.dirname(os.path.abspath(__file__)) version = current_path.split(os.path.sep)[-1] logging.info("launcher version: %s", version) try: main() except KeyboardInterrupt: # Ctrl + C on console sys.exit
#!/usr/bin/env python # coding:utf-8 import os, sys current_path = os.path.dirname(os.path.abspath(__file__)) python_path = os.path.abspath( os.path.join(current_path, os.pardir, os.pardir, 'python27', '1.0')) noarch_lib = os.path.abspath( os.path.join(python_path, 'lib', 'noarch')) sys.path.append(noarch_lib) if sys.platform == "linux" or sys.platform == "linux2": from gtk_tray import sys_tray elif sys.platform == "win32": current_path = os.path.dirname(os.path.abspath(__file__)) sys.path.append(current_path) from win_tray import sys_tray elif sys.platform == "darwin": darwin_lib = os.path.abspath( os.path.join(python_path, 'lib', 'darwin')) sys.path.append(darwin_lib) extra_lib = "/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python" sys.path.append(extra_lib) osx_lib = os.path.join(python_path, 'lib', 'osx') sys.path.append(osx_lib) from mac_tray import sys_tray else: from non_tray import sys_tray import atexit import logging import webbrowser import web_control import module_init import update import config import setup_win_python def exit_handler(): print 'Stopping all modules before exit!' module_init.stop_all() web_control.stop() atexit.register(exit_handler) def main(): # change path to launcher global __file__ __file__ = os.path.abspath(__file__) if os.path.islink(__file__): __file__ = getattr(os, 'readlink', lambda x: x)(__file__) os.chdir(os.path.dirname(os.path.abspath(__file__))) web_control.confirm_xxnet_exit() setup_win_python.check_setup() module_init.start_all_auto() web_control.start() #config.load() if config.get(["modules", "launcher", "popup_webui"], 1) == 1: webbrowser.open("http://127.0.0.1:8085/") update.start() sys_tray.serve_forever() module_init.stop_all() sys.exit() if __name__ == '__main__': current_path = os.path.dirname(os.path.abspath(__file__)) version = current_path.split(os.path.sep)[-1] logging.info("launcher version: %s", version) try: main() except KeyboardInterrupt: # Ctrl + C on console sys.exit
Python
0.000001
719dd9064904d2e94cacd5c9ab349b0658344294
Create weather_proc.py
tmp/weather_proc.py
tmp/weather_proc.py
"""Aggregate raw weather records into per-10-minute-timeslot features.

The output matrix has one row per timeslot (19 days x 144 slots/day) and
11 columns: counts of climate categories 1-9, then the slot's mean
temperature and mean pollution.
"""
import argparse
from datetime import datetime

import numpy as np


# timeslot indexing funtion
def get_time_index(timestamp):
    """Map a datetime to its global 10-minute slot index (day * 144 + slot).

    Days are 1-based in the input and 0-based in the index; slots are
    600-second buckets within the day.
    """
    day = int(timestamp.date().day) - 1
    slot = int((timestamp.time().hour * 3600 + timestamp.time().minute * 60 +
                timestamp.time().second) / 600)
    return day * 144 + slot


ap = argparse.ArgumentParser()
ap.add_argument("-w", "--weather", required=True,
                help="Path to the weather data file")
ap.add_argument("-o", "--output", required=True,
                help="Path to the output file")
args = vars(ap.parse_args())

total_timeslots = 19 * 144
weather_dataset = np.zeros((total_timeslots, 11), dtype="float")

print('reading weather')
weather_file = open(args['weather'], 'r')
for line in weather_file:
    weather_data = line.split('\t')
    time_key = get_time_index(
        datetime.strptime(weather_data[0].strip(), '%Y-%m-%d %H:%M:%S'))
    # BUG FIX: was ``time_key > total_timeslots``, which let
    # time_key == total_timeslots through and raised IndexError below.
    if time_key >= total_timeslots:
        continue
    climate = int(weather_data[1].strip())
    temperature = float(weather_data[2].strip())
    pollution = float(weather_data[3].strip())
    weather_dataset[time_key][climate - 1] += 1.
    weather_dataset[time_key][9] += temperature
    weather_dataset[time_key][10] += pollution
weather_file.close()

# Convert the temperature/pollution sums into per-slot means; slots with
# no observations keep a divisor of 1 to avoid division by zero.
count = np.sum(weather_dataset[:, 0:9], axis=1)
count[count == 0] = 1.
weather_dataset[:, 9] = weather_dataset[:, 9] / count
weather_dataset[:, 10] = weather_dataset[:, 10] / count

np.savetxt(args["output"], weather_dataset, delimiter=',', fmt='%f')
Python
0.000047
9f2e4aad6d3a4004e80378f44aa178b37dd6da57
add ShellExecError
tpl/errors.py
tpl/errors.py
# -*- coding:utf-8 -*-
from gettext import gettext as _


class BaseError(Exception):
    """Root of the package's exception hierarchy.

    BUG FIX: derives from Exception rather than BaseException. User-defined
    errors must subclass Exception so that generic ``except Exception``
    handlers catch them; BaseException is reserved for interpreter-level
    signals such as SystemExit and KeyboardInterrupt.
    """

    # Subclasses override this with a translatable format string.
    ERR_MSG = _('')


class ShellExecError(BaseError):
    """Raised when a shell command exits with a non-zero status."""

    ERR_MSG = _('Command exit code not zero. \nExit Code:\n{}.\nOut:\n{}\nErr:\n{}')

    def __init__(self, exit_code, out, err):
        # Pre-format the message so callers can read it off the instance.
        self.message = self.ERR_MSG.format(exit_code, out, err)
        super(ShellExecError, self).__init__(self.message)
Python
0.000001
3d027df005725cbc5dfbba0262b0c52c5392d7f0
Add whoami resource which decodes token and returns user info from token
app/resources/check_token.py
app/resources/check_token.py
from flask import jsonify, make_response
from flask_restful import Resource, fields, marshal, reqparse

from app.common.auth.token import JWT
from app.models import User

# Marshalling schema for the user payload returned on success.
user_fields = {
    "id": fields.Integer,
    "username": fields.String,
    "created_at": fields.DateTime
}


class WhoAmIResource(Resource):
    """
    This class takes a token from the Authorization header and then
    returns the user info for the token if its valid
    """

    def __init__(self):
        self.parser = reqparse.RequestParser()
        self.parser.add_argument("Authorization", location="headers",
                                 required=True)

    def get(self):
        """ get method """
        token = self.parser.parse_args()["Authorization"]  # token from header
        try:
            user = User.query.get(int(JWT.decode_token(token)))
            return marshal(user, user_fields), 200
        except ValueError:
            # Token failed to decode: reject with 401.
            body = jsonify({
                "status": "failed",
                "message": "Invalid token, please login again"
            })
            return make_response(body, 401)
Python
0
62484ca423d6adfa19a581d7b74472e8475cf817
Create findbro.py
findbro/findbro.py
findbro/findbro.py
# findbro.py v0.1 # Matches Bro logs against a specified list of UIDs # Can run on N number of Bro logs # Performs no error checking # Should only be run on directories that contains only gzip Bro logs # Best way to collect UIDs is via bro-cut and grep # # Josh Liburdi 2016 from os import listdir import sys import gzip import argparse def write_file(fout_name,file_contents): fout = gzip.open(fout_name, 'w') fout.write(file_contents) fout.close() def proc_bro(fout_name,input,uid_list): file_cache = '' with gzip.open(input) as fin: lines = fin.readlines() file_cache += lines[6] file_cache += lines[7] for line in lines[8:-1]: if any(uid in line for uid in uid_list): file_cache += line if len(file_cache.split('\n')) == 3: print 'No matches in %s' % input else: print '%d matches in %s' % ( (len(file_cache.split('\n')) - 3), input ) write_file(fout_name,file_cache) def main(): parser = argparse.ArgumentParser(description='Merge Bro logs from a single day') parser.add_argument('--bro-dir', '-bd', dest='directory', action='store') parser.add_argument('--label', '-l', dest='label', action='store', default=None) parser.add_argument('--uid', '-u', dest='uid_file', action='store') argsout = parser.parse_args() dir_list = listdir(argsout.directory) log_dict = {} uid_list = [line.strip() for line in open(argsout.uid_file, 'r')] for log_file in dir_list: log_type = log_file.split('.')[0] log_dict.setdefault(log_type,[]).append(log_file) for key,list_val in log_dict.iteritems(): if argsout.label is None: fout_name = key + '.log.gz' else: fout_name = key + '.' + argsout.label + '.log.gz' for f in list_val: fpath = argsout.directory + f proc_bro(fout_name,fpath,uid_list) if __name__ == "__main__": main()
Python
0.000001
f34dabd23faa7d50e507b829e576c1968bdc2d52
Print The Message Happy New Year
src/iterations/exercise3.py
src/iterations/exercise3.py
# Print The Message "Happy new Year" followed by the name of a person # taken from a list for all people mentioned in the list. def print_Happy_New_Year_to( listOfPeople ): for user in listOfPeople: print 'Happy New Year, ', user print 'Done!' def main( ): listOfPeople=['John', 'Mary', 'Luke'] print_Happy_New_Year_to( listOfPeople ) quit(0) main( )
Python
0.000028
67cb63bcb776b1a89d8e96a7b90c02724ef5b0b6
update migrations
sweettooth/extensions/migrations/0005_auto_20190112_1733.py
sweettooth/extensions/migrations/0005_auto_20190112_1733.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-01-12 17:33
from __future__ import unicode_literals

import autoslug.fields
from django.db import migrations, models

import sweettooth.extensions.models


class Migration(migrations.Migration):
    # Relaxes the icon field (blank/default allowed) and makes the slug
    # non-editable, auto-populated from the extension name.

    dependencies = [
        ('extensions', '0004_auto_20181216_2102'),
    ]

    operations = [
        migrations.AlterField(
            model_name='extension',
            name='icon',
            field=models.ImageField(
                blank=True,
                default='',
                upload_to=sweettooth.extensions.models.make_icon_filename,
            ),
        ),
        migrations.AlterField(
            model_name='extension',
            name='slug',
            field=autoslug.fields.AutoSlugField(
                editable=False,
                populate_from='name',
            ),
        ),
    ]
Python
0.000023
3aa6ba18655a92753f33622ac80be66eb3b69ff6
Add useful python functions
device_resolutions.py
device_resolutions.py
"""Helpers for working with device screen dimensions and aspect ratios."""
from math import sqrt
import csv


def as_orientation(x, y, is_portrait=False):
    """Return (x, y) reordered into the requested orientation.

    Portrait puts the smaller dimension first; landscape the larger.
    """
    if is_portrait:
        return (y, x) if x > y else (x, y)
    else:
        return (x, y) if x > y else (y, x)


def as_portrait(x, y):
    """Given a dimensions, return that pair in portrait orientation"""
    return as_orientation(x, y, is_portrait=True)


def as_landscape(x, y):
    """Given a dimensions, return that pair in landscape orientation"""
    return as_orientation(x, y, is_portrait=False)


def calc_hypotenuse(a, b):
    """Return the length of the hypotenuse for legs ``a`` and ``b``."""
    return sqrt(a**2 + b**2)


def calc_ppi(width_px, height_px, hypotenuse_in):
    """
    Given the diagonal measurement of the screen in inches (`hypotenuse_in`),
    calculate the pixels-per-inch (ppi) offered by the screen.
    """
    hypotenuse_px = calc_hypotenuse(width_px, height_px)
    return hypotenuse_px / hypotenuse_in


# @TODO port to CSV
COMMON_ASPECT_RATIOS = (
    (3, 4, "3:4"),
    (1, 1, "1:1"),
    (5, 4, "5:4"),
    (4, 3, "4:3"),
    (1.43, 1, "IMAX 1.43:1"),
    (3, 2, "3:2"),
    # BUG FIX: the comma after "5:3" was missing, which made this entry a
    # call expression — TypeError ('tuple' object is not callable) at import.
    (5, 3, "5:3"),
    (14, 9, "14:9"),
    (16, 10, "16:10"),
    (16, 9, "16:9"),
    (17, 9, "17:9"),
    (21, 9, "21:9"),
    (1.375, 1, "Academy Ratio 1.375:1"),
    (2.35, 1, "CinemaScope 2.35:1"),
    (2.59, 1, "Cinemara 2.59:1"),
    (2.75, 1, "Ultra Panavision 70 2.75:1"),
    (2.76, 1, "MGM 65 2.76:1")
)


def find_aspect_ratio(x, y):
    """
    Given an aspect ratio, find an aspect ratio description using a list
    of common aspect ratios.

    Returns (ratio, cx, cy, name); name is "" when no common ratio matches.
    """
    ratio = x / y
    for cx, cy, name in COMMON_ASPECT_RATIOS:
        if ratio == (cx / cy):
            return (ratio, cx, cy, name)
    return (ratio, ratio, 1, "")
Python
0.000043
dad5f0a06dd057eccde5a086c84d5c639bb74ae9
Add back peaks for backwards compatibility with a deprecation warning.
dipy/reconst/peaks.py
dipy/reconst/peaks.py
# Deprecated shim: everything now lives in dipy.direction.peaks.
import warnings

w_s = ("The module 'dipy.reconst.peaks' is deprecated."
       " Please use the module 'dipy.direction.peaks' instead")
warnings.warn(w_s, DeprecationWarning)

from dipy.direction.peaks import *
Python
0
52a8a0c0def2930667155660c8844bb6836f9ff5
add script for table of orders/country
scripts/country_order_stats.py
scripts/country_order_stats.py
"""Print a table of order counts per country since ticket sales opened."""
import sqlite3

import pandas as pd

TICKET_SALE_START_DATE = '2016-01-01'

conn = sqlite3.connect('data/site/p3.db')
c = conn.cursor()

# BUG FIX: the original query string ended in a quadruple quote (an
# unterminated string literal — SyntaxError) and embedded the Python
# constant name literally in the SQL text; bind the date as a parameter.
query = c.execute(
    """
    SELECT ORDER_ID, COUNTRY_ID
    FROM assopy_orderitem, assopy_order
    WHERE assopy_orderitem.order_id == assopy_order.id
      AND assopy_order.created >= date(?)
    """,
    (TICKET_SALE_START_DATE,),
)
countries = query.fetchall()

df = pd.DataFrame(countries, columns=['order_id', 'country'])
counts = df.groupby('country').count().sort_values(by='order_id',
                                                   ascending=False)
print(counts)
Python
0
696b9d1177d24ca6c455052f15e529f4952196a0
add test
@test/test_lang_with.py
@test/test_lang_with.py
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018~2999 - Cologler <skyoflw@gmail.com>
# ----------
#
# ----------

from jasily.lang import with_it, with_objattr, with_objattrs


class SomeLock:
    """Minimal context manager that records whether it is currently held."""

    def __init__(self):
        self.locked = False

    def __enter__(self):
        self.locked = True

    def __exit__(self, *args):
        self.locked = False


def test_with_it():
    lock = SomeLock()

    @with_it(lock)
    def func():
        # The lock must be held while the wrapped function runs.
        assert lock.locked
        return 1

    assert not lock.locked
    assert func() == 1
    assert not lock.locked


def test_with_objattr():
    class X:
        def __init__(self):
            self.some_lock = SomeLock()

        @with_objattr('some_lock')
        def func(self):
            assert self.some_lock.locked
            return 1

    instance = X()
    assert not instance.some_lock.locked
    assert instance.func() == 1
    assert not instance.some_lock.locked


def test_with_objattrs():
    class X:
        def __init__(self):
            self.some_lock_1 = SomeLock()
            self.some_lock_2 = SomeLock()

        @with_objattrs('some_lock_1', 'some_lock_2')
        def func(self):
            # Both locks must be held for the duration of the call.
            assert self.some_lock_1.locked
            assert self.some_lock_2.locked
            return 1

    instance = X()
    assert not instance.some_lock_1.locked
    assert not instance.some_lock_2.locked
    assert instance.func() == 1
    assert not instance.some_lock_1.locked
    assert not instance.some_lock_2.locked
Python
0.000002
791ce2275933f16cf483dad1b16948441292e61c
add hook for google-api-python-client (#3965)
scripts/hooks/hook-pydrive2.py
scripts/hooks/hook-pydrive2.py
# PyInstaller hook: bundle the package metadata pydrive2 needs at runtime.
from PyInstaller.utils.hooks import copy_metadata

datas = copy_metadata("pydrive2") + copy_metadata("google-api-python-client")
Python
0
534db68d8f773c459788650590b6585fc0369e19
create a default permission handler for ObjectOwner
apps/Localizr/permissions.py
apps/Localizr/permissions.py
from rest_framework.permissions import IsAuthenticated, SAFE_METHODS


class IsObjectOwner(IsAuthenticated):
    """Object-level permission: any authenticated user may read; only the
    user recorded as the object's creator may modify it."""

    def has_object_permission(self, request, view, obj):
        # Read-only verbs (GET/HEAD/OPTIONS) are always allowed.
        if request.method in SAFE_METHODS:
            return True
        # Objects that do not track a creator can never be written here.
        if not hasattr(obj, 'created_by'):
            return False
        return obj.created_by == request.user
Python
0
f7d3ca5d537140e07ff95d082f2a78e86bc06604
Add flip
zl/indicators/flip.py
zl/indicators/flip.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2013 Jason Koelker # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections import numbers from zipline.transforms import utils as transforms BULL = 'Bull' BEAR = 'Bear' class Flip(object): __metaclass__ = transforms.TransformMeta def __init__(self, period=4, setup_price='close_price'): self.period = period self.setup_price = setup_price self.sid_windows = collections.defaultdict(self.create_window) def create_window(self): return FlipWindow(self.period, self.setup_price) def update(self, event): window = self.sid_windows[event.sid] window.update(event) return window() class FlipWindow(transforms.EventWindow): def __init__(self, period, setup_price): transforms.EventWindow.__init__(self, window_length=period + 2) self.period = period self.setup_price = setup_price def handle_add(self, event): assert hasattr(event, self.setup_price) value = getattr(event, self.setup_price, None) assert isinstance(value, numbers.Number) def handle_remove(self, event): pass def __call__(self): if len(self.ticks) < self.window_length: return Yp = getattr(self.ticks[-1], self.setup_price) Xp = getattr(self.ticks[-2], self.setup_price) X = getattr(self.ticks[0], self.setup_price) Y = getattr(self.ticks[1], self.setup_price) if (Xp > X) and (Yp < Y): return BEAR if (Xp < X) and (Yp > Y): return BULL
Python
0.000053
4cea1c1231da1583fb177e976f473fa52b9ec450
Fix SelectJmes documentation
scrapy/loader/processors.py
scrapy/loader/processors.py
"""
This module provides some commonly used processors for Item Loaders.

See documentation in docs/topics/loaders.rst
"""
from scrapy.utils.misc import arg_to_iter
from scrapy.utils.datatypes import MergeDict
from .common import wrap_loader_context


class MapCompose(object):
    """Apply each function to every element of the (iterable) input value.

    Each function may return a value or an iterable of values; results are
    flattened before the next function runs, so the element count may change.
    """

    def __init__(self, *functions, **default_loader_context):
        self.functions = functions
        self.default_loader_context = default_loader_context

    def __call__(self, value, loader_context=None):
        values = arg_to_iter(value)
        # Per-call context overrides the defaults captured at construction.
        if loader_context:
            context = MergeDict(loader_context, self.default_loader_context)
        else:
            context = self.default_loader_context
        wrapped_funcs = [wrap_loader_context(f, context) for f in self.functions]
        for func in wrapped_funcs:
            next_values = []
            for v in values:
                # arg_to_iter flattens whatever func returns (scalar or list).
                next_values += arg_to_iter(func(v))
            values = next_values
        return values


class Compose(object):
    """Pipe the whole input value through each function in turn.

    Unlike MapCompose, the value is passed as-is (not element-wise).  By
    default the chain stops as soon as a function returns None.
    """

    def __init__(self, *functions, **default_loader_context):
        self.functions = functions
        self.stop_on_none = default_loader_context.get('stop_on_none', True)
        self.default_loader_context = default_loader_context

    def __call__(self, value, loader_context=None):
        if loader_context:
            context = MergeDict(loader_context, self.default_loader_context)
        else:
            context = self.default_loader_context
        wrapped_funcs = [wrap_loader_context(f, context) for f in self.functions]
        for func in wrapped_funcs:
            if value is None and self.stop_on_none:
                break
            value = func(value)
        return value


class TakeFirst(object):
    """Return the first value that is neither None nor the empty string."""

    def __call__(self, values):
        for value in values:
            # Note: falsy-but-meaningful values such as 0 are kept.
            if value is not None and value != '':
                return value


class Identity(object):
    """Return the input unchanged."""

    def __call__(self, values):
        return values


class SelectJmes(object):
    """
    Query the input string for the jmespath (given at instantiation),
    and return the answer
    Requires : jmespath(https://github.com/jmespath/jmespath)
    Note: SelectJmes accepts only one input element at a time.
    """
    def __init__(self, json_path):
        self.json_path = json_path
        import jmespath
        # Compile once; searches reuse the compiled expression.
        self.compiled_path = jmespath.compile(self.json_path)

    def __call__(self, value):
        """Query value for the jmespath query and return answer
        :param value: a data structure (dict, list) to extract from
        :return: Element extracted according to jmespath query
        """
        return self.compiled_path.search(value)


class Join(object):
    """Join the input values with *separator* (a single space by default)."""

    def __init__(self, separator=u' '):
        self.separator = separator

    def __call__(self, values):
        return self.separator.join(values)
"""
This module provides some commonly used processors for Item Loaders.

See documentation in docs/topics/loaders.rst
"""
from scrapy.utils.misc import arg_to_iter
from scrapy.utils.datatypes import MergeDict
from .common import wrap_loader_context


class MapCompose(object):
    """Apply each function to every element of the (iterable) input value.

    Each function may return a value or an iterable of values; results are
    flattened before the next function runs, so the element count may change.
    """

    def __init__(self, *functions, **default_loader_context):
        self.functions = functions
        self.default_loader_context = default_loader_context

    def __call__(self, value, loader_context=None):
        values = arg_to_iter(value)
        # Per-call context overrides the defaults captured at construction.
        if loader_context:
            context = MergeDict(loader_context, self.default_loader_context)
        else:
            context = self.default_loader_context
        wrapped_funcs = [wrap_loader_context(f, context) for f in self.functions]
        for func in wrapped_funcs:
            next_values = []
            for v in values:
                # arg_to_iter flattens whatever func returns (scalar or list).
                next_values += arg_to_iter(func(v))
            values = next_values
        return values


class Compose(object):
    """Pipe the whole input value through each function in turn.

    Unlike MapCompose, the value is passed as-is (not element-wise).  By
    default the chain stops as soon as a function returns None.
    """

    def __init__(self, *functions, **default_loader_context):
        self.functions = functions
        self.stop_on_none = default_loader_context.get('stop_on_none', True)
        self.default_loader_context = default_loader_context

    def __call__(self, value, loader_context=None):
        if loader_context:
            context = MergeDict(loader_context, self.default_loader_context)
        else:
            context = self.default_loader_context
        wrapped_funcs = [wrap_loader_context(f, context) for f in self.functions]
        for func in wrapped_funcs:
            if value is None and self.stop_on_none:
                break
            value = func(value)
        return value


class TakeFirst(object):
    """Return the first value that is neither None nor the empty string."""

    def __call__(self, values):
        for value in values:
            # Note: falsy-but-meaningful values such as 0 are kept.
            if value is not None and value != '':
                return value


class Identity(object):
    """Return the input unchanged."""

    def __call__(self, values):
        return values


class SelectJmes(object):
    """
    Query the input string for the jmespath (given at instantiation),
    and return the answer
    Requires : jmespath(https://github.com/jmespath/jmespath)
    Note: SelectJmes accepts only one input element at a time.
    """
    def __init__(self, json_path):
        self.json_path = json_path
        import jmespath
        # Compile once; searches reuse the compiled expression.
        self.compiled_path = jmespath.compile(self.json_path)

    def __call__(self, value):
        """Query value for the jmespath query and return answer
        :param value: a data structure (dict, list) to extract from --
            jmespath searches parsed data, not a raw JSON string
        :return: Element extracted according to jmespath query
        """
        return self.compiled_path.search(value)


class Join(object):
    """Join the input values with *separator* (a single space by default)."""

    def __init__(self, separator=u' '):
        self.separator = separator

    def __call__(self, values):
        return self.separator.join(values)
Python
0
e07cc0ea6e56339d117fd5d81c0939b0c658727e
Create cnn.py
Classifying_datasets/Convolutional_Neural_Networks/Convolutional_Neural_Networks/cnn.py
Classifying_datasets/Convolutional_Neural_Networks/Convolutional_Neural_Networks/cnn.py
# Convolutional Neural Network # Part 1 - Building the CNN # Importing the Keras libraries and packages from keras.models import Sequential from keras.layers import Conv2D from keras.layers import MaxPooling2D from keras.layers import Flatten from keras.layers import Dense # Initialising the CNN classifier = Sequential() # Step 1 - Convolution classifier.add(Conv2D(64, (3, 3), input_shape = (64, 64, 3), activation = 'relu')) # Step 2 - Pooling classifier.add(MaxPooling2D(pool_size = (2, 2))) # Adding a second convolutional layer classifier.add(Conv2D(64, (3, 3), activation = 'relu')) classifier.add(MaxPooling2D(pool_size = (2, 2))) # Step 3 - Flattening classifier.add(Flatten()) # Step 4 - Full connection classifier.add(Dense(units = 512, activation = 'relu')) classifier.add(Dense(units = 1, activation = 'sigmoid')) # Compiling the CNN classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy']) # Part 2 - Fitting the CNN to the images from keras.preprocessing.image import ImageDataGenerator train_datagen = ImageDataGenerator(rescale = 1./255, shear_range = 0.2, zoom_range = 0.2, horizontal_flip = True) test_datagen = ImageDataGenerator(rescale = 1./255) training_set = train_datagen.flow_from_directory('dataset/training_set', target_size = (64, 64), batch_size = 32, class_mode = 'binary') test_set = test_datagen.flow_from_directory('dataset/test_set', target_size = (64, 64), batch_size = 32, class_mode = 'binary') classifier.fit_generator(training_set, steps_per_epoch = 8000, epochs = 25, validation_data = test_set, validation_steps = 2000)
Python
0.000003
5199ee1a544b2aa59895a1b22359d6a9adb765a3
Add .prepare-commit-msg.py
.prepare-commit-msg.py
.prepare-commit-msg.py
#!/usr/bin/env python # This script is an optional git hook and will prepend the issue # number to a commit message in the correct format for Github to parse. # # If you wish to use it, create a shortcut to this file in .git/hooks called # 'prepare-commit-msg' e.g. from top folder of your project: # ln -s ../../.prepare-commit-msg.py .git/hooks/prepare-commit-msg # # or, for Windows users: # mklink .git\hooks\prepare-commit-msg .prepare-commit-msg.py import sys import re from subprocess import check_output # By default, the hook will check to see if the branch name starts with # 'issue-' and will then prepend whatever follows in the commit message. # e.g. for a branch named 'issue-123', the commit message will start with # '[#123]' # If you wish to use a diferent prefix on branch names, change it here. issue_prefix = 'issue-' commit_msg_filepath = sys.argv[1] branch = check_output( ['git', 'symbolic-ref', '--short', 'HEAD'] ).strip().decode(encoding='UTF-8') if branch.startswith(issue_prefix): issue_number = re.match('%s(.*)' % issue_prefix, branch).group(1) print( f'prepare-commit-msg: Prepending [#{issue_number}] to commit message') with open(commit_msg_filepath, 'r+') as f: content = f.read() f.seek(0, 0) f.write(f'[#{issue_number}] {content}') else: print("prepare-commit-msg: No changes made to commit message")
Python
0.000011
ce28c5642c3ab543fc48e2f4f1f0b2f2a62890a2
Add script to extract information for playbook files
src/misc/parse_tool_playbook_yaml.py
src/misc/parse_tool_playbook_yaml.py
#!/usr/bin/env python # -*- coding: utf-8 -*- import sys import os import argparse import re import yaml def get_revision_number(yaml_content, tool_name): for tool in yaml_content['tools']: if tool["name"] == tool_name: if tool.has_key("revision"): print tool["revision"][0] def get_owner(yaml_content, tool_name): for tool in yaml_content['tools']: if tool["name"] == tool_name: print tool['owner'] if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--file', required=True) parser.add_argument('--tool_name', required=True) parser.add_argument('--tool_function', required=True) args = parser.parse_args() with open(args.file,'r') as yaml_file: yaml_content = yaml.load(yaml_file) functions = { 'get_revision_number': get_revision_number, 'get_owner': get_owner } functions[args.tool_function](yaml_content, args.tool_name)
Python
0
f8ee6bcd2742e1afb2645c5195d84bd9d2db06bb
Create utils.py
functions/utils.py
functions/utils.py
__all__ = ['isProductVersionOK', 'computePixelBlockExtents', 'computeCellSize', 'Projection', 'Trace']


## ----- ## ----- ## ----- ## ----- ## ----- ## ----- ## ----- ## ----- ##

def isProductVersionOK(productInfo, major, minor, build):
    """Return True when the product version is at least major.minor.build.

    Folds (major, minor, build) into a single comparable number; minor is
    scaled by 10 so e.g. 10.4 and 10.40 compare consistently.
    """
    v = productInfo['major']*1.e+10 + int(0.5+productInfo['minor']*10)*1.e+6 + productInfo['build']
    return v >= major*1e+10 + minor*1e+7 + build


def computePixelBlockExtents(tlc, shape, props):
    """Return the map-space extents (xMin, yMin, xMax, yMax) of a pixel block.

    tlc: (col, row) of the block's top-left corner within the parent raster.
    shape: block array shape; a leading band axis is skipped if present.
    props: parent raster properties ('extent', 'width', 'height').
    """
    nRows, nCols = shape if len(shape) == 2 else shape[1:]    # dimensions of request pixel block
    e, w, h = props['extent'], props['width'], props['height']  # dimensions of parent raster
    dX, dY = (e[2]-e[0])/w, (e[3]-e[1])/h                     # cell size of parent raster
    xMin, yMax = e[0]+tlc[0]*dX, e[3]-tlc[1]*dY               # top-left corner of request on map
    return (xMin, yMax-nRows*dY, xMin+nCols*dX, yMax)         # extents of request on map


def computeCellSize(props, sr=None, proj=None):
    """Return the (dX, dY) cell size of the parent raster.

    With *sr* given, the extent corners are first reprojected from the
    raster's spatial reference to EPSG code *sr* (reusing *proj* when
    provided to avoid re-importing pyproj per call).
    """
    e, w, h = props['extent'], props['width'], props['height']  # dimensions of parent raster
    if sr is None:
        return (e[2]-e[0])/w, (e[3]-e[1])/h                   # cell size of parent raster

    if proj is None:
        proj = Projection()

    # reproject extents
    (xMin, xMax) = proj.transform(props['spatialReference'], sr, e[0], e[2])
    (yMin, yMax) = proj.transform(props['spatialReference'], sr, e[1], e[3])
    return (xMax-xMin)/w, (yMax-yMin)/h                       # cell size of parent raster


## ----- ## ----- ## ----- ## ----- ## ----- ## ----- ## ----- ## ----- ##

class Projection():
    """Thin caching wrapper over pyproj EPSG-to-EPSG coordinate transforms.

    pyproj is imported lazily so this module loads even where pyproj is
    absent; Proj objects are rebuilt only when the EPSG code changes.
    """

    def __init__(self):
        pyprojModule = __import__('pyproj')
        self._inProj, self._outProj = None, None
        self._inEPSG, self._outEPSG = -1, -1
        self._projClass = getattr(pyprojModule, 'Proj')
        self._transformFunc = getattr(pyprojModule, 'transform')

    def transform(self, inEPSG, outEPSG, x, y):
        """Transform (x, y) from inEPSG to outEPSG; x/y may be sequences."""
        if inEPSG != self._inEPSG:
            self._inProj = self._projClass("+init=EPSG:{0}".format(inEPSG))
            self._inEPSG = inEPSG
        if outEPSG != self._outEPSG:
            self._outProj = self._projClass("+init=EPSG:{0}".format(outEPSG))
            self._outEPSG = outEPSG
        return self._transformFunc(self._inProj, self._outProj, x, y)


## ----- ## ----- ## ----- ## ----- ## ----- ## ----- ## ----- ## ----- ##

class Trace():
    """Windows-only debug logger via kernel32 OutputDebugStringA.

    NOTE(review): ctypes.windll exists only on Windows; constructing this
    class elsewhere raises AttributeError.
    """

    def __init__(self):
        ctypes = __import__('ctypes')
        self.trace = ctypes.windll.kernel32.OutputDebugStringA
        self.trace.argtypes = [ctypes.c_char_p]
        self.c_char_p = ctypes.c_char_p

    def log(self, s):
        """Emit *s* to the debugger and return it unchanged (chainable)."""
        self.trace(self.c_char_p(s.encode('utf-8')))
        return s

## ----- ## ----- ## ----- ## ----- ## ----- ## ----- ## ----- ## ----- ##
Python
0
24c763ead7af8a669ff1055b3f352f513274a47f
Insert a note at a specific position in a linked list
all-domains/data-structures/linked-lists/insert-a-node-at-a-specific-positin-in-a-linked-list/solution.py
all-domains/data-structures/linked-lists/insert-a-node-at-a-specific-positin-in-a-linked-list/solution.py
# https://www.hackerrank.com/challenges/insert-a-node-at-a-specific-position-in-a-linked-list # Python 2 """ Insert Node at a specific position in a linked list head input could be None as well for empty list Node is defined as class Node(object): def __init__(self, data=None, next_node=None): self.data = data self.next = next_node return back the head of the linked list in the below method. """ # This is a "method-only" submission. # You only need to complete this method. def InsertNth(head, data, position): if head is None: return Node(data=data) else: current = head if position == 0: node_to_insert = Node(data=data, next_node=current) return node_to_insert else: prev = None for i in xrange(position): prev = current current = current.next new_node = Node(data=data) prev.next = new_node new_node.next = current return head # def display_linked_list(head): # s = '' # while True: # s += '{}->'.format(head.data) # if head.next == None: # break # else: # head = head.next # s += 'NULL' # print(s) # # # # LL = Node(1) # c = Node(3) # b = Node(2, c) # head = Node(1, b) # # head = InsertNth(head, 'x', 1) # # display_linked_list(head)
Python
0.000001
ea7f1aba46d5dd468635812b3bca435b446b0846
Add test for changeset
tests/commands/test_changeset.py
tests/commands/test_changeset.py
# -*- encoding: utf-8 -*-
# Tests for the `cfn changeset` CLI subcommands.  All boto3 calls are
# mocked; each test writes a minimal stack config to a tmpdir and asserts
# that the CLI translates it into the expected CloudFormation API call.

import mock
from awscfncli import cfn
from click.testing import CliRunner

__author__ = 'ray'
__date__ = '1/14/17'


def test_cfn_changeset_create(tmpdir):
    """`changeset create` should call create_change_set and wait on it."""
    with mock.patch('boto3.client', return_value=mock.Mock()) as mock_client:
        mock_config = \
            """
            Stack:
              Region:               us-east-1
              StackName:            ExampleStack
              TemplateURL:          https://s3.amazonaws.com/example.template
            """
        path = tmpdir.join('config.yml')
        path.write(mock_config)

        mock_client.return_value.create_change_set.return_value = {
            'Id': 'MockId'
        }

        runner = CliRunner()
        runner.invoke(cfn, ['changeset', 'create', path.strpath,
                            'change_name'])

        # The config values must be forwarded verbatim to the API call.
        mock_client.return_value.create_change_set.assert_called_with(
            StackName='ExampleStack',
            ChangeSetName='change_name',
            TemplateURL='https://s3.amazonaws.com/example.template',
            ChangeSetType='UPDATE'
        )
        # The command should block on a waiter after creating the set.
        mock_client.return_value.get_waiter.assert_called_once()


def test_cfn_changeset_describe(tmpdir):
    """`changeset describe` should fetch and render the change set."""
    with mock.patch('boto3.client', return_value=mock.Mock()) as mock_client:
        mock_config = \
            """
            Stack:
              Region:               us-east-1
              StackName:            ExampleStack
              TemplateURL:          https://s3.amazonaws.com/example.template
            """
        path = tmpdir.join('config.yml')
        path.write(mock_config)

        # One change of each action kind so all rendering branches run.
        mock_client.return_value.describe_change_set.return_value = {
            'ChangeSetName': 'MockName',
            'ExecutionStatus': 'AVAILABLE',
            'Status': 'CREATE_PENDING',
            'Description': 'MockDescription',
            'StatusReason': 'MockStatusReason',
            'Changes': [
                {
                    'ResourceChange': {
                        'Action': 'Add',
                        'LogicalResourceId': 'MockId',
                        'ResourceType': 'EC2',
                        'PhysicalResourceId': 'MockId',
                        'Replacement': False,
                        'Scope': 'MockScope'
                    }
                },
                {
                    'ResourceChange': {
                        'Action': 'Modify',
                        'LogicalResourceId': 'MockId',
                        'ResourceType': 'EC2',
                        'PhysicalResourceId': 'MockId',
                        'Replacement': False,
                        'Scope': 'MockScope'
                    }
                },
                {
                    'ResourceChange': {
                        'Action': 'Delete',
                        'LogicalResourceId': 'MockId',
                        'ResourceType': 'EC2',
                        'PhysicalResourceId': 'MockId',
                        'Replacement': False,
                        'Scope': 'MockScope'
                    }
                }
            ]
        }

        runner = CliRunner()
        runner.invoke(cfn, ['changeset', 'describe',
                            path.strpath, 'change_name'])

        mock_client.return_value.describe_change_set.assert_called_with(
            StackName='ExampleStack',
            ChangeSetName='change_name'
        )


def test_cfn_changeset_execute(tmpdir):
    """`changeset execute` should run the set and wait for completion."""
    with mock.patch('boto3.client', return_value=mock.Mock()) as mock_client, \
            mock.patch('boto3.resource', return_value=mock.Mock()) as mock_resource:
        mock_config = \
            """
            Stack:
              Region:               us-east-1
              StackName:            ExampleStack
              TemplateURL:          https://s3.amazonaws.com/example.template
            """
        path = tmpdir.join('config.yml')
        path.write(mock_config)

        # Stack must look like it is awaiting review for execute to proceed.
        mock_stack = mock.Mock()
        mock_stack.stack_status = 'REVIEW_IN_PROGRESS'
        mock_resource.return_value.Stack.return_value = mock_stack

        runner = CliRunner()
        runner.invoke(cfn, ['changeset', 'execute', path.strpath,
                            'change_name'])

        mock_client.return_value.execute_change_set.assert_called_with(
            StackName='ExampleStack',
            ChangeSetName='change_name'
        )
        mock_client.return_value.get_waiter.assert_called_once()


def test_cfn_changeset_list(tmpdir):
    """`changeset list` should query change sets by the stack's id."""
    with mock.patch('boto3.client', return_value=mock.Mock()) as mock_client, \
            mock.patch('boto3.resource', return_value=mock.Mock()) as mock_resource:
        mock_config = \
            """
            Stack:
              Region:               us-east-1
              StackName:            ExampleStack
              TemplateURL:          https://s3.amazonaws.com/example.template
            """
        path = tmpdir.join('config.yml')
        path.write(mock_config)

        mock_stack = mock.Mock()
        mock_stack.stack_id = 'MockId'
        mock_resource.return_value.Stack.return_value = mock_stack

        mock_client.return_value.list_change_sets.return_value = {
            'Summaries': [
                {
                    'ChangeSetName': 'MockName1',
                    'ChangeSetId': 'MockId1',
                    'Description': 'MockDescription',
                    'ExecutionStatus': 'AVAILABLE',
                    'Status': 'CREATE_PENDING',
                    'StatusReason': 'MockStatusReason'
                },
                {
                    'ChangeSetName': 'MockName2',
                    'ChangeSetId': 'MockId2',
                    'Description': 'MockDescription',
                    'ExecutionStatus': 'AVAILABLE',
                    'Status': 'CREATE_PENDING',
                    'StatusReason': 'MockStatusReason'
                }
            ]
        }

        runner = CliRunner()
        runner.invoke(cfn, ['changeset', 'list', path.strpath])

        # Listing is keyed by the resolved stack id, not the config name.
        mock_client.return_value.list_change_sets.assert_called_with(
            StackName=mock_stack.stack_id,
        )
Python
0
db914944615f16c4b170e7dfd428901d5fc29271
Add test for image.fromstring - refs #1805
tests/python_tests/image_test.py
tests/python_tests/image_test.py
#!/usr/bin/env python # -*- coding: utf-8 -*- import sys import os, mapnik from timeit import Timer, time from nose.tools import * from utilities import execution_path def setup(): # All of the paths used are relative, if we run the tests # from another directory we need to chdir() os.chdir(execution_path('.')) def test_image_open_from_string(): filepath = '../data/images/dummy.png' im1 = mapnik.Image.open(filepath) im2 = mapnik.Image.fromstring(open(filepath,'rb').read()) eq_(im1.width(),im2.width()) length = len(im1.tostring()) eq_(length,len(im2.tostring())) eq_(len(mapnik.Image.fromstring(im1.tostring('png')).tostring()),length) eq_(len(mapnik.Image.fromstring(im1.tostring('jpeg')).tostring()),length) eq_(len(mapnik.Image.fromstring(im1.tostring('tiff')).tostring()),length) if __name__ == "__main__": setup() [eval(run)() for run in dir() if 'test_' in run]
Python
0
dedcdaf1a55b08c275af29c535a7ae574b8ee5d2
Add 20150517 question.
LeetCode/number_of_islands.py
LeetCode/number_of_islands.py
"""
Given a 2d grid map of '1's (land) and '0's (water), count the number of
islands. An island is surrounded by water and is formed by connecting
adjacent lands horizontally or vertically. You may assume all four edges
of the grid are all surrounded by water.

Example 1:
11110
11010
11000
00000
Answer: 1

Example 2:
11000
11000
00100
00011
Answer: 3

Tags: DFS, BFS
Difficulty: Medium
"""


class Solution:
    # @param {character[][]} grid
    # @return {integer}
    def numIslands(self, grid):
        """Count 4-directionally connected groups of '1' cells in *grid*.

        Handles empty grids and empty rows; runs in O(rows * cols).
        """
        if not grid or not grid[0]:
            return 0
        height = len(grid)
        width = len(grid[0])
        visited = [[False] * width for _ in range(height)]
        count = 0
        for i in range(height):
            for j in range(width):
                # Each unvisited land cell starts a new island; flood-fill
                # marks the whole component so it is counted exactly once.
                if grid[i][j] == '1' and not visited[i][j]:
                    self.bfs(grid, visited, [(i, j)])
                    count += 1
        return count

    def bfs(self, grid, visited, to_be_visited):
        """Flood-fill all land reachable from the seed cells, marking visited.

        Purely iterative worklist loop: the original version recursed once
        per remaining worklist entry, which overflows the interpreter stack
        on large grids.  Popping from the end makes this depth-first order
        despite the historical method name.
        """
        while to_be_visited:
            x, y = to_be_visited.pop()
            if visited[x][y] or grid[x][y] == '0':
                continue
            visited[x][y] = True
            # Enqueue the 4 in-bounds neighbours; revisits are filtered
            # by the check at the top of the loop.
            if x > 0:
                to_be_visited.append((x - 1, y))
            if x < len(visited) - 1:
                to_be_visited.append((x + 1, y))
            if y > 0:
                to_be_visited.append((x, y - 1))
            if y < len(visited[0]) - 1:
                to_be_visited.append((x, y + 1))
Python
0.000001
681c67381eef9384845e0041214011797be6ea03
Create text2hex.py
text2hex.py
text2hex.py
# Program Name : text2hex # Programmer : The Alpha # Credits : Iranpython.blog.ir # Version : 0.91(Beta Version) # Linted By : Pyflakes # Info : text2hex is a simple tool that uses to convert strings to hex. from PyQt4.QtCore import * from PyQt4.QtGui import * import sys import binascii class TextToHex(QDialog): def __init__(self): QDialog.__init__(self) self.setWindowTitle("Text2Hex") layout = QGridLayout() self.label_cp = QLabel("<b><code><h3>pystudent copyright</h3></code></b>") label_text = QLabel("<b><code><h3>Text :</h3></code></b>") self.line_edit_text = QLineEdit() label_hex = QLabel("<b><code><h3>Hex :</h3></code></b>") self.line_edit_hex = QLineEdit() self.line_edit_hex.setReadOnly(True) self.convert_button = QPushButton("Convert") self.exit_button = QPushButton("Exit") layout.addWidget(label_text, 0, 0) layout.addWidget(self.line_edit_text, 0, 1) layout.addWidget(label_hex, 1, 0) layout.addWidget(self.line_edit_hex, 1, 1) layout.addWidget(self.convert_button, 2, 0) layout.addWidget(self.label_cp, 2, 1) layout.addWidget(self.exit_button, 2, 2) self.convert_button.clicked.connect(self.convertor) self.exit_button.clicked.connect(self.close) self.setLayout(layout) def convertor(self): data = self.line_edit_text.text() hex_text = binascii.hexlify(bytes(data, 'utf-8')) hex_text = str(hex_text) hex_text = hex_text.replace("b'", "") hex_text = hex_text.replace("'", "") hex_text = "0x"+hex_text self.line_edit_hex.setText(hex_text) if hex_text == "0x": self.line_edit_hex.setText("") app = QApplication(sys.argv) dialog = TextToHex() dialog.show() app.exec_()
Python
0.001615
dce13f074187cb95644b0ac3cfd84d1e0649f93c
Fix bytes/str handling in disqus SSO.
mezzanine/generic/templatetags/disqus_tags.py
mezzanine/generic/templatetags/disqus_tags.py
from __future__ import unicode_literals from future.builtins import bytes, int import base64 import hashlib import hmac import json import time from mezzanine import template register = template.Library() @register.simple_tag def disqus_id_for(obj): """ Returns a unique identifier for the object to be used in DISQUS JavaScript. """ return "%s-%s" % (obj._meta.object_name, obj.id) @register.inclusion_tag("generic/includes/disqus_sso.html", takes_context=True) def disqus_sso_script(context): """ Provides a generic context variable which adds single-sign-on support to DISQUS if ``COMMENTS_DISQUS_API_PUBLIC_KEY`` and ``COMMENTS_DISQUS_API_SECRET_KEY`` are specified. """ settings = context["settings"] public_key = getattr(settings, "COMMENTS_DISQUS_API_PUBLIC_KEY", "") secret_key = getattr(settings, "COMMENTS_DISQUS_API_SECRET_KEY", "") user = context["request"].user if public_key and secret_key and user.is_authenticated(): context["public_key"] = public_key context["sso_data"] = _get_disqus_sso(user, public_key, secret_key) return context def _get_disqus_sso(user, public_key, secret_key): # Based on snippet provided on http://docs.disqus.com/developers/sso/ # create a JSON packet of our data attributes data = json.dumps({ 'id': '%s' % user.id, 'username': user.username, 'email': user.email, }) # encode the data to base64 message = base64.b64encode(bytes(data, encoding="utf8")) # generate a timestamp for signing the message timestamp = int(time.time()) # generate our hmac signature sig = hmac.HMAC(bytes(secret_key, encoding="utf8"), bytes('%s %s' % (message, timestamp), encoding="utf8"), hashlib.sha1).hexdigest() # Messages are of the form <message> <signature> <timestamp> return '%s %s %s' % (message, sig, timestamp)
from __future__ import unicode_literals from future.builtins import int, str import base64 import hashlib import hmac import json import time from mezzanine import template register = template.Library() @register.simple_tag def disqus_id_for(obj): """ Returns a unique identifier for the object to be used in DISQUS JavaScript. """ return "%s-%s" % (obj._meta.object_name, obj.id) @register.inclusion_tag("generic/includes/disqus_sso.html", takes_context=True) def disqus_sso_script(context): """ Provides a generic context variable which adds single-sign-on support to DISQUS if ``COMMENTS_DISQUS_API_PUBLIC_KEY`` and ``COMMENTS_DISQUS_API_SECRET_KEY`` are specified. """ settings = context["settings"] public_key = getattr(settings, "COMMENTS_DISQUS_API_PUBLIC_KEY", "") secret_key = getattr(settings, "COMMENTS_DISQUS_API_SECRET_KEY", "") user = context["request"].user if public_key and secret_key and user.is_authenticated(): context["public_key"] = public_key context["sso_data"] = _get_disqus_sso(user, public_key, secret_key) return context def _get_disqus_sso(user, public_key, secret_key): # Based on snippet provided on http://docs.disqus.com/developers/sso/ # create a JSON packet of our data attributes data = json.dumps({ 'id': '%s' % user.id, 'username': user.username, 'email': user.email, }) # encode the data to base64 message = base64.b64encode(data) # generate a timestamp for signing the message timestamp = int(time.time()) # generate our hmac signature sig = hmac.HMAC(str(secret_key), '%s %s' % (message, timestamp), hashlib.sha1).hexdigest() # Messages are of the form <message> <signature> <timestamp> return '%s %s %s' % (message, sig, timestamp)
Python
0
913a77592a9f399820cddbc7753c24182ad21639
Add options for plots
src/rnaseq_lib/plot/opts.py
src/rnaseq_lib/plot/opts.py
gene_curves = { 'Curve': {'plot': dict(height=120, width=600, tools=['hover'], invert_xaxis=True, yrotation=45, yaxis='left'), 'style': dict(line_width=1.5)}, 'Curve.Percentage_of_Normal_Samples': {'plot': dict(xaxis=None, invert_yaxis=True), 'style': dict(color='Blue')}, 'Curve.Gene_Expression': {'plot': dict(xaxis=None), 'style': dict(color='Green')}, 'Curve.Log2_Fold_Change': {'plot': dict(height=150), 'style': dict(color='Purple')}, 'Scatter': {'style': dict(color='red', size=3)}} gene_kde = {}
Python
0.000001
9fb860e0c5b0ff6e696b8102197c3255f7b2d3d7
The goods
graph_role_deps.py
graph_role_deps.py
#!/usr/bin/python '''Graphs role dependencies in roles/ as a graphviz digraph''' import os import yaml import sys print 'digraph {' for role in os.listdir('./roles'): try: with open('./roles/%s/meta/main.yml' % role) as meta: data = yaml.load(meta) except Exception as exc: print >>sys.stderr, 'Skipping %s: %r' % (role, exc) continue try: deps = data['dependencies'] except Exception as exc: print >>sys.stderr, 'Skipping %s: %r' % (role, exc) continue print '\t"%s" -> {' % role, for dep in deps: print >>sys.stderr, 'dep:', dep name = dep['role'] print '"%s"' % name, print '}' print '}'
Python
0.998626
fa1e30635f57aaffdc74eaa307b8c74f89bf50ae
add base gender choices object
accelerator_abstract/models/base_gender_choices.py
accelerator_abstract/models/base_gender_choices.py
# MIT License # Copyright (c) 2017 MassChallenge, Inc. from __future__ import unicode_literals from django.db import models from accelerator_abstract.models.accelerator_model import AcceleratorModel GENDER_MALE_CHOICE = "Male" GENDER_FEMALE_CHOICE = "Female" GENDER_CISGENDER_CHOICE = "Cisgender" GENDER_TRANSGENDER_CHOICE = "Transgender" GENDER_NON_BINARY_CHOICE = "Non-Binary" GENDER_PREFER_TO_SELF_DESCRIBE_CHOICE = "I Prefer To Self-describe" GENDER_PREFER_NOT_TO_SAY_CHOICE = "I Prefer Not To Say" GENDER_CHOICES = ( GENDER_MALE_CHOICE, GENDER_FEMALE_CHOICE, GENDER_CISGENDER_CHOICE, GENDER_TRANSGENDER_CHOICE, GENDER_NON_BINARY_CHOICE, GENDER_PREFER_TO_SELF_DESCRIBE_CHOICE, GENDER_PREFER_NOT_TO_SAY_CHOICE ) class BaseGenderChoices(AcceleratorModel): name = models.CharField(max_length=255, unique=True) class Meta(AcceleratorModel.Meta): db_table = 'accelerator_genderchoices' abstract = True ordering = ['name', ] verbose_name = "Gender Choice" verbose_name_plural = "Gender Choices"
Python
0.001809
b55ef35a68305269e8a49a8afcdf46d94d06361f
add drf module
src/common/drf.py
src/common/drf.py
from rest_framework.exceptions import APIException


class ServiceUnavailable(APIException):
    """Raised when a backing service is down; DRF renders it as HTTP 503."""

    default_code = 'service_unavailable'
    default_detail = 'Service temporarily unavailable, try again later.'
    status_code = 503
Python
0.000001
cdfee7e893564157e2143f20dea0b10c8bd33cfb
Create pythonLock.py
ving2/pythonLock.py
ving2/pythonLock.py
from threading import Thread
from threading import Lock

# Shared counter mutated by both worker functions; the lock passed in makes
# each increment/decrement atomic with respect to the other thread.
i = 0


def someThreadFunction1(lock, iterations=1000000):
    """Increment the global counter *iterations* times, one step per lock hold.

    *iterations* is a new backward-compatible parameter defaulting to the
    originally hard-coded count.
    """
    # In Python you "import" a global variable, instead of "export"ing it
    # when you declare it.
    global i
    for j in range(0, iterations):
        # 'with' releases the lock even if the body raises, unlike the
        # original bare acquire()/release() pair.
        with lock:
            i += 1


def someThreadFunction2(lock, iterations=1000000):
    """Decrement the global counter *iterations* times, one step per lock hold."""
    global i
    for j in range(0, iterations):
        with lock:
            i -= 1


def main():
    """Run one incrementing and one decrementing thread; prints 0 when done."""
    lock = Lock()
    someThread1 = Thread(target=someThreadFunction1, args=([lock]))
    someThread1.start()
    someThread2 = Thread(target=someThreadFunction2, args=([lock]))
    someThread2.start()
    someThread1.join()
    someThread2.join()
    print(i)


if __name__ == "__main__":
    # Guarded so importing this module no longer spawns two million-iteration
    # threads as a side effect; run the file directly to see the demo.
    main()
Python
0.00002
c894e509f14cd671eaa49a5d6608bf773a8838c2
Create updaterepo.py
updaterepo.py
updaterepo.py
from os import system as s # s will serve as an easy way to send a command to the system from os import path, remove, listdir import hashlib, shutil, ftplib, gnupg news = listdir('/REPODIRECTORY/new') # Taking inventory of all new packages, placed in a "/new" directory for entry in news: enpath = '/REPODIRECTORY/new/%s' % entry if path.isdir(enpath): # Checking to see if any packages (in directory form, with the DEBIAN directory) have yet to be packaged makedeb = 'dpkg -b %s' % enpath s(makedeb) # Packaging any not-yet-packaged packages shutil.rmtree(enpath) # Deleting the now-packaged package's folder news = listdir('/REPODIRECTORY/new') # Taking inventory of all new packages for file in news: newf = path.join('/REPODIRECTORY/new', file) newfm = path.join('/REPODIRECTORY', file) shutil.move(newf, newfm) # Moving all new packages into the repo root, so they can be accounted for when creating the Packages index remove('Packages') # Removing the old Packages index files remove('Packages.gz') remove('Packages.bz2') s('sudo dpkg-scanpackages -m . /dev/null >Packages') # Creating the Pacakges file s('bzip2 -fks Packages') # Creating the Packages.bz2 file s('gzip -f Packages') # Turning the Packages file into the Packages.gz file s('sudo dpkg-scanpackages -m . 
/dev/null >Packages') # Creating another Packages file m1 = hashlib.md5(open('Packages').read()).hexdigest() # Calculating checksums for each Packages index file m2 = hashlib.md5(open('Packages.gz').read()).hexdigest() m3 = hashlib.md5(open('Packages.bz2').read()).hexdigest() s1 = path.getsize('Packages') # Getting file size of each Packages index files s2 = path.getsize('Packages.gz') s3 = path.getsize('Packages.bz2') sums = '%s %s Packages\n%s %s Packages.gz\n%s %s Packages.bz2\n' % (m1, s1, m2, s2, m3, s3) with open("Release", "r+") as f: # Writing the sums & file sizes of the Packages index files to the Release file old = f.read() old = old[:XXX] ### This XXX varies on how long the Release file is, as this line skips to the end of the Release file to tag on the sums f.seek(0) f.write(old + sums) gpg = gnupg.GPG() nosign = open('Release', "rb") # Signing the Release file signed = gpg.sign_file(nosign, keyid='GPGSIGNATUREID', passphrase='GPGSIGNATUREPASSWORD') remove('Release.gpg') # Removing the old Release.gpg signed file open("Release.gpg", "w").write(str(signed)[XXX:]) # Create and write signature data to Release.gpg # On the line above, the XXX varies on how long the Release file is, as gpg.sign_file from 5 lines up outputs more than Cydia wants session = ftplib.FTP('FTPADDRESS','FTPUSERNAME','FTPPASSWORD') # Setting up a FTP connection ftplib.FTP.cwd(session,'/FTPUPLOADDIRECTORY/') news.append('Packages') # Preparing files for upload (only new packages, and the index files) news.append('Packages.gz') news.append('Packages.bz2') news.append('Release') news.append('Release.gpg') for file in news: # Upload each file, and print as each file is uploaded upl = open(file, 'rb') upcmd = 'STOR %s' % file session.storbinary(upcmd, upl) print '%s uploaded.' % file upl.close() print 'Finished uploads.'
Python
0