Dataset schema (one record per commit):

commit: string, 40 to 40 characters (full SHA-1 hash)
subject: string, 1 to 3.25k characters (commit message subject)
old_file: string, 4 to 311 characters
new_file: string, 4 to 311 characters
old_contents: string, 0 to 26.3k characters (file contents before the commit)
lang: string, 3 classes
proba: float64, 0 to 1
diff: string, 0 to 7.82k characters
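Each record below pairs a commit hash and subject with one file's pre-commit contents and the diff that commit applied. A minimal sketch of how rows with this schema could be consumed, assuming they are published as a Hugging Face dataset (the id "user/commit-diffs" is a placeholder, not a confirmed name):

    # Hypothetical loader; assumes the `datasets` library and a placeholder dataset id.
    from datasets import load_dataset

    rows = load_dataset("user/commit-diffs", split="train")  # placeholder id
    for row in rows:
        # `old_contents` holds the file before the commit; `diff` holds the
        # change that the `subject` line describes.
        print(row["commit"][:8], row["lang"], row["new_file"], "-", row["subject"])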
2ab9c74041b998e1cad3a7a9c1f5be6feb7b63ac
Add todo for config validation
scd/config.py
scd/config.py
# -*- coding: utf-8 -*-

from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals

import collections
import logging
import os.path

import six

import scd.files
import scd.utils
import scd.version


Parser = collections.namedtuple("Parser", ["name", "func"])


@six.python_2_unicode_compatible
class Config(object):

    __slots__ = "raw", "configpath"

    def __init__(self, configpath, config):
        self.raw = config
        self.configpath = os.path.abspath(configpath)

    @property
    def project_directory(self):
        return os.path.dirname(self.configpath)

    @property
    def version_scheme(self):
        return self.raw["version"].get("scheme", "semver")

    @property
    @scd.utils.lru_cache()
    def version(self):
        plugins = scd.utils.get_version_plugins()
        return plugins[self.version_scheme](self)

    @property
    def version_number(self):
        return self.raw["version"]["number"]

    @property
    def files(self):
        return [scd.files.File(self, conf) for conf in self.raw["files"]]

    @property
    def replacement_patterns(self):
        return self.raw["replacement_patterns"]

    @property
    def search_patterns(self):
        return self.raw["search_patterns"]

    @property
    def defaults(self):
        return self.raw["defaults"]

    def __str__(self):
        return "<Config(path={0.configpath}, raw={0.raw})".format(self)

    __repr__ = __str__


def get_parsers():
    parsers = []

    try:
        import simplejson
    except ImportError:
        logging.debug("Use default json as JSON config parser.")
        import json
        parsers.append(Parser("JSON", json.loads))
    else:
        logging.debug("Use simplejson as JSON config parser.")
        parsers.append(Parser("JSON", simplejson.loads))

    try:
        import yaml
    except ImportError:
        logging.debug("PyYAML is not importable.")
        try:
            import ruamel.yaml
        except ImportError:
            logging.debug("ruamel.yaml is not importable.")
        else:
            logging.debug("Use ruamel.yaml for YAML config parser.")
            parsers.append(Parser("YAML", ruamel.yaml.safe_load))
    else:
        logging.debug("Use PyYAML for YAML config parser.")
        parsers.append(Parser("YAML", yaml.safe_load))

    try:
        import toml
    except ImportError:
        logging.debug("toml is not importable.")
    else:
        logging.debug("Use toml for TOML config parser.")
        parsers.append(Parser("TOML", toml.loads))

    return parsers


def parse(fileobj):
    content = fileobj.read()
    if not isinstance(content, six.string_types):
        content = content.decode("utf-8")

    for parser in get_parsers():
        try:
            parsed = parser.func(content)
            break
        except Exception as exc:
            logging.warning("Cannot parse %s: %s", parser.name, exc)
    else:
        raise ValueError("Cannot parse {0}".format(fileobj.name))

    return Config(fileobj.name, parsed)
Python
0
@@ -17,16 +17,41 @@
 f-8 -*-
+# TODO Config validation
 

from _
@@ -2855,16 +2855,77 @@
 ontent)
+ logging.info("Parsed config as %s", parser.name)
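Applied to the file above, the first hunk adds a "# TODO Config validation" comment right after the coding declaration, and the second logs successful parses, so the loop in parse() becomes:

    for parser in get_parsers():
        try:
            parsed = parser.func(content)
            logging.info("Parsed config as %s", parser.name)
            break
        except Exception as exc:
            logging.warning("Cannot parse %s: %s", parser.name, exc)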
398157d2b7e42a5de028af2a074c4465c6360e13
add timestamps to result in mongo
results.py
results.py
# experiment result wrapper

import numpy as np
import uuid
import h5py

from git import Repo
from pymongo import MongoClient

path_to_repo = '~/Documents/Thesis/latent_ssvm'
path_to_datafile = '/home/dmitry/Documents/Thesis/latent_ssvm/notebooks/experiment_data.hdf5'


class experiment(object):
    def __init__(self, f):
        self.f = f

    def __call__(self, description, *args, **kwargs):
        result = None
        try:
            result = self.f(*args, **kwargs)
        except:
            raise
        result.save(description)
        return result


class ExperimentResult(object):
    def __init__(self, data, meta, is_new=True):
        # meta information, comments, parameters
        # this will be stored in mongodb
        self.meta = meta
        if is_new:
            repo = Repo(path_to_repo)
            self.meta['commit_hash'] = repo.head.commit.hexsha
            # unique experiment identifier
            self.meta['id'] = uuid.uuid1().hex
        # generated data like scores per iteration, model parameters
        # stored in hdf5 file
        self.data = data

    def save_data(self):
        f = h5py.File(path_to_datafile, 'a', libver='latest')
        grp = f[self.meta['dataset_name']].create_group(self.meta['id'])
        for k in self.data.keys():
            grp.create_dataset(k, data=self.data[k])
        f.close()
        return grp.id.id

    def save_meta(self):
        client = MongoClient()
        client['lSSVM']['base'].insert(self.meta)
        client.disconnect()

    def save(self, description=''):
        self.description = description
        self.save_meta()
        self.save_data()
        return self.id

    @staticmethod
    def load(exp_id):
        client = MongoClient()
        meta = client['lSSVM']['base'].find_one({'id' : exp_id})
        f = h5py.File(path_to_datafile, 'r', libver='latest')
        grp = f[meta[u'dataset_name']][exp_id]
        data = {}
        for k in grp.keys():
            data[k] = np.empty(grp[k].shape)
            grp[k].read_direct(data[k])
        f.close()
        return ExperimentResult(data, meta, is_new=False)

    @property
    def description(self):
        return self.meta['description']

    @description.setter
    def description(self, description_):
        self.meta['description'] = description_

    @property
    def id(self):
        return self.meta['id']
Python
0.000007
@@ -120,16 +120,68 @@
 oClient
+from datetime import datetime
from time import time
 

path_t
@@ -1022,16 +1022,114 @@
 1().hex
+ self.meta['now'] = datetime.now().ctime()
 self.meta['timestamp'] = time()
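Reconstructed from the hunks, the commit imports datetime and time and stamps each new result's metadata in __init__:

    self.meta['id'] = uuid.uuid1().hex
    self.meta['now'] = datetime.now().ctime()
    self.meta['timestamp'] = time()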
d104046a008bda71bf80f607d4f974a59050548b
Remove debug message
results.py
results.py
# Results management module
#
import matchupsv2 as matchups_store
import players
import predictions


def get_player_prediction(player_id, preds):
    result = []
    for prediction in preds:
        if prediction['player'] == player_id and int(prediction['winner']) > 0:
            p = prediction.copy()
            del p['player']
            result.append(p)
    return result


def get_winner(player, winners):
    for winner in winners:
        if winner['player'] == player:
            return int(winner['winner'])
    return None


def get_matchup(matchups, home, away):
    for matchup in list(matchups.values()):
        if matchup['home'] == int(home) and matchup['away'] == int(away):
            return matchup
    return None


def get_final_winner(year, teams):
    matchups = matchups_store.get_matchups(year, 4)
    result = calculate_matchup_result(matchups[0], teams)
    if result['winner'] != 0:
        return result['winner']
    return 0


def calculate_pts_old(player_id, preds, matchups, teams, winner, final_winner):
    pts = 0
    results = calculate_results(player_id, preds, matchups, teams)
    for result in results:
        if result['has_winner']:
            pts = pts + 5
            pts = pts + int(result['winner']['conferenceRank'])
        if result['has_games']:
            pts = pts + 10

    print('Winner', winner, 'final winner', final_winner)
    if winner == final_winner:
        pts = pts + 50
    return pts


def calculate_pts(player_id, preds, matchups, teams, winner, final_winner):
    pts = 0
    results = calculate_results(player_id, preds, matchups, teams)
    for result in results:
        if result['has_winner']:
            pts = pts + 10
            pts = pts + int(result['winner']['conferenceRank'])
        if result['has_games']:
            pts = pts + 5
    print('Winner', winner, 'final winner', final_winner)
    if winner == final_winner:
        pts = pts + 50
    return pts


def calculate_victories(player_id, preds, matchups, teams):
    pts = {'winner_count': 0, 'games_count': 0}
    results = calculate_results(player_id, preds, matchups, teams)
    for result in results:
        if result['has_winner']:
            pts['winner_count'] = pts['winner_count'] + 1
        if result['has_games']:
            pts['games_count'] = pts['games_count'] + 1
    return pts


def calculate_matchup_result(matchup, teams):
    home = matchup['home']
    away = matchup['away']
    match_winner = 0
    match_winner_info = {}
    winner_rank = 0
    match_games = 0
    if 'result' in matchup:
        result = matchup['result']
        if result['home_win'] == 4:
            match_winner = home
            match_winner_info = teams[matchup['home']]['standings']
            winner_rank = int(teams[matchup['home']]['standings']['conferenceRank'])
        elif result['away_win'] == 4:
            match_winner = away
            match_winner_info = teams[matchup['away']]['standings']
            winner_rank = int(teams[matchup['away']]['standings']['conferenceRank'])
        match_games = int(result['home_win']) + int(result['away_win'])
    return {"winner": match_winner, "winner_info": match_winner_info,
            "winner_rank": winner_rank, "games": match_games}


def calculate_results(player_id, preds, matchups, teams):
    results = []
    for prediction in preds:
        home = prediction['home']
        away = prediction['away']
        winner = prediction['winner']
        games = prediction['games']
        matchup = get_matchup(matchups, home, away)
        res = {'prediction': prediction, 'has_winner': False,
               'has_games': False, 'winner': {}, 'games': 0}
        result = calculate_matchup_result(matchup, teams)
        if result['games'] != 0:
            if result['winner'] != 0:
                res['winner'] = result['winner_info']
            res['games'] = result['games']
            if winner == result['winner']:
                res['has_winner'] = True
            if games == result['games']:
                res['has_games'] = True
        results.append(res)
        # if 'result' in matchup:
        #     result = matchup['result']
        #     match_winner = ''
        #     if result['home_win'] == 4:
        #         match_winner = home
        #         match_winner_info = teams[matchup['home']]['standings']
        #         winner_rank = int(teams[matchup['home']]['standings']['conferenceRank'])
        #     elif result['away_win'] == 4:
        #         match_winner = away
        #         match_winner_info = teams[matchup['away']]['standings']
        #         winner_rank = int(teams[matchup['away']]['standings']['conferenceRank'])
        #     match_games = int(result['home_win']) + int(result['away_win'])
        #     if match_winner != '':
        #         res['winner'] = match_winner_info
        #         res['games'] = match_games
        #         if match_winner == winner:
        #             res['has_winner'] = True
        #         if match_games == games:
        #             res['has_games'] = True
        #         results.append(res)
    return results


def filter_predictions(preds, matchups):
    results = []
    for pred in preds:
        home = pred['home']
        away = pred['away']
        matchup = get_matchup(matchups, home, away)
        if matchups_store.is_matchup_started(matchup):
            results.append(pred)
    return results


def get(player_id, year):
    result = []
    m = matchups_store.get_matchups(year)
    teams = matchups_store.get_teams(year)
    preds = predictions.get_all(year)
    final_winner = get_final_winner(year, teams)
    winners = predictions.get_winners(year)
    for player in players.get_all_admin():
        player_preds = get_player_prediction(player['id'], preds)
        winner = get_winner(player['id'], winners)
        if len(player_preds) > 0:
            pts = calculate_pts(player['id'],
                                get_player_prediction(player['id'], preds),
                                m, teams, winner, final_winner)
            oldpts = calculate_pts_old(player['id'],
                                       get_player_prediction(player['id'], preds),
                                       m, teams, winner, final_winner)
            victories = calculate_victories(player['id'],
                                            get_player_prediction(player['id'], preds),
                                            m, teams)
            winner = predictions.get_winner(player['id'], year)
            if winner is not None:
                winner = winner['winner']
            else:
                winner = 0
            if player['id'] != player_id:
                player_preds = filter_predictions(player_preds, m)
            result.append({'player': player['name'], 'pts': pts, 'oldpts': oldpts,
                           'winner': winner, 'predictions': player_preds,
                           'victories': victories})
    return result
Python
0.000001
@@ -1332,67 +1332,8 @@
 + 10
-

 print('Winner', winner, 'final winner', final_winner)
 
@@ -1781,66 +1781,8 @@
 + 5
- print('Winner', winner, 'final winner', final_winner)
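Both hunks delete the same debug statement, so calculate_pts_old and calculate_pts each lose their print('Winner', winner, 'final winner', final_winner) line; the scoring logic is unchanged.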
8c8d6147f51d8c036f9d7cf9f7aa72e99cd6f4dd
fix list of unselected projects
rhw/models.py
rhw/models.py
from ckeditor.fields import RichTextField
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.encoding import python_2_unicode_compatible


@python_2_unicode_compatible
class Idea(models.Model):
    title = models.CharField(max_length=100, unique=True)
    slug = models.SlugField(unique=True)
    text = RichTextField(blank=True)
    created = models.DateField()
    authors = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name='ideas')
    interested = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name='interested', blank=True)

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        return reverse('idea', args=(self.slug,))


@python_2_unicode_compatible
class RedHackWeek(models.Model):
    STATUS_IDEAS = 1
    STATUS_PROJECTS = 2
    STATUS_HACKING = 3
    STATUS_VOTING = 4
    STATUS_CLOSED = 5
    STATUS_CHOICES = {
        STATUS_IDEAS: 'collecting ideas',
        STATUS_PROJECTS: 'creating list of approved projects',
        STATUS_HACKING: 'hacking',
        STATUS_VOTING: 'voting',
        STATUS_CLOSED: 'closed',
    }

    title = models.CharField(max_length=100)
    slug = models.SlugField(unique=True)
    text = RichTextField(blank=True)
    start = models.DateField()
    end = models.DateField()
    ideas = models.ManyToManyField(Idea, blank=True, related_name='redhackweeks')
    status = models.IntegerField(max_length=10, choices=STATUS_CHOICES.items(), default=STATUS_IDEAS)

    class Meta:
        ordering = ('-start',)

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        return reverse('rhw', args=(self.slug,))

    def get_status(self):
        return self.STATUS_CHOICES[self.status]

    @property
    def unselected_ideas(self):
        return self.ideas.filter(project=None)


@python_2_unicode_compatible
class Project(models.Model):
    idea = models.OneToOneField(Idea, related_name='project')
    rhw = models.ForeignKey(RedHackWeek, related_name='projects')
    title = models.CharField(max_length=100, unique=True)
    slug = models.SlugField(unique=True)
    text = RichTextField(blank=True)
    members = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name='projects', blank=True)
    votes = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name='votes', blank=True)

    def __str__(self):
        return self.idea.title

    def get_absolute_url(self):
        return reverse('project', args=(self.slug,))
Python
0.000003
@@ -1924,22 +1924,23 @@
 eas.
-filter
+exclude
 (project
 =Non
@@ -1939,13 +1939,18 @@
 ject
-=None
+__rhw=self
 )
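After the commit, the property excludes ideas that already became a project in this particular hack week, rather than keeping only ideas with no project at all:

    @property
    def unselected_ideas(self):
        return self.ideas.exclude(project__rhw=self)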
76a7b89cd8c935dec87ac89ec36b174c9a0636c4
change lambda with broken typing to def
rich/pager.py
rich/pager.py
from abc import ABC, abstractmethod
from typing import Any, Callable


class Pager(ABC):
    """Base class for a pager."""

    @abstractmethod
    def show(self, content: str) -> None:
        """Show content in pager.

        Args:
            content (str): Content to be displayed.
        """


class SystemPager(Pager):
    """Uses the pager installed on the system."""

    _pager: Callable[[Any, str], Any] = lambda self, content: __import__("pydoc").pager(
        content
    )

    def show(self, content: str) -> None:
        """Use the same pager used by pydoc."""
        self._pager(content)


if __name__ == "__main__":  # pragma: no cover
    from .__main__ import make_test_card
    from .console import Console

    console = Console()
    with console.pager(styles=True):
        console.print(make_test_card())
Python
0.000003
@@ -379,65 +379,61 @@
+def
 _pager
-: Callable[[Any, str], Any] = lambda self, content:
+(self, content: str) -> Any:
 return
 __i
@@ -459,29 +459,15 @@
 ger(
-
 content
-
 )
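Reconstructed from the hunks, the typed lambda becomes a plain method, which type checkers can handle:

    def _pager(self, content: str) -> Any:
        return __import__("pydoc").pager(content)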
9bd5662194007d995c924d2d57f6af5c75075472
fix dashboard json output
ctfengine/views.py
ctfengine/views.py
import hashlib

from flask import abort, flash, jsonify, render_template, request, redirect, \
    url_for

from ctfengine import app
from ctfengine import database
from ctfengine import lib
from ctfengine import models


@app.route('/')
def index():
    scores = models.Handle.topscores()
    total_points = database.conn.query(models.Flag.total_points()).first()[0]
    if request.wants_json():
        return jsonify({
            'scores': [(x.handle, x.score) for x in scores],
            'total_points': total_points,
        })
    return render_template('index.html', scores=scores,
                           total_points=total_points)


@app.route('/submit', methods=['POST'])
def submit_flag():
    entered_handle = request.form['handle'].strip()
    entered_flag = request.form['flag'].strip()
    if len(entered_handle) <= 0 or len(entered_flag) <= 0:
        return make_error("Please enter a handle and a flag.")

    flag = models.Flag.get(entered_flag)
    if not flag:
        return make_error(request, "That is not a valid flag.")

    # search for handle
    handle = models.Handle.get(entered_handle)
    if not handle:
        handle = models.Handle(entered_handle, 0)
        database.conn.add(handle)
        database.conn.commit()

    existing_entry = models.FlagEntry.query.filter(
        models.FlagEntry.handle == handle.id,
        models.FlagEntry.flag == flag.id).first()
    if existing_entry:
        return make_error(request, "You may not resubmit flags.")

    # update points for user
    handle.score += flag.points
    database.conn.commit()

    # log flag submission
    entry = models.FlagEntry(handle.id, flag.id, request.remote_addr,
                             request.user_agent.string)
    database.conn.add(entry)
    database.conn.commit()

    # mark machine as dirty if necessary
    if flag.machine:
        machine = database.conn.query(models.Machine).get(flag.machine)
        machine.dirty = True
        database.conn.commit()

    if request.wants_json():
        return jsonify(entry.serialize())
    flash("Flag scored.")
    return redirect(url_for('index'))


@app.route('/dashboard')
def dashboard():
    machines = database.conn.query(models.Machine).all()
    if request.wants_json():
        return jsonify({
            'machines': machines,
        })
    return render_template('dashboard.html', machines=machines)


def make_error(request, msg, code=400):
    if request.wants_json():
        response = jsonify({'message': msg})
        response.status_code = code
        return response
    else:
        flash(msg)
        return redirect(url_for('index'))
Python
0.000129
@@ -2275,16 +2275,40 @@
 chines':
+ [m.serialize() for m in
 machine
@@ -2308,16 +2308,17 @@
 machines
+]
 ,
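After the change, dashboard() serializes each machine before passing the list to jsonify, which cannot encode model instances directly:

    return jsonify({
        'machines': [m.serialize() for m in machines],
    })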
821e87d574ec4eeb3c8e740c82dba3a979d9bae9
allow for Decimal and other types not inherently addable to float in SMA calculator.
cubes/statutils.py
cubes/statutils.py
from collections import deque

from cubes.model import Attribute


def _wma(values):
    n = len(values)
    denom = n * (n + 1) / 2
    total = 0.0
    idx = 1
    for val in values:
        total += float(idx) * float(val)
        idx += 1
    return round(total / denom, 4)


def _sma(values):
    # use all the values
    return round(reduce(lambda i, c: c + i, values, 0.0) / len(values), 2)


def weighted_moving_average_factory(measure, drilldown_paths, source_aggregations):
    return _moving_average_factory(measure, drilldown_paths, source_aggregations, _wma, 'wma')


def simple_moving_average_factory(measure, drilldown_paths, source_aggregations):
    return _moving_average_factory(measure, drilldown_paths, source_aggregations, _sma, 'sma')


def _moving_average_factory(measure, drilldown_paths, source_aggregations, avg_func, aggregation_name):
    if not drilldown_paths or not source_aggregations:
        return lambda item: None

    # if the level we're drilling to doesn't have aggregation_units configured,
    # we're not doing any calculations
    key_drilldown_paths = []
    num_units = None
    for path in drilldown_paths:
        relevant_level = path[2][-1]
        these_num_units = None
        if relevant_level.info:
            these_num_units = relevant_level.info.get('aggregation_units', None)
        if these_num_units is None:
            key_drilldown_paths.append(path)
        else:
            num_units = these_num_units

    if num_units is None or not isinstance(num_units, int) or num_units < 2:
        return lambda item: None

    # if no key_drilldown_paths, the key is always the empty tuple.
    def key_extractor(item):
        vals = []
        for dim, hier, levels in key_drilldown_paths:
            for level in levels:
                vals.append(item.get(level.key.ref()))
        return tuple(vals)

    calculators = []
    measure_baseref = measure.ref()
    for agg in source_aggregations:
        if agg != "identity":
            measure_ref = measure_baseref + "_" + agg
        else:
            measure_ref = measure_baseref
        calculators.append(
            _calc_func(measure_ref + "_" + aggregation_name, measure_ref,
                       avg_func, key_extractor, num_units))

    def calculator(item):
        for calc in calculators:
            calc(item)

    return calculator


def _calc_func(field_name, measure_ref, avg_func, key_extractor, num_units):
    by_value_map = {}

    def f(item):
        by_value = key_extractor(item)
        val_list = by_value_map.get(by_value)
        if val_list is None:
            val_list = deque()
            by_value_map[by_value] = val_list
        val = item.get(measure_ref)
        if val is not None:
            val_list.append(val)
        while len(val_list) > num_units:
            val_list.popleft()
        if len(val_list) > 0:
            item[field_name] = avg_func(val_list)

    return f
Python
0
@@ -349,17 +349,24 @@
 a i, c:
-c
+float(c)
 + i, va
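The fix casts each value before adding, so Decimal and other types that cannot be added to a float still work:

    return round(reduce(lambda i, c: float(c) + i, values, 0.0) / len(values), 2)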
65e8aba17517247770ba27d796016c49fa41e0ab
correct handling of measure.ref() and aggregation selection in statutils' calculated aggregations
cubes/statutils.py
cubes/statutils.py
from collections import deque

from cubes.model import Attribute


def _wma(values):
    n = len(values)
    denom = n * (n + 1) / 2
    total = 0.0
    idx = 1
    for val in values:
        total += float(idx) * float(val)
        idx += 1
    return round(total / denom, 4)


def _sma(values):
    # use all the values
    return round(reduce(lambda i, c: c + i, values, 0.0) / len(values), 2)


def weighted_moving_average_factory(measure, drilldown_paths):
    return _moving_average_factory(measure, drilldown_paths, _wma, '_wma')


def simple_moving_average_factory(measure, drilldown_paths):
    return _moving_average_factory(measure, drilldown_paths, _sma, '_sma')


def _moving_average_factory(measure, drilldown_paths, avg_func, field_suffix):
    if not drilldown_paths:
        return lambda item: None

    # if the level we're drilling to doesn't have aggregation_units configured,
    # we're not doing any calculations
    relevant_level = drilldown_paths[-1][2][-1]
    if not relevant_level.info:
        return lambda item: None
    num_units = relevant_level.info.get('aggregation_units', None)

    if num_units is None or not isinstance(num_units, int) or num_units < 2:
        return lambda item: None

    def key_extractor(item):
        vals = []
        for dim, hier, levels in drilldown_paths[:-1]:
            for level in levels:
                vals.append(item.get(level.key.ref()))
        return tuple(vals)

    field_name = measure.ref() + field_suffix
    by_value_map = {}

    def f(item):
        by_value = key_extractor(item)
        val_list = by_value_map.get(by_value)
        if val_list is None:
            val_list = deque()
            by_value_map[by_value] = val_list
        val = item.get(measure.ref())
        if val is not None:
            val_list.append(val)
        while len(val_list) > num_units:
            val_list.popleft()
        if len(val_list) >= num_units:
            item[field_name] = avg_func(val_list)

    return f
Python
0
@@ -519,17 +519,16 @@
 _wma, '
-_
 wma')

d
@@ -655,17 +655,16 @@
 _sma, '
-_
 sma')

d
@@ -726,28 +726,32 @@
 g_func,
-field_suffix
+aggregation_name
 ):
 i
@@ -934,62 +934,165 @@
-relevant_level = drilldown_paths[-1][2][-1]
 if not
+key_drilldown_paths = []
 num_units = None
 for path in drilldown_paths:
 relevant_level = path[2][-1]
 these_num_units = None
 if rel
@@ -1121,37 +1121,18 @@
-return lambda item: None
+ these_
 num_
@@ -1198,114 +1198,632 @@
-if num_units is None or not isinstance(num_units, int) or num_units < 2:
 return lambda item: None
+ if these_num_units is None:
 key_drilldown_paths.append(path)
 else:
 num_units = these_num_units

 if num_units is None or not isinstance(num_units, int) or num_units < 2:
 return lambda item: None

 # determine the measure on which to calculate.
 measure_ref = measure.ref()
 for agg in measure.aggregations:
 if agg == aggregation_name:
 continue
 if agg != "identity":
 measure_ref += "_" + agg
 break

 field_name = measure_ref + '_' + aggregation_name

 # if no key_drilldown_paths, the key is always the empty tuple.
@@ -1895,24 +1895,28 @@
 , levels in
+key_
 drilldown_pa
@@ -1922,13 +1922,8 @@
 aths
-[:-1]
 :
@@ -2041,53 +2041,8 @@
 ls)
- field_name = measure.ref() + field_suffix
 

@@ -2300,22 +2300,20 @@
 (measure
-.
+_
 ref
-()
 )
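Reconstructed from the hunks, the rewrite collects paths without aggregation_units into key_drilldown_paths and derives the measure reference from the measure's own aggregations; the new selection block reads:

    # determine the measure on which to calculate.
    measure_ref = measure.ref()
    for agg in measure.aggregations:
        if agg == aggregation_name:
            continue
        if agg != "identity":
            measure_ref += "_" + agg
        break

    field_name = measure_ref + '_' + aggregation_name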
582cacac1411312ad5e5dc132562883693f3877a
bump version
cyvcf2/__init__.py
cyvcf2/__init__.py
from .cyvcf2 import (VCF, Variant, Writer, r_ as r_unphased,
                     par_relatedness, par_het)

Reader = VCFReader = VCF

__version__ = "0.8.6"
Python
0
@@ -150,7 +150,7 @@
 0.8.
-6
+7
 "
e624e5ae4b93f61f3a53ad911d9fe3b8baa68377
Bump to 4.0 RC 2 dev.
reviewboard/__init__.py
reviewboard/__init__.py
"""Review Board version and package information. These variables and functions can be used to identify the version of Review Board. They're largely used for packaging purposes. """ from __future__ import unicode_literals #: The version of Review Board. #: #: This is in the format of: #: #: (Major, Minor, Micro, Patch, alpha/beta/rc/final, Release Number, Released) #: VERSION = (4, 0, 0, 0, 'rc', 1, True) def get_version_string(): """Return the Review Board version as a human-readable string.""" version = '%s.%s' % (VERSION[0], VERSION[1]) if VERSION[2] or VERSION[3]: version += ".%s" % VERSION[2] if VERSION[3]: version += ".%s" % VERSION[3] if VERSION[4] != 'final': if VERSION[4] == 'rc': version += ' RC%s' % VERSION[5] else: version += ' %s %s' % (VERSION[4], VERSION[5]) if not is_release(): version += " (dev)" return version def get_package_version(): """Return the Review Board version as a Python package version string. Returns: unicode: The Review Board package version. """ version = '%s.%s' % (VERSION[0], VERSION[1]) if VERSION[2] or VERSION[3]: version = '%s.%s' % (version, VERSION[2]) if VERSION[3]: version = '%s.%s' % (version, VERSION[3]) tag = VERSION[4] if tag != 'final': if tag == 'alpha': tag = 'a' elif tag == 'beta': tag = 'b' version = '%s%s%s' % (version, tag, VERSION[5]) return version def is_release(): """Return whether this is a released version of Review Board.""" return VERSION[6] def get_manual_url(): """Return the URL to the Review Board manual for this version.""" if VERSION[2] == 0 and VERSION[4] != 'final': manual_ver = 'dev' else: manual_ver = '%s.%s' % (VERSION[0], VERSION[1]) return 'https://www.reviewboard.org/docs/manual/%s/' % manual_ver def initialize(load_extensions=True, setup_logging=True, setup_templates=True): """Begin initialization of Review Board. This sets up the logging, generates cache serial numbers, loads extensions, and sets up other aspects of Review Board. Once it has finished, it will fire the :py:data:`reviewboard.signals.initializing` signal. This must be called at some point before most features will work, but it will be called automatically in a standard install. If you are writing an extension or management command, you do not need to call this yourself. Args: load_extensions (bool, optional): Whether extensions should be automatically loaded upon initialization. If set, extensions will only load if the site has been upgraded to the latest version of Review Board. setup_logging (bool, optional): Whether to set up logging based on the configured settings. This can be disabled if the caller has their own logging configuration. setup_templates (bool, optional): Whether to set up state for template rendering. This can be disabled if the caller has no need for template rendering of any kind. This does not prevent template rendering from happening, but may change the output of some templates. Keep in mind that many pieces of functionality, such as avatars and some management commands, may be impacted by this setting. """ import importlib import logging import os os.environ.setdefault(str('DJANGO_SETTINGS_MODULE'), str('reviewboard.settings')) import settings_local # Set RBSITE_PYTHON_PATH to the path we need for any RB-bundled # scripts we may call. 
os.environ[str('RBSITE_PYTHONPATH')] = \ os.path.dirname(settings_local.__file__) from django import setup from django.apps import apps if not apps.ready: setup() from django.conf import settings from django.db import DatabaseError from djblets import log from djblets.cache.serials import generate_ajax_serial from djblets.siteconfig.models import SiteConfiguration from reviewboard import signals from reviewboard.admin.siteconfig import load_site_config from reviewboard.extensions.base import get_extension_manager is_running_test = getattr(settings, 'RUNNING_TEST', False) if setup_logging and not is_running_test: # Set up logging. log.init_logging() load_site_config() if (setup_templates or load_extensions) and not is_running_test: if settings.DEBUG: logging.debug("Log file for Review Board v%s (PID %s)" % (get_version_string(), os.getpid())) # Generate the AJAX serial, used for AJAX request caching. generate_ajax_serial() # Store the AJAX serial as a template serial, so we have a reference # to the real serial last modified timestamp of our templates. This # is useful since the extension manager will be modifying AJAX_SERIAL # to prevent stale caches for templates using hooks. Not all templates # use hooks, and may want to base cache keys off TEMPLATE_SERIAL # instead. # # We only want to do this once, so we don't end up replacing it # later with a modified AJAX_SERIAL later. if not getattr(settings, 'TEMPLATE_SERIAL', None): settings.TEMPLATE_SERIAL = settings.AJAX_SERIAL siteconfig = SiteConfiguration.objects.get_current() if load_extensions and not is_running_test: installed_version = get_version_string() if siteconfig.version == installed_version: # Load all extensions try: get_extension_manager().load() except DatabaseError: # This database is from a time before extensions, so don't # attempt to load any extensions yet. pass else: logging.warning('Extensions will not be loaded. The site must ' 'be upgraded from Review Board %s to %s.', siteconfig.version, installed_version) signals.initializing.send(sender=None) def finalize_setup(is_upgrade=False, register_scmtools=True): """Internal function to upgrade internal state after installs/upgrades. This should only be called by Review Board install or upgrade code. Args: is_upgrade (bool, optional): Whether this is finalizing an upgrade, rather than a new install. register_scmtools (bool, optional): Whether to register SCMTools when finalizing. Version Added: 4.0: """ from reviewboard import signals from reviewboard.admin.management.sites import init_siteconfig from reviewboard.scmtools.models import Tool # Add/update any SCMTool registrations. if register_scmtools: Tool.objects.register_from_entrypoints() # Update the recorded product version. init_siteconfig() # Notify anything else that needs to listen. signals.finalized_setup.send(sender=None, is_upgrade=is_upgrade) #: An alias for the the version information from :py:data:`VERSION`. #: #: This does not include the last entry in the tuple (the released state). __version_info__ = VERSION[:-1] #: An alias for the version used for the Python package. __version__ = get_package_version()
Python
0
@@ -400,14 +400,15 @@
 c',
-1, Tru
+2, Fals
 e)
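The resulting version tuple marks 4.0 RC 2 as an unreleased development build:

    VERSION = (4, 0, 0, 0, 'rc', 2, False)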
e53ae572ac6c232a6afc01ae9ad2988ea1ef456a
Bump version.
robobrowser/__init__.py
robobrowser/__init__.py
__version__ = '0.4.0'

from .browser import RoboBrowser
Python
0
@@ -12,17 +12,17 @@
 = '0.4.
-0
+1
 '

from
9efc16a9ce2187636d2ba75ff7982033854dbbe8
optimise apriltags
robotd/vision/vision.py
robotd/vision/vision.py
"""Classes for handling vision""" from robotd.native.apriltag._apriltag import ffi, lib from robotd.vision.camera import Camera from robotd.vision.camera_base import CameraBase from robotd.vision.tokens import Token class Vision: """Class that handles the vision library""" def __init__(self, camera: CameraBase, token_size): # Pygame camera object self.camera = camera # size of the tokens the camera is testing self.token_size = token_size # apriltag detector object self._detector = None # image from camera self.image = None def init(self): self.camera.init() self._init_library() def __del__(self): self._deinit_library() def _init_library(self): # init detector self._detector = lib.apriltag_detector_create() """ apriltag_detector_t* td, float decimate, default: 1.0, "Decimate input image by this factor" float sigma, default: 0.0, "Apply low-pass blur to input; negative sharpens" int refine_edges, default: 1, "Spend more time trying to align edges of tags" int refine_decode, default: 0, "Spend more time trying to decode tags" int refine_pose default: 0, "Spend more time trying to find the position of the tag" """ lib.apriltag_init(self._detector, 1.0, 0.0, 1, 0, 1) size = self.camera.get_image_size() self.image = lib.image_u8_create_stride(size[0], size[1], size[0]) def _deinit_library(self): # Always destroy the detector if self._detector: lib.apriltag_detector_destroy(self._detector) if self.image: lib.image_u8_destroy(self.image) def _parse_results(self, results): markers = [] for i in range(results.size): detection = lib.zarray_get_detection(results, i) markers.append(Token(detection, self.token_size, self.camera.focal_length)) lib.destroy_detection(detection) return markers def snapshot(self): """ Take an image and process it """ # get the PIL image from the camera img = self.camera.capture_image() total_length = img.size[0] * img.size[1] # Detect the markers ffi.memmove(self.image.buf, img.tobytes(), total_length) results = lib.apriltag_detector_detect(self._detector, self.image) tokens = self._parse_results(results) # Remove the array now we've got them lib.zarray_destroy(results) return tokens, img if __name__ == "__main__": # webcam CAM_DEVICE = "/dev/video0" CAM_IMAGE_SIZE = (1280, 720) FOCAL_DISTANCE = 720 camera = Camera(CAM_DEVICE, CAM_IMAGE_SIZE, 720) # File Camera # camera = FileCamera() v = Vision(camera, (0.1, 0.1)) v.init() while True: tokens, _ = v.snapshot() print(len(tokens), "tokens seen")
Python
0.999998
@@ -1425,17 +1425,17 @@
 , 1, 0,
-1
+0
 )
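The hunk flips the last argument of the init call, disabling refine_pose (per the docstring above, "Spend more time trying to find the position of the tag"):

    lib.apriltag_init(self._detector, 1.0, 0.0, 1, 0, 0)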
70b4be757d671bc86876b4568632bb6fe6064001
Fix a Django deprecation warning
admin_interface/templatetags/admin_interface_tags.py
admin_interface/templatetags/admin_interface_tags.py
# -*- coding: utf-8 -*-

from django import template

from admin_interface.models import Theme

register = template.Library()


@register.assignment_tag(takes_context = True)
def get_admin_interface_theme(context):
    theme = None
    request = context.get('request', None)
    if request:
        theme = getattr(request, 'admin_interface_theme', None)
    if not theme:
        theme = Theme.get_active_theme()
        if request:
            request.admin_interface_theme = theme
    return theme
Python
0.006582
@@ -136,18 +136,14 @@
 ter.
-assignment
+simple
 _tag
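The decorator becomes:

    @register.simple_tag(takes_context = True)

assignment_tag was deprecated in Django 1.9, where simple_tag gained the same variable-assignment support, so this silences the warning without changing behavior.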
2f3b5a6e0600f92ae0803ad3df44948dd5408444
comment out stdout log handler
cssbot/log.py
cssbot/log.py
#
# Copyright (C) 2011 by Brian Weck
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
#
import logging
from datetime import date

import utils


def __configure_logging():
    # configure the base logger for the pkg
    l = logging.getLogger("cssbot")
    l.setLevel(logging.DEBUG)

    # format.
    formatter = logging.Formatter("%(asctime)s : [%(levelname)s] %(name)s : %(message)s")

    # stdout handler.
    ch = logging.StreamHandler()
    ch.setLevel(logging.WARN)
    ch.setFormatter(formatter)
    l.addHandler(ch)

    # file handler
    today = date.today()
    log_date = "%d%02d" % (today.year, today.month)  # "%d%02d%02d" % (today.year, today.month, today.day)
    fh = logging.FileHandler("log/cssbot-%s.log" % log_date)
    fh.setLevel(logging.INFO)
    fh.setFormatter(formatter)
    l.addHandler(fh)


def getLogger(name=None):
    if not name:
        name = "cssbot"
    return logging.getLogger(name)

# utils.dirs.switch_cwd_to_script_loc()
__configure_logging()
Python
0
@@ -433,16 +433,17 @@
 andler.
+#
 ch =
@@ -466,16 +466,17 @@
 ndler()
+#
 ch.se
@@ -496,16 +496,17 @@
 g.WARN)
+#
 ch.se
@@ -519,32 +519,33 @@
 tter(formatter)
+#
 l.addHandler(
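With the four "+#" insertions applied at the start of each line, the stdout handler block is commented out while the file handler keeps logging; reconstructed (the exact spacing after each "#" is not recoverable from the collapsed diff):

    # stdout handler.
    #    ch = logging.StreamHandler()
    #    ch.setLevel(logging.WARN)
    #    ch.setFormatter(formatter)
    #    l.addHandler(ch)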
9abce530e50e4c1132e512abd51f39a60e4bd261
change max_depth check to >=
ai/minimax.py
ai/minimax.py
# http://en.wikipedia.org/wiki/Alpha%E2%80%93beta_pruning


def minimax(game, player, depth, max_depth):
    if game.game_over() or depth > max_depth:
        return game.heuristic_value(player), []

    best_score = -float('inf') if game.current_player == player else float('inf')
    best_moves = []

    for move in list(game.valid_moves()):
        game.move(move)
        score, _ = minimax(game, player, depth+1, max_depth)
        game.undo_move()

        if game.current_player == player:
            if score > best_score:
                best_score = score
                best_moves = [move]
            elif score == best_score:
                best_moves.append(move)
        else:
            if score < best_score:
                best_score = score
                best_moves = [move]
            elif score == best_score:
                best_moves.append(move)

    return best_score, best_moves


def alphabeta(game, player, depth, max_depth, alpha, beta):
    """
    Alpha beta minimax algorithm

    :param game: game object with game_over, heuristic_value, and valid_moves methods
    :param player: maximizing player
    :param depth: current depth in tree
    :param max_depth: maximum depth in tree to search
    :param alpha: minimum score maximizing player will get
    :param beta: maximum score minimizing player will get
    :return: score, list of moves
    """
    # base case - game over or depth exceeded
    if game.game_over() or depth > max_depth:
        return game.heuristic_value(player, depth), []

    # init best score to worst possible
    best_score = -float('inf') if game.current_player == player else float('inf')
    best_moves = []

    # loop all moves
    for move in list(game.valid_moves()):
        # make move
        game.move(move)
        # recurse
        score, _ = alphabeta(game, player, depth+1, max_depth, alpha, beta)
        # undo moves
        game.undo_move()

        # save the best scores
        if game.current_player == player:
            alpha = max(alpha, score)
            if score > best_score:
                best_score = score
                best_moves = [move]
            elif score == best_score:
                best_moves.append(move)
        else:
            beta = min(beta, score)
            if score < best_score:
                best_score = score
                best_moves = [move]
            elif score == best_score:
                best_moves.append(move)

        # prune branch if the minimum score the max player gets exceeds the max score the min player gets
        if alpha >= beta:
            break

    return best_score, best_moves
Python
0
@@ -124,32 +124,33 @@
 ver() or depth >
+=
 max_depth:
@@ -1466,16 +1466,17 @@
 depth >
+=
 max_dep
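Both comparisons become inclusive, so the search stops once depth reaches max_depth:

    if game.game_over() or depth >= max_depth:
        return game.heuristic_value(player), []

and likewise in alphabeta, where the heuristic also receives the depth.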
3f54ba70972afc0b32a6de106b0f1758536c98a5
Support building on Cuda11
cuda_setup.py
cuda_setup.py
# Adapted from https://github.com/rmcgibbo/npcuda-example and
# https://github.com/cupy/cupy/blob/master/cupy_setup_build.py
import logging
import os
import sys
from distutils import ccompiler, errors, msvccompiler, unixccompiler

from setuptools.command.build_ext import build_ext as setuptools_build_ext


def find_in_path(name, path):
    "Find a file in a search path"
    # adapted fom http://code.activestate.com/recipes/52224-find-a-file-given-a-search-path/
    for dir in path.split(os.pathsep):
        binpath = os.path.join(dir, name)
        if os.path.exists(binpath):
            return os.path.abspath(binpath)
    return None


def locate_cuda():
    """Locate the CUDA environment on the system

    If a valid cuda installation is found this returns a dict with keys
    'home', 'nvcc', 'include', and 'lib64' and values giving the absolute
    path to each directory.

    Starts by looking for the CUDAHOME env variable. If not found, everything
    is based on finding 'nvcc' in the PATH. If nvcc can't be found, this
    returns None
    """
    nvcc_bin = 'nvcc'
    if sys.platform.startswith("win"):
        nvcc_bin = 'nvcc.exe'

    # first check if the CUDAHOME env variable is in use
    if 'CUDAHOME' in os.environ:
        home = os.environ['CUDAHOME']
        nvcc = os.path.join(home, 'bin', nvcc_bin)
    elif 'CUDA_PATH' in os.environ:
        home = os.environ['CUDA_PATH']
        nvcc = os.path.join(home, 'bin', nvcc_bin)
    else:
        # otherwise, search the PATH for NVCC
        nvcc = find_in_path(nvcc_bin, os.environ['PATH'])
        if nvcc is None:
            logging.warning('The nvcc binary could not be located in your $PATH. Either add it to '
                            'your path, or set $CUDAHOME to enable CUDA extensions')
            return None

        home = os.path.dirname(os.path.dirname(nvcc))
        if not os.path.exists(os.path.join(home, "include")):
            logging.warning("Failed to find cuda include directory, attempting /usr/local/cuda")
            home = "/usr/local/cuda"

    cudaconfig = {'home': home,
                  'nvcc': nvcc,
                  'include': os.path.join(home, 'include'),
                  'lib64': os.path.join(home, 'lib64')}
    post_args = ['-gencode=arch=compute_30,code=sm_30',
                 '-gencode=arch=compute_50,code=sm_50',
                 '-gencode=arch=compute_60,code=sm_60',
                 '-gencode=arch=compute_60,code=compute_60',
                 '--ptxas-options=-v', '-O2']

    if sys.platform == "win32":
        cudaconfig['lib64'] = os.path.join(home, 'lib', 'x64')
        post_args += ['-Xcompiler', '/MD']
    else:
        post_args += ['-c', '--compiler-options', "'-fPIC'"]

    for k, v in cudaconfig.items():
        if not os.path.exists(v):
            logging.warning('The CUDA %s path could not be located in %s', k, v)
            return None

    cudaconfig['post_args'] = post_args
    return cudaconfig


# This code to build .cu extensions with nvcc is taken from cupy:
# https://github.com/cupy/cupy/blob/master/cupy_setup_build.py
class _UnixCCompiler(unixccompiler.UnixCCompiler):
    src_extensions = list(unixccompiler.UnixCCompiler.src_extensions)
    src_extensions.append('.cu')

    def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
        # For sources other than CUDA C ones, just call the super class method.
        if os.path.splitext(src)[1] != '.cu':
            return unixccompiler.UnixCCompiler._compile(
                self, obj, src, ext, cc_args, extra_postargs, pp_opts)

        # For CUDA C source files, compile them with NVCC.
        _compiler_so = self.compiler_so
        try:
            nvcc_path = CUDA['nvcc']
            post_args = CUDA['post_args']
            # TODO? base_opts = build.get_compiler_base_options()
            self.set_executable('compiler_so', nvcc_path)

            return unixccompiler.UnixCCompiler._compile(
                self, obj, src, ext, cc_args, post_args, pp_opts)
        finally:
            self.compiler_so = _compiler_so


class _MSVCCompiler(msvccompiler.MSVCCompiler):
    _cu_extensions = ['.cu']

    src_extensions = list(unixccompiler.UnixCCompiler.src_extensions)
    src_extensions.extend(_cu_extensions)

    def _compile_cu(self, sources, output_dir=None, macros=None,
                    include_dirs=None, debug=0, extra_preargs=None,
                    extra_postargs=None, depends=None):
        # Compile CUDA C files, mainly derived from UnixCCompiler._compile().
        macros, objects, extra_postargs, pp_opts, _build = \
            self._setup_compile(output_dir, macros, include_dirs, sources,
                                depends, extra_postargs)

        compiler_so = CUDA['nvcc']
        cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
        post_args = CUDA['post_args']

        for obj in objects:
            try:
                src, _ = _build[obj]
            except KeyError:
                continue
            try:
                self.spawn([compiler_so] + cc_args + [src, '-o', obj] + post_args)
            except errors.DistutilsExecError as e:
                raise errors.CompileError(str(e))

        return objects

    def compile(self, sources, **kwargs):
        # Split CUDA C sources and others.
        cu_sources = []
        other_sources = []
        for source in sources:
            if os.path.splitext(source)[1] == '.cu':
                cu_sources.append(source)
            else:
                other_sources.append(source)

        # Compile source files other than CUDA C ones.
        other_objects = msvccompiler.MSVCCompiler.compile(
            self, other_sources, **kwargs)

        # Compile CUDA C sources.
        cu_objects = self._compile_cu(cu_sources, **kwargs)

        # Return compiled object filenames.
        return other_objects + cu_objects


class cuda_build_ext(setuptools_build_ext):
    """Custom `build_ext` command to include CUDA C source files."""

    def run(self):
        if CUDA is not None:
            def wrap_new_compiler(func):
                def _wrap_new_compiler(*args, **kwargs):
                    try:
                        return func(*args, **kwargs)
                    except errors.DistutilsPlatformError:
                        if not sys.platform == 'win32':
                            CCompiler = _UnixCCompiler
                        else:
                            CCompiler = _MSVCCompiler
                        return CCompiler(
                            None, kwargs['dry_run'], kwargs['force'])
                return _wrap_new_compiler
            ccompiler.new_compiler = wrap_new_compiler(ccompiler.new_compiler)
            # Intentionally causes DistutilsPlatformError in
            # ccompiler.new_compiler() function to hook.
            self.compiler = 'nvidia'
        setuptools_build_ext.run(self)


CUDA = locate_cuda()
build_ext = cuda_build_ext if CUDA else setuptools_build_ext
Python
0
@@ -2254,64 +2254,8 @@
 = [
-'-gencode=arch=compute_30,code=sm_30',
 '-ge
@@ -2402,24 +2402,85 @@
 ompute_60',
+ '-gencode=arch=compute_70,code=compute_70',
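The resulting nvcc flags drop the compute_30 target, which CUDA 11's toolchain no longer supports, and add compute_70:

    post_args = ['-gencode=arch=compute_50,code=sm_50',
                 '-gencode=arch=compute_60,code=sm_60',
                 '-gencode=arch=compute_60,code=compute_60',
                 '-gencode=arch=compute_70,code=compute_70',
                 '--ptxas-options=-v', '-O2']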
057110e3aa4007ad7221873029bed383ee1e0e3b
Remove platform check
aiotkinter.py
aiotkinter.py
import asyncio
import tkinter
import sys

if sys.platform == 'win32':
    raise ImportError('%s is not available on your platform'.format(__name__))


class _TkinterSelector(asyncio.selectors._BaseSelectorImpl):
    def __init__(self):
        super().__init__()
        self._tk = tkinter.Tk(useTk=0)
        self._ready = []

    def register(self, fileobj, events, data=None):
        key = super().register(fileobj, events, data)
        mask = 0
        if events & asyncio.selectors.EVENT_READ:
            mask |= tkinter.READABLE
        if events & asyncio.selectors.EVENT_WRITE:
            mask |= tkinter.WRITABLE
        def ready(fd, mask):
            assert key.fd == fd
            events = 0
            if mask & tkinter.READABLE:
                events |= asyncio.selectors.EVENT_READ
            if mask & tkinter.WRITABLE:
                events |= asyncio.selectors.EVENT_WRITE
            self._ready.append((key, events))
        self._tk.createfilehandler(key.fd, mask, ready)
        return key

    def unregister(self, fileobj):
        key = super().unregister(fileobj)
        self._tk.deletefilehandler(key.fd)
        return key

    def select(self, timeout=None):
        self._ready = []
        if timeout is not None:
            timeout = int(timeout*1000)
            token = self._tk.createtimerhandler(timeout, lambda: True)
        self._tk.dooneevent()
        if timeout is not None:
            token.deletetimerhandler()
        return self._ready


class TkinterEventLoopPolicy(asyncio.DefaultEventLoopPolicy):
    def new_event_loop(self):
        try:
            return self._loop_factory(selector=_TkinterSelector())
        except TypeError:
            raise Exception('The default event loop is not a selector event loop')
Python
0
@@ -27,126 +27,8 @@
 ter
-import sys

if sys.platform == 'win32':
 raise ImportError('%s is not available on your platform'.format(__name__))
 

cl
7ab744fe8464ce85a27431adf94039c45551010f
Remove Google analytics code.
publishconf.py
publishconf.py
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals

# This file is only used if you use `make publish` or
# explicitly specify it as your config file.

import os
import sys
sys.path.append(os.curdir)
from pelicanconf import *

SITEURL = 'https://dicasdejava.com.br'
RELATIVE_URLS = False

FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = 'feeds/%s.atom.xml'

DELETE_OUTPUT_DIRECTORY = True

# Plugins
PLUGIN_PATHS = ['./pelican-plugins']
PLUGINS = ['sitemap', 'minify', 'tag_cloud']

SITEMAP = {
    'format': 'xml',
    'exclude': ['autor/', 'tag/', 'categoria/', 'arquivo/'],
    'priorities': {
        'articles': 0.5,
        'indexes': 0.5,
        'pages': 0.5
    },
    'changefreqs': {
        'articles': 'monthly',
        'indexes': 'daily',
        'pages': 'monthly'
    }
}

# Following items are often useful when publishing
DISQUS_SITENAME = "dicas-de-java"
GOOGLE_ANALYTICS = "UA-39997045-4"

MINIFY = {
    'remove_comments': True,
    'remove_all_empty_space': True,
    'remove_optional_attribute_quotes': False
}
Python
0.000001
@@ -920,43 +920,8 @@
 va"
-GOOGLE_ANALYTICS = "UA-39997045-4"
 

MI
e9240123541e91145bac29f52ff8f04f412aaf6c
Add reminder to use scope helper in module.py
pycc/module.py
pycc/module.py
from collections import namedtuple
import os


ImportResult = namedtuple('ImportResult', ('module', 'target',))


class Package(object):
    """Contains AST nodes and metadata for modules in a Python package."""

    def __init__(self, location):
        self.location = os.path.realpath(location)
        self.root = os.path.split(self.location)[1]
        self._locations = {}
        self._paths = {}

    def _path_from_location(self, location):
        """Generate a Python path from a given disk location."""
        path = location.split(os.path.split(self.location)[0])[1]
        if path.endswith('__init__.py'):
            path = os.path.split(path)[0]
        # Strip the .py off the file to make a valid python path.
        if path.endswith('.py'):
            path = path[:-3]
        return path

    def add(self, location, node):
        """Add an AST module node for a given disk location."""
        location = os.path.realpath(location)
        if self.location not in location:
            raise ValueError("Module must be children of the root package.")

        path = self._path_from_location(location)

        mod = Module(
            location=location,
            path=path,
            node=node,
            package=self,
        )

        self._locations[location] = mod
        self._paths[path] = mod

        return mod

    def modules(self):
        """Get a generator of Module objects in the package."""
        return self._paths.itervalues()

    def get(self, path):
        """Get the closest Module to an import path."""
        if '.' in path:
            path = path.replace('.', os.sep)

        if not path.startswith(os.sep):
            path = os.path.join(os.sep, path)

        if path in self._paths:
            return ImportResult(module=self._paths[path], target=None)

        path, target = os.path.split(path)

        if path not in self._paths:
            return None

        return ImportResult(module=self._paths[path], target=target)

    def __repr__(self):
        return '<Package {0} -- {1}>'.format(
            self.location,
            self.root,
        )


class Module(object):
    """Contains AST nodes and metadata for a single Python module."""

    __slots__ = (
        'path',
        'location',
        'node',
        'package',
    )

    def __init__(self, location, path, node, package):
        self.location = location
        self.path = path
        self.node = node
        self.package = package

    def __repr__(self):
        return '<Module {0} -- {1}>'.format(
            self.location,
            self.path,
        )
Python
0
@@ -1940,16 +1940,70 @@
 n None

+ # TODO(kevinconway): Make target a scope.Name
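The hunk appends the reminder inside Package.get, just after the early return:

        if path not in self._paths:
            return None

        # TODO(kevinconway): Make target a scope.Name
        return ImportResult(module=self._paths[path], target=target)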
e48fcd11e5a208d8177862c824d2fa6f3fbf1ee2
Remove body of __main__ method in jobs file
d1lod/jobs.py
d1lod/jobs.py
""" jobs.py A collection of common jobs for the D1 LOD service. """ import os import sys import uuid import datetime from redis import StrictRedis sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir)) from d1lod import dataone from d1lod.sesame import store from d1lod.sesame import repository from d1lod.sesame import interface conn = StrictRedis(host='redis', port='6379') namespaces = { 'owl': 'http://www.w3.org/2002/07/owl#', 'rdfs': 'http://www.w3.org/2000/01/rdf-schema#', 'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#', 'xsd': 'http://www.w3.org/2001/XMLSchema#', 'foaf': 'http://xmlns.com/foaf/0.1/', 'dcterms': 'http://purl.org/dc/terms/', 'datacite': 'http://purl.org/spar/datacite/', 'glbase': 'http://schema.geolink.org/', 'd1dataset': 'http://lod.dataone.org/dataset/', 'd1person': 'http://lod.dataone.org/person/', 'd1org': 'http://lod.dataone.org/organization/', 'd1node': 'https://cn.dataone.org/cn/v1/node/', 'd1landing': 'https://search.dataone.org/#view/' } SESAME_HOST = os.getenv('GRAPHDB_PORT_8080_TCP_ADDR', 'localhost') SESAME_PORT = os.getenv('GRAPHDB_PORT_8080_TCP_PORT', '8080') SESAME_REPOSITORY = 'test' REDIS_LAST_RUN_KEY = "lastrun" def getNowString(): """ Returns the current time in UTC as a string with the format of 2015-01-01T12:34:56.789Z """ t = datetime.datetime.utcnow() return t.strftime("%Y-%m-%dT%H:%M:%S.%fZ") def getLastRun(): """ Gets the time job was run """ if not conn.exists(REDIS_LAST_RUN_KEY): return None else: return conn.get(REDIS_LAST_RUN_KEY) def setLastRun(to=None): """ Sets the last run timestamp """ if to is None: to = getNowString() print "Setting lastrun: %s" % to conn.set(REDIS_LAST_RUN_KEY, to) def calculate_stats(): """ Collect and print out statistics about the graph. """ JOB_NAME = "JOB_GRAPH_STATS" print "[%s] Job started" % JOB_NAME s = store.SesameStore(SESAME_HOST, SESAME_PORT) r = repository.SesameRepository(s, SESAME_REPOSITORY, namespaces) i = interface.SesameInterface(r) print "[%s] repository.size: %d" % (JOB_NAME, r.size()) def update_graph(): """ Job that updates the entire graph. Datasets that have been added to the DataOne network since the last run will be added to the triple store. 
""" JOB_NAME = "JOB_UPDATE" print "[%s] Job started" % JOB_NAME s = store.SesameStore(SESAME_HOST, SESAME_PORT) r = repository.SesameRepository(s, SESAME_REPOSITORY, namespaces) i = interface.SesameInterface(r) # Grab size before doing work before_size = r.size() from_string = getLastRun() if from_string is None: setLastRun() return to_string = getNowString() print "[%s] Hourly job running FROM:%s TO:%s" % (JOB_NAME, from_string, to_string) query_string = dataone.createSinceQueryURL(from_string, to_string, None, 0) num_results = dataone.getNumResults(query_string) print "[%s] num_results: %d" % (JOB_NAME, num_results) # Calculate the number of pages we need to get to get all results page_size=1000 num_pages = num_results / page_size if num_results % page_size > 0: num_pages += 1 print "[%s] num_pages: %d" % (JOB_NAME, num_pages) # Process each page for page in range(1, num_pages + 1): page_xml = dataone.getSincePage(from_string, to_string, page, page_size) docs = page_xml.findall(".//doc") for doc in docs: identifier = dataone.extractDocumentIdentifier(doc) print "[%s] addDataset: %s" % (JOB_NAME, doc) i.addDataset(doc) setLastRun(to_string) # Grab size after doing work after_size = r.size() size_diff = after_size - before_size print "[%s] size_difference: %d" % (JOB_NAME, size_diff) def one_off_job(): s = store.SesameStore(SESAME_HOST, SESAME_PORT) r = repository.SesameRepository(s, SESAME_REPOSITORY, namespaces) i = interface.SesameInterface(r) identifier = 'doi:10.6085/AA/YB15XX_015MU12004R00_20080619.50.1' doc = dataone.getSolrIndexFields(identifier) i.addDataset(doc) if __name__ == '__main__': hourly_job()
Python
0.000038
@@ -4296,17 +4296,26 @@
-hourly_job()
+print "main executed"
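The __main__ guard now only announces itself instead of calling hourly_job(), which is not defined anywhere in the file:

    if __name__ == '__main__':
        print "main executed"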
dccb841600c35ce9b0e93953221088ba11bc2a02
Fix hard-coded http:// in akvo/iati/
akvo/iati/exports/org_elements/document_link.py
akvo/iati/exports/org_elements/document_link.py
# -*- coding: utf-8 -*-

# Akvo RSR is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.

from lxml import etree


def document_link(organisation, request):
    """
    Generate the document-link elements.

    :param organisation: Organisation object
    :param request: Django request object
    :return: A list of Etree elements
    """
    document_link_elements = []

    if organisation.logo:
        logo_element = etree.Element("document-link")
        logo_element.attrib['url'] = 'http://' + request.get_host() + organisation.logo.url
        logo_element.attrib['format'] = "image/jpeg"

        title_element = etree.SubElement(logo_element, "title")
        narrative_element = etree.SubElement(title_element, "narrative")
        narrative_element.text = "Organisation logo"

        category_element = etree.SubElement(logo_element, "category")
        category_element.attrib['code'] = "A12"

        document_link_elements.append(logo_element)

    for document in organisation.documents.all():
        if document.url or document.document or document.format or document.title or \
                document.categories.all() or document.language or document.document_date:
            document_element = etree.Element("document-link")

            if document.url:
                document_element.attrib['url'] = document.url
            elif document.document:
                document_element.attrib['url'] = 'http://' + request.get_host() + \
                                                 document.document.url

            if document.format:
                document_element.attrib['format'] = document.format

            if document.title:
                title_element = etree.SubElement(document_element, "title")
                narrative_element = etree.SubElement(title_element, "narrative")
                narrative_element.text = document.title

                if document.title_language:
                    narrative_element.attrib['{http://www.w3.org/XML/1998/namespace}lang'] = \
                        document.title_language

            for category in document.categories.all():
                category_element = etree.SubElement(document_element, "category")
                category_element.attrib['code'] = category.category

            if document.language:
                language_element = etree.SubElement(document_element, "language")
                language_element.attrib['code'] = document.language

            if document.document_date:
                date_element = etree.SubElement(document_element, "document-date")
                date_element.attrib['iso-date'] = str(document.document_date)

            for country in document.countries.all():
                if country.country or country.text:
                    country_element = etree.SubElement(document_element, "recipient-country")

                    if country.country:
                        country_element.attrib['code'] = country.country

                    if country.text:
                        narrative_element = etree.SubElement(country_element, "narrative")
                        narrative_element.text = country.text

            document_link_elements.append(document_element)

    return document_link_elements
Python
0.000002
@@ -672,34 +672,70 @@
 b['url'] = '
-http://' +
+{}://{}{}'.format(
 request.scheme, request.get
@@ -741,18 +741,17 @@
 t_host()
- +
+,
 organis
@@ -761,24 +761,34 @@
 on.logo.url
+ )
 logo
@@ -1663,90 +1663,82 @@
 = '
-http://' + request.get_host() + \
+{}://{}{}'.format(
 request.scheme, request.get_host(),
 doc
@@ -1755,16 +1755,34 @@
 ment.url
+
 )
 
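Reconstructed from the hunks, both URL assignments switch to the request's actual scheme instead of a hard-coded http://:

    logo_element.attrib['url'] = '{}://{}{}'.format(
        request.scheme, request.get_host(), organisation.logo.url
    )

and, for uploaded documents:

    document_element.attrib['url'] = '{}://{}{}'.format(
        request.scheme, request.get_host(), document.document.url
    )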
1021c07edf6016a8d34e5bfec7a38445aff9cbaf
Rearrange create-or-find-policy logic.
s3same/iam.py
s3same/iam.py
import json

from botocore.exceptions import ClientError

IAMName = 's3same_travis'


def _policy_string(bucket):
    return json.dumps({
        "Version": "2012-10-17",
        "Statement": [
            {
                "Action": [
                    "s3:ListBucket"
                ],
                "Effect": "Allow",
                "Resource": [
                    "arn:aws:s3:::{}".format(bucket)
                ]
            },
            {
                "Action": [
                    "s3:PutObject",
                    "s3:PutObjectAcl"
                ],
                "Effect": "Allow",
                "Resource": [
                    "arn:aws:s3:::{}/*".format(bucket)
                ]
            }
        ]
    })


def _all_policies(iam, **kwargs):
    marker = None
    while True:
        if marker:
            kwargs['Marker'] = marker
        response = iam.list_policies(**kwargs)
        for policy in response.get('Policies', []):
            yield policy
        marker = response.get('Marker')
        if not response.get('IsTruncated') or marker is None:
            break


def _policy_arn(iam, bucket):
    policy_arn = None
    try:
        # Try creating the policy...
        response = iam.create_policy(
            PolicyName=IAMName,
            PolicyDocument=_policy_string(bucket),
        )
        # ... and getting its ARN from the response.
        policy_arn = response.get('Policy', {}).get('Arn')
    except ClientError as e:
        if e.response['Error']['Code'] != 'EntityAlreadyExists':
            raise
        # If the policy already exists, go find it and get its ARN.
        policy_arn = next(
            (p.get('Arn')
             for p in _all_policies(iam, Scope='Local')
             if p.get('PolicyName') == IAMName
             ),
            None)
    # If we failed to get an ARN from creating or finding the policy, that's bad.
    if not policy_arn:
        raise ValueError  # TODO: FIXME
    return policy_arn


def _create_group_if_needed(iam, bucket):
    try:
        iam.create_group(GroupName=IAMName)
    except ClientError as e:
        if e.response['Error']['Code'] != 'EntityAlreadyExists':
            raise
    iam.attach_group_policy(
        GroupName=IAMName,
        PolicyArn=_policy_arn(iam, bucket),
    )


def credentials_for_new_user(iam, username, bucket=IAMName):
    _create_group_if_needed(iam, bucket)
    try:
        iam.create_user(UserName=username)
    except ClientError as e:
        if e.response['Error']['Code'] != 'EntityAlreadyExists':
            raise
    iam.add_user_to_group(UserName=username, GroupName=IAMName)
    return iam.create_access_key(UserName=username).get('AccessKey')
Python
0.000004
@@ -779,127 +779,73 @@ ef _ -all +find _polic -ies +y (iam -, **kwargs):%0A marker = None%0A while True:%0A if marker:%0A kwargs%5B'Marker'%5D = marker +):%0A kwargs = %7B'Scope': 'Local'%7D%0A while True: %0A @@ -956,13 +956,70 @@ -yield +if policy.get('PolicyName') == IAMName:%0A return pol @@ -1030,22 +1030,32 @@ -m +kwargs%5B'M arker +'%5D = respo @@ -1122,14 +1122,24 @@ or -m +kwargs%5B'M arker +'%5D is @@ -1160,13 +1160,19 @@ -b re -ak +turn None %0A @@ -1177,27 +1177,30 @@ %0Adef +_create _policy -_arn (iam, bu @@ -1210,30 +1210,8 @@ t):%0A - policy_arn = None%0A @@ -1411,73 +1411,14 @@ -# ... and getting its ARN from the response.%0A policy_arn = +return res @@ -1439,23 +1439,8 @@ icy' -, %7B%7D).get('Arn' )%0A @@ -1593,358 +1593,204 @@ ts, -go find it and get its ARN.%0A policy_arn = next(%0A (p.get('Arn')%0A for p in _all +return None.%0A return None%0A%0Adef _policy_arn(iam, bucket):%0A policy = _create _polic -ies +y (iam, -Scope='Local')%0A if p.get('PolicyName') == IAMName%0A ),%0A None)%0A # If we failed to get an ARN from creating or finding the policy, that's bad.%0A if not policy_arn +bucket) or _find_policy(iam)%0A try:%0A return policy%5B'Arn'%5D%0A except (TypeError, KeyError) :%0A @@ -1825,35 +1825,28 @@ DO: -FIXME%0A return policy_arn +more specific error? %0A%0Ade
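The rearrangement above splits policy handling into _create_policy and _find_policy and falls through from one to the other. A hedged sketch of the same create-or-find idempotency pattern with boto3 (the pagination fields follow the entry; the function name is illustrative):

from botocore.exceptions import ClientError

def create_or_find_policy(iam, name, document):
    # iam is e.g. boto3.client('iam'). Try to create; if the policy already
    # exists, page through customer-managed policies to recover its ARN.
    try:
        response = iam.create_policy(PolicyName=name, PolicyDocument=document)
        return response['Policy']['Arn']
    except ClientError as e:
        if e.response['Error']['Code'] != 'EntityAlreadyExists':
            raise
    kwargs = {'Scope': 'Local'}
    while True:
        response = iam.list_policies(**kwargs)
        for policy in response.get('Policies', []):
            if policy.get('PolicyName') == name:
                return policy['Arn']
        if not response.get('IsTruncated'):
            return None
        kwargs['Marker'] = response['Marker']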
a7d3376d730a04fec533a46a4faefb5c88f618f9
Add docstring for pymt plugins.
pymt/plugin.py
pymt/plugin.py
"""Dynamically find and load plugins.""" from __future__ import print_function __all__ = [] import os import logging import importlib from glob import glob from .framework.bmi_bridge import bmi_factory from .babel import setup_babel_environ def load_plugin(entry_point, callback=None): """Load a generic plugin. Parameters ---------- entry_point : str A entry point specify for the plugin. The specifier should be a dotted module name followed by a ``:`` and an identifier nameing an object within the module. callback : func If provided, this function will be called on the plugin and the result returned. Returns ------- object The plugin. """ module_name, cls_name = entry_point.split(':') plugin = None try: module = importlib.import_module(module_name) except ImportError: logging.info('Unable to import {module}.'.format(module=module_name)) else: try: plugin = module.__dict__[cls_name] except KeyError: logging.info('{plugin} not contained in {module}.'.format( plugin=cls_name, module=module_name)) if callback and plugin: plugin = callback(plugin) return plugin def load_all_plugins(entry_points=[], callback=None): """Load multiple plugins. Parameters ---------- entry_points : iterable of str, optional A list of plugins to load. callback : func, optional A callback function that is called on each loaded plugin. Returns ------- list A list of loaded plugins. """ try: entry_points += os.environ['PYMT_PLUGINS'].split(';') except KeyError: pass all_plugins = [] for entry_point in entry_points: all_plugins.append(load_plugin(entry_point, callback=callback)) return all_plugins def load_bmi_plugin(entry_point): """Load a plugin that implements as BMI. Parameters ---------- entry_point : str A entry point specify for the plugin. The specifier should be a dotted module name followed by a ``:`` and an identifier nameing an object within the module. Returns ------- object The BMI plugin. """ return load_plugin(entry_point, callback=bmi_factory) def discover_csdms_plugins(): """Look for plugins in the csdms package. Returns ------- list or str Entry point specifications. """ entry_points = [] try: csdms_module = importlib.import_module('csdms') except ImportError: logging.info('Unable to import {module}.'.format(module='csdms')) else: files = glob(os.path.join(csdms_module.__path__[0], '*so')) for path in files: name, ext = os.path.splitext(os.path.basename(path)) entry_points.append('csdms.{name}:{name}'.format(name=name)) return entry_points def load_csdms_plugins(): """Load all available csdms plugins. Returns ------- list BMI plugins. """ setup_babel_environ() entry_points = discover_csdms_plugins() return load_all_plugins(entry_points, callback=bmi_factory)
Python
0
@@ -30,16 +30,1149 @@ plugins. +%0A%0APyMT plugins are components that expose the CSDMS Basic%0AModel Interface and provide CSDMS Model Metadata. With%0Athese two things, third-party components can be imported%0Ainto the PyMT modeling framework.%0A%0ABy default PyMT searches a package named %60csdms%60, if%0Ait exists, for possible plugins that implement a BMI.%0AThe corresponding model metadata for each plugin is%0Aassumed to be located under %60share/csdms%60 in a folder%0Anamed for that plugin. This is the file structure that%0Athe CSDMS babelizer tool uses when wrapping models.%0A%0AAlthough components written in Python can be processed%0Awith the babelizer to bring them into PyMT, this%0Astep should not be necessary as they are already%0Awritten in Python with a BMI. For these models,%0Aplugins can be specified by a string that gives the%0Afully qualified name of the module (in dotted notation)%0Athat contains the object followed by a colon and the%0Aname of the class that implents the BMI. For example,%0A%0A pypkg.mymodule:MyPlugin%0A%0AStandard plugins (those contained in the csdms package)%0Aare automatically loaded while other plugins are%0Adynamically loaded the the pymt %60load_plugin%60 function.%0A %22%22%22%0Afrom
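The docstring added above fixes the 'dotted.module:ClassName' plugin convention. A loader for that specifier shape is small; this sketch mirrors the module's own load_plugin without the logging:

import importlib

def load_plugin(entry_point):
    # 'pypkg.mymodule:MyPlugin' -> the MyPlugin attribute of pypkg.mymodule.
    module_name, _, attr_name = entry_point.partition(':')
    module = importlib.import_module(module_name)
    return getattr(module, attr_name)

# For example, load_plugin('collections:OrderedDict') returns the
# OrderedDict class.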
2c9e88ee3addf491bb8abfb9b0691b282d5ab6ec
Add todo re: pre-processing name parameter
pynano/base.py
pynano/base.py
import requests import xmltodict from .history import NanoHistorySequence as History from .day import NanoDay class NanoBase(object): """Base object for pynano API objects. This object implements the common functionality for fetching and processing API data from the NaNoWriMo API. By default the API is queried "lazily", that is requests are only sent when data from the API is actually requested. This behavior can be overridden with the *prefetch* parameter. """ # API name for this object _name = None # Endpoint URLs for this object; must include a '{name}' placeholder _primary_url = None _history_url = None # The history day class for this object _day_class = NanoDay # The API field with a given day's date _date_field = 'wcdate' # Caches for retrieved data _data = None _history = None def __init__(self, name, prefetch=False): """ :param str name: The name of the current object in the NaNoWriMo API :param bool prefetch: If True, the API will be queried immediately; otherwise it is queried only when needed """ self._name = name if prefetch: self._fetch() @property def history(self): """Historical data for this object. :rtype: NanoHistorySequence """ if self._history is None: # Haven't fetched history data yet, do so now self._fetch(True) # Return the history object return self._history def _fetch_element(self, index): """Get a particular data element, fetching from the API if necessary.""" try: # Attempt to return the requested data element return self._data[index] except (KeyError, TypeError): # Didn't find it, or nothing fetched yet # Fetch data from the API self._fetch() # If we still don't find it, allow the exception to be raised return self._data[index] def _fetch(self, history=False): """Fetch data from the API. If the history parameter is True, then the history URL will be queried for data; if it is False, then the primary URL will be queried. """ if history: # Fetching history data url = self._history_url else: # Fetching primary data url = self._primary_url # Now fetch from the server r = requests.get(url.format(name=self._name)) # Parse the returned data (removing root element) data = next(iter(xmltodict.parse(r.text).values())) # Now stash the data if history: # Stash the history data self._history = self._process_history(data['wordcounts']) if self._data is None: # Haven't fetched primary data, stash what we got del data['wordcounts'] self._data = data else: # Stash the primary data self._data = data def _process_history(self, history_data): """Process the history API's data into an easily-indexed dict.""" processed = {} for data in history_data['wcentry']: # Get the date from the proper field date = data[self._date_field] # Normalize the date field del data[self._date_field] data['date'] = date # Break out the date elements year, month, day = date.split('-') if month != '11': # Do nothing if we somehow got non-November data continue # Construct a Day object from our data the_day = self._day_class(data) # Index the data by day less 1 (0-indexed data) processed[int(day)-1] = the_day return History(processed)
Python
0
@@ -479,16 +479,128 @@ rameter. +%0A%0A .. todo::%0A Process %60name%60 to match what the API requires of it, e.g. replace%0A spaces with '-'. %0A %22%22%22
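The TODO added above asks for the name parameter to be normalised before it reaches the API, e.g. spaces replaced with '-'. One plausible reading of that requirement; the exact rules are not specified in the entry, so the lowercasing here is an assumption:

def normalize_name(name):
    # Hypothetical helper: turn 'My Novel' into 'my-novel' before it is
    # interpolated into the API URLs.
    return name.strip().lower().replace(' ', '-')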
b51aa47a5b9b0a4b57904049af2e073682f77350
Add line blocks to docstring for Slot (#167)
pyquil/slot.py
pyquil/slot.py
############################################################################## # Copyright 2016-2017 Rigetti Computing # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############################################################################## """ Contains Slot pyQuil placeholders for constructing Quil template programs. """ class Slot(object): """ A placeholder for a parameter value. Arithmetic operations: ``+-*/`` Logical: abs, max, <, >, <=, >=, !=, == Arbitrary functions are not supported :param float value: A value to initialize to. Defaults to 0.0 :param function func: An initial function to determine the final parameterized value. """ def __init__(self, value=0.0, func=None): self._value = value self.compute_value = func if func is not None else lambda: self._value def value(self): """ Computes the value of this Slot parameter. """ return self.compute_value() def __repr__(self): return "<Slot {}>".format(self.value()) def __str__(self): return str(self.value()) def __add__(self, val): return Slot(self, lambda: self.value() + val) def __radd__(self, val): return Slot(self, lambda: val + self.value()) def __sub__(self, val): return Slot(self, lambda: self.value() - val) def __rsub__(self, val): return Slot(self, lambda: val - self.value()) def __mul__(self, val): return Slot(self, lambda: self.value() * val) def __rmul__(self, val): return Slot(self, lambda: val * self.value()) def __div__(self, val): return Slot(self, lambda: self.value() / val) __truediv__ = __div__ def __rdiv__(self, val): return Slot(self, lambda: val / self.value()) __rtruediv__ = __rdiv__ def __neg__(self): return Slot(self, lambda: -self.value()) def __abs__(self): return Slot(self, lambda: abs(self.value())) def __max__(self, other): return max(other, self.value()) def __lt__(self, other): return self.value() < other def __le__(self, other): return self.value() <= other def __eq__(self, other): return self.value() == other def __ne__(self, other): return self.value() != other def __gt__(self, other): return self.value() > other def __ge__(self, other): return self.value() >= other
Python
0
@@ -922,16 +922,18 @@ ue.%0A%0A + %7C Arithme @@ -960,16 +960,18 @@ */%60%60%0A + %7C Logical @@ -1006,16 +1006,18 @@ , ==%0A + %7C Arbitra
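Slot above defers every operation by capturing it in a closure, so a whole arithmetic expression stays symbolic until value() is called. The mechanism reduces to this sketch:

class Lazy(object):
    # Each operator returns a new Lazy whose closure recomputes on demand.
    def __init__(self, func):
        self.func = func

    def value(self):
        return self.func()

    def __add__(self, other):
        return Lazy(lambda: self.value() + other)

    def __mul__(self, other):
        return Lazy(lambda: self.value() * other)

x = Lazy(lambda: 2)
y = (x + 3) * 4           # nothing evaluated yet
assert y.value() == 20    # the closures run only here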
0ec3a663c9ec8b28b30e5a0b29d2c5109c657863
Remove prettyprinting indent
pyr/console.py
pyr/console.py
from code import InteractiveConsole import readline, atexit, os, sys, pprint from pygments import highlight from pygments.lexers import PythonLexer from pygments.formatters import Terminal256Formatter from pygments.styles import get_style_by_name from pyr.compiler import PyrCompiler # py3 compatibility # raw_input renamed to input try: input = raw_input except NameError: pass # new string types try: basestring isstr = lambda s: isinstance(s, basestring) except NameError: isstr = lambda s: isinstance(s, str) def softspace(file, newvalue): oldvalue = 0 try: oldvalue = file.softspace except AttributeError: pass try: file.softspace = newvalue except (AttributeError, TypeError): # "attribute-less object" or "read-only attributes" pass return oldvalue class PyrConsole(InteractiveConsole): def __init__(self, locals=None, filename="<console>", histfile=None, pygments_style=None): InteractiveConsole.__init__(self, locals, filename) if not histfile: histfile = os.path.expanduser("~/.pyr_history") self.init_history(histfile) self.init_syntax_highlighting(pygments_style) self.init_pretty_printer() self.compile = PyrCompiler() def init_history(self, histfile): readline.parse_and_bind("tab: complete") if hasattr(readline, "read_history_file"): try: readline.read_history_file(histfile) except IOError: pass atexit.register(self.save_history, histfile) def save_history(self, histfile): readline.write_history_file(histfile) def init_syntax_highlighting(self, pygments_style): self.past_lines = [] if pygments_style: if isstr(pygments_style): self.pygments_style = get_style_by_name(pygments_style) else: self.pygments_style = pygments_style else: self.pygments_style = get_style_by_name('default') self.lexer = PythonLexer() self.formatter = Terminal256Formatter(style=self.pygments_style) def raw_input(self, prompt=""): line = input(prompt) self.syntax_highlight(line, prompt) return line def syntax_highlight(self, line, prompt): if not line.strip(): return is_first_line = (prompt == sys.ps1) if is_first_line: self.past_lines = [line] pretty_line = highlight(line, self.lexer, self.formatter) else: self.past_lines.append(line) code_so_far = "\n".join(self.past_lines) pretty_code = highlight(code_so_far, self.lexer, self.formatter) if pretty_code[-1] == '\n': pretty_code = pretty_code[:-1] pretty_line = pretty_code.split('\n')[-1] + "\n" sys.stdout.write("\x1b[A") # move up one line sys.stdout.write("\r\x1b[K") # go to the start of the line sys.stdout.write(prompt) sys.stdout.write(pretty_line) sys.stdout.flush() def init_pretty_printer(self): self.pp = pprint.PrettyPrinter(indent=4) def pretty_print(self, result): if not result: return output = self.pp.pformat(result) if isstr(result): result = "'%s'" % result if self.pp.isreadable(result): output = highlight(output, self.lexer, self.formatter) sys.stdout.write(output) else: sys.stdout.write(output) sys.stdout.write("\n") def runsource(self, source, filename="<input>", symbol="single"): try: # do some special pretty printing if the source is an expression code = self.compile(source, filename, "eval") eval_expression = True except (OverflowError, SyntaxError, ValueError): eval_expression = False if eval_expression: if code is None: return True try: result = eval(code, self.locals) self.pretty_print(result) except SystemExit: raise except: self.showtraceback() else: if softspace(sys.stdout, 0): print return False else: # fall back to default behavior return InteractiveConsole.runsource(self, source, filename, symbol)
Python
0.000058
@@ -3203,16 +3203,8 @@ ter( -indent=4 )%0A%0A
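The console above leans on pygments to recolour the line just typed, then moves the cursor with ANSI escapes to overwrite it. The highlighting half in isolation (assumes pygments is installed):

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import Terminal256Formatter

def colorize(source):
    # Returns `source` annotated with 256-colour ANSI escape sequences;
    # note that highlight() appends a trailing newline.
    return highlight(source, PythonLexer(), Terminal256Formatter(style='default'))

print(colorize('def f(x): return x + 1'), end='')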
a949f38888bfd0da71dfafb3f39feb3407eeedb6
fix imports, remove old from future imports
pysub/pysub.py
pysub/pysub.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Subtitle downloader using OpenSubtitles.org API Command line command-line user interface """ # Copyright 2016 Nikola Kovacevic <nikolak@outlook.com> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import os import argparse from .pysub_objects import Video, OpenSubtitlesServer from .settings import default_config as config def search_subtitles(file_list, config): """ Searches subitles and if any are found initiates prompt and other functions. Instantiates server and does login. Args: file_list: list, list containing absolute paths of videos for which to search subtitles for """ server = None for count, file_path in enumerate(file_list): video = Video(file_path, config) print("-" * 50 + '\nSearching subtitle for ' '"{}" | ({}/{})'.format(video.file_name, count + 1, len(file_list))) if not config['overwrite'] and video.sub_exists: print("Subtitle already exists") continue if not server: server = OpenSubtitlesServer(config['server'], config['ua'], config['lang']) server.login() if not server.logged_in: exit() if video.file_search_query: video.parse_response(server.query(video.file_search_query, desc="File Based Search")) if video.hash_search_query: video.parse_response(server.query(video.hash_search_query, desc="Hash Based Search")) if not video.subtitles: print("Couldn't find subtitles in " "{} for {}".format(config['lang_name'], file_path)) continue download_prompt(video) if server: server.log_out() # noinspection PyTypeChecker def download_prompt(video, force=False): """ List all found subtitles from video object and ask user to chose which subtitle to download. or to use auto download, skip this one or to quit. 
Args: video: Video class instance with at leas one item in subtitles attribute """ if config['auto_download'] and not force: if not video.auto_download(): print("Can't choose best subtitle automatically.") if config['not_found_prompt']: download_prompt(video, force=True) return user_choice = None possible_choices = ["a", "q", "s", ""] + range(len(video.subtitles)) print("{:<2}: {:^10} {:<} {}\n{}".format("#", "Downloads", "Subtitle Name", " * - Sync subtitle", "-" * 50)) for num, subtitle in enumerate(video.subtitles): print("{:<2}: {:^10} {:<}".format(num, str(subtitle.download_count) + ['', '*'][subtitle.synced], subtitle.sub_filename.encode('utf-8'))) while user_choice not in possible_choices: user_input = input("return - download first, 's' - skip, " "'a' - auto choose, 'q' - quit \n>>>") user_choice = int( user_input) if user_input.isdigit() else user_input.lower() if user_choice not in possible_choices: print("Invalid input.") if type(user_choice) is int: try: video.subtitles[user_choice].download() except IndexError: print("Invalid input only subtitle choices " "from {} to {} are available".format(0, len(video.subtitles))) elif user_choice.lower() == "a": if not video.auto_download(): print("Can't choose best subtitle automatically.") if config['not_found_prompt']: download_prompt(video, force=True) elif user_choice.lower() == "q": print('Quitting') exit() elif user_choice.lower() == "s": print("skipping...") elif user_choice == "": video.subtitles[0].download() else: print("Invalid input") def main(): """ Parse command line arguments, set CONFIG object, get valid files and call search_subtitles function """ valid_files = [] parser = argparse.ArgumentParser(description='Subtitle downloader for TV Shows') parser.add_argument("folder", type=str, help="Folder which will be scanned for allowed " "video files, and subtitles for those files " "will be downloaded") parser.add_argument("-s", "--subfolder", type=str, help="Subfolder to save subtitles to, relative to " "original video file path") parser.add_argument("-l", "--language", type=str, help="Subtitle language, must be an ISO 639-2 Code " "i.e. (eng,fre,deu) Default English(eng)") parser.add_argument("-a", "--auto", action="store_true", help="Auto download subtitles for all files " "without prompt ") parser.add_argument("-o", "--overwrite", action="store_true", help="Overwrite if subtitle with same filename exist.") parser.add_argument("-f", "--format", type=str, help="Additional file formats that will be checked, " "comma separated, specify only file formats " "e.g. 
'avix,temp,format2' (without quotes)") parser.add_argument("-r", "--recursive", action="store_true", help="Search files recursively") parser.add_argument("-p", "--nfprompt", action="store_true", help="Prompt which subtitle to download if auto" "downloader can't choose one") args = parser.parse_args() if args.format: config['file_ext'] += args.format.split(',') directory = args.folder if os.path.isfile(directory): valid_files = [directory] elif os.path.isdir(directory): directory += os.sep if not directory.endswith(os.sep) else "" valid_files = [] if args.recursive: for root, _, files in os.walk(directory): for file in files: if os.path.splitext(file)[1] in config['file_ext']: valid_files.append("{}{}{}".format(root, os.sep, file)) else: valid_files = [directory + name for name in os.listdir(directory) if os.path.splitext(name)[1] in config['file_ext']] else: print("{} is not a valid file or directory".format(directory)) exit() if args.subfolder: config['subfolder'] = args.subfolder config['subfolder'] = config['subfolder'].replace(os.sep, "") if args.language: if len(args.language) == 3: config['lang'] = args.language.lower() elif len(args.language) > 3: config['lang'] = config['languages'].get(args.language.title()) if not config['lang']: raise ValueError("Wrong language value") config['lang_name'] = args.language.title() else: print( 'Argument not ISO 639-2 Code check this for list of valid ' 'codes http://en.wikipedia.org/wiki/List_of_ISO_639-2_codes') exit() if args.auto: config['auto_download'] = True if args.overwrite: config['overwrite'] = True if args.nfprompt: config['not_found_prompt'] = True search_subtitles(valid_files, config) if __name__ == '__main__': main()
Python
0.000001
@@ -741,47 +741,8 @@ e.%0A%0A -from __future__ import print_function%0A%0A impo @@ -769,17 +769,16 @@ e%0A%0Afrom -. pysub_ob @@ -822,17 +822,16 @@ er%0Afrom -. settings
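One bug the port above leaves behind: under the python3 shebang, `["a", "q", "s", ""] + range(len(video.subtitles))` in download_prompt raises TypeError, because Python 3's range is no longer a list. The fix is a one-liner:

possible_choices = ["a", "q", "s", ""] + list(range(5))
# Python 3: ['a', 'q', 's', '', 0, 1, 2, 3, 4]
# Without list(), Python 3 raises:
#   TypeError: can only concatenate list (not "range") to list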
a057850a1dff3319533d983a24de29cd28f63803
Add server check before logging out
pysub/pysub.py
pysub/pysub.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Subtitle downloader using OpenSubtitles.org API Command line command-line user interface """ # Copyright 2014 Nikola Kovacevic <nikolak@outlook.com> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import os import argparse from pysub_objects import Video, OpenSubtitlesServer from settings import default_config as config def search_subtitles(file_list, config): """ Searches subitles and if any are found initiates prompt and other functions. Instantiates server and does login. Args: file_list: list, list containing absolute paths of videos for which to search subtitles for """ server = None for count, file_path in enumerate(file_list): video = Video(file_path, config) print("-" * 50 + '\nSearching subtitle for ' '"{}" | ({}/{})'.format(video.file_name, count + 1, len(file_list))) if not config['overwrite'] and video.sub_exists: print("Subtitle already exists") continue if not server: server = OpenSubtitlesServer(config['server'], config['ua'], config['lang']) server.login() if not server.logged_in: exit() if video.file_search_query: video.parse_response(server.query(video.file_search_query, desc="File Based Search")) if video.hash_search_query: video.parse_response(server.query(video.hash_search_query, desc="Hash Based Search")) if not video.subtitles: print("Couldn't find subtitles in " "{} for {}".format(config['lang_name'], file_path)) continue download_prompt(video) server.log_out() # noinspection PyTypeChecker def download_prompt(video, force=False): """ List all found subtitles from video object and ask user to chose which subtitle to download. or to use auto download, skip this one or to quit. 
Args: video: Video class instance with at leas one item in subtitles attribute """ if config['auto_download'] and not force: if not video.auto_download(): print("Can't choose best subtitle automatically.") if config['not_found_prompt']: download_prompt(video, force=True) return user_choice = None possible_choices = ["a", "q", "s", ""] + range(len(video.subtitles)) print("{:<2}: {:^10} {:<} {}\n{}".format("#", "Downloads", "Subtitle Name", " * - Sync subtitle", "-" * 50)) for num, subtitle in enumerate(video.subtitles): print("{:<2}: {:^10} {:<}".format(num, str(subtitle.download_count) + ['', '*'][subtitle.synced], subtitle.sub_filename.encode('utf-8'))) while user_choice not in possible_choices: user_input = raw_input("return - download first, 's' - skip, " "'a' - auto choose, 'q' - quit \n>>>") user_choice = int( user_input) if user_input.isdigit() else user_input.lower() if user_choice not in possible_choices: print("Invalid input.") if type(user_choice) is int: try: video.subtitles[user_choice].download() except IndexError: print("Invalid input only subtitle choices " "from {} to {} are available".format(0, len(video.subtitles))) elif user_choice.lower() == "a": if not video.auto_download(): print("Can't choose best subtitle automatically.") if config['not_found_prompt']: download_prompt(video, force=True) elif user_choice.lower() == "q": print('Quitting') exit() elif user_choice.lower() == "s": print("skipping...") elif user_choice == "": video.subtitles[0].download() else: print("Invalid input") def main(): """ Parse command line arguments, set CONFIG object, get valid files and call search_subtitles function """ valid_files = [] parser = argparse.ArgumentParser(description='Subtitle downloader for TV Shows') parser.add_argument("folder", type=str, help="Folder which will be scanned for allowed " "video files, and subtitles for those files " "will be downloaded") parser.add_argument("-s", "--subfolder", type=str, help="Subfolder to save subtitles to, relative to " "original video file path") parser.add_argument("-l", "--language", type=str, help="Subtitle language, must be an ISO 639-2 Code " "i.e. (eng,fre,deu) Default English(eng)") parser.add_argument("-a", "--auto", action="store_true", help="Auto download subtitles for all files " "without prompt ") parser.add_argument("-o", "--overwrite", action="store_true", help="Overwrite if subtitle with same filename exist.") parser.add_argument("-f", "--format", type=str, help="Additional file formats that will be checked, " "comma separated, specify only file formats " "e.g. 
'avix,temp,format2' (without quotes)") parser.add_argument("-r", "--recursive", action="store_true", help="Search files recursively") parser.add_argument("-p", "--nfprompt", action="store_true", help="Prompt which subtitle to download if auto" "downloader can't choose one") args = parser.parse_args() if args.format: config['file_ext'] += args.format.split(',') directory = args.folder if os.path.isfile(directory): valid_files = [directory] elif os.path.isdir(directory): directory += os.sep if not directory.endswith(os.sep) else "" valid_files = [] if args.recursive: for root, _, files in os.walk(directory): for file in files: if os.path.splitext(file)[1] in config['file_ext']: valid_files.append("{}{}{}".format(root, os.sep, file)) else: valid_files = [directory + name for name in os.listdir(directory) if os.path.splitext(name)[1] in config['file_ext']] else: print("{} is not a valid file or directory".format(directory)) exit() if args.subfolder: config['subfolder'] = args.subfolder config['subfolder'] = config['subfolder'].replace(os.sep, "") if args.language: if len(args.language) == 3: config['lang'] = args.language.lower() elif len(args.language) > 3: config['lang'] = config['languages'].get(args.language.title()) if not config['lang']: raise ValueError("Wrong language value") config['lang_name'] = args.language.title() else: print( 'Argument not ISO 639-2 Code check this for list of valid ' 'codes http://en.wikipedia.org/wiki/List_of_ISO_639-2_codes') exit() if args.auto: config['auto_download'] = True if args.overwrite: config['overwrite'] = True if args.nfprompt: config['not_found_prompt'] = True search_subtitles(valid_files, config) if __name__ == '__main__': main()
Python
0
@@ -2547,16 +2547,35 @@ video)%0A%0A + if server:%0A serv
6b308f3be482dae291a725c29fcf9e93f7251dfc
add fucking changes to http client
python/http.py
python/http.py
# Copyright 2013 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import logging import urllib2 from keystoneclient.v2_0 import Client as keystoneclient from keystoneclient import exceptions logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s %(filename)s:' '%(lineno)d -- %(message)s', filename=os.path.join(LOGS_DIR, 'sys_test.log'), filemode='w') console = logging.StreamHandler() console.setLevel(logging.INFO) formatter = logging.Formatter('%(asctime)s - %(levelname)s %(filename)s:' '%(lineno)d -- %(message)s') console.setFormatter(formatter) logger = logging.getLogger(__name__) logger.addHandler(console) def debug(logger): def wrapper(func): @functools.wraps(func) def wrapped(*args, **kwargs): logger.debug( "Calling: {} with args: {} {}".format( func.__name__, args, kwargs ) ) result = func(*args, **kwargs) logger.debug( "Done: {} with result: {}".format(func.__name__, result)) return result return wrapped return wrapper logwrap = debug(logger) class HTTPClient(object): def __init__(self, url, keystone_url, credentials, **kwargs): logger.info('Initiate HTTPClient with url %s', url) self.url = url self.keystone_url = keystone_url self.creds = dict(credentials, **kwargs) self.keystone = None self.authenticate() self.opener = urllib2.build_opener(urllib2.HTTPHandler) def authenticate(self): try: logger.info('Initialize keystoneclient with url %s', self.keystone_url) self.keystone = keystoneclient( auth_url=self.keystone_url, **self.creds) # it depends on keystone version, some versions doing auth # explicitly some dont, but we are making it explicitly always self.keystone.authenticate() logger.debug('Authorization token is successfully updated') except exceptions.AuthorizationFailure: logger.warning( 'Cant establish connection to keystone with url %s', self.keystone_url) @property def token(self): if self.keystone is not None: return self.keystone.auth_token return None def get(self, endpoint): req = urllib2.Request(self.url + endpoint) return self._open(req) def post(self, endpoint, data=None, content_type="application/json"): if not data: data = {} logger.info('self url is %s' % self.url) req = urllib2.Request(self.url + endpoint, data=json.dumps(data)) req.add_header('Content-Type', content_type) return self._open(req) def put(self, endpoint, data=None, content_type="application/json"): if not data: data = {} req = urllib2.Request(self.url + endpoint, data=json.dumps(data)) req.add_header('Content-Type', content_type) req.get_method = lambda: 'PUT' return self._open(req) def delete(self, endpoint): req = urllib2.Request(self.url + endpoint) req.get_method = lambda: 'DELETE' return self._open(req) def _open(self, req): try: return self._get_response(req) except urllib2.HTTPError as e: if e.code == 401: logger.warning('Authorization failure: {0}'.format(e.read())) self.authenticate() return self._get_response(req) else: raise def _get_response(self, req): if self.token is not None: logger.debug('Set X-Auth-Token to {0}'.format(self.token)) 
req.add_header("X-Auth-Token", self.token) return self.opener.open(req)
Python
0
@@ -603,16 +603,26 @@ cense.%0A%0A +import os%0A import j @@ -959,27 +959,85 @@ oin( -LOGS_DIR, 'sys_test +os.path.join(os.getcwd()),%0A 'nailgun .log
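The _open method above implements retry-once-after-reauthentication: a 401 is treated as an expired token rather than a failure. The control flow in isolation, matching the entry's urllib2 usage (the function names are illustrative):

import urllib2

def open_with_reauth(opener, request, reauthenticate):
    try:
        return opener.open(request)
    except urllib2.HTTPError as e:
        if e.code != 401:
            raise
        reauthenticate()             # refresh the auth token...
        return opener.open(request)  # ...and retry exactly once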
51d7d217bc1454b93145657c1e8a3e05fecd6cd4
add logger
sequana/mh.py
sequana/mh.py
# -*- coding: utf-8 -*- # # This file is part of Sequana software # # Copyright (c) 2016 - Sequana Development Team # # File author(s): # Thomas Cokelaer <thomas.cokelaer@pasteur.fr> # # Distributed under the terms of the 3-clause BSD license. # The full license is in the LICENSE file, distributed with this software. # # website: https://github.com/sequana/sequana # documentation: http://sequana.readthedocs.io # ############################################################################## from random import uniform, gauss from sequana.lazy import numpy as np from sequana.lazy import pylab from sequana import logger logger.name = __name__ __all__ = ["MetropolisHasting"] class MetropolisHasting(): """ .. plot:: from sequana.mh import MetropolisHasting m = MetropolisHasting() m.Xtarget = [0. , 0.005, 0.01 , 0.016, 0.021, 0.027, 0.032, 0.037, 0.043, 0.048, 0.054, 0.059, 0.065, 0.07 , 0.075, 0.081, 0.086, 0.092, 0.097, 0.103] m.Ytarget = [83, 315, 611, 675, 1497, 5099, 7492, 2797, 842, 334, 117, 63, 33, 22, 11, 3, 3, 1, 0, 2] vec = m.simulate(100000) m.check(bins=100) .. warning: be aware of border effects. For instance if the profile does not go to zero at the lower bound or upper_bound, then the final histogram may be biased on the boundaries. One would need to incrase the boundaries by a range larger than the step used in the proposal / jump function and remove the data outside of the expected boundaries manually. """ def __init__(self): self.burning = 20000 self.lower_bound = 0000 self.upper_bound = 100000 self.aprob = [] self._Xtarget = None def _set_x(self, X): self._Xtarget = np.array(X) self.lower_bound = min(self._Xtarget) self.upper_bound = max(self._Xtarget) def _get_x(self): return self._Xtarget Xtarget = property(_get_x, _set_x) def _set_y(self, Y): self._Ytarget = np.array(Y) def _get_y(self): return self._Ytarget Ytarget = property(_get_y, _set_y) def simulate(self, n=100000, burning=20000, step=None, x0=None): if step is None: self.step = (self.upper_bound - self.lower_bound) / 100. 
step = self.step if x0 is None: self.x0 = (self.upper_bound - self.lower_bound) / 2 + self.lower_bound self.aprob = [] # function target profile NS = self.Ytarget / sum(self.Ytarget) sdnorm = lambda x: np.interp(x, self.Xtarget, NS) x = self.x0 vec = [x] # starting seed # a gaussian jump centered on 0 is used as a random inovation # if the candidate is outside of the boundaries, we try another # candidate def jumper(x): #jump = uniform(-step, step) jump = gauss(0, step) xprime = x + jump while xprime < self.lower_bound or xprime>self.upper_bound: jump = gauss(0, step) xprime = x + jump return xprime for i in range(1, n*2+burning): xprime = jumper(x) aprob = min([1., sdnorm(xprime)/sdnorm(x)]) #acceptance probability u = uniform(0, 1) if u < aprob: x = xprime vec.append(x) self.aprob.append(aprob) if len(vec) == n + burning: break self.burning_vector = vec[0:burning] self.vec = vec[burning:] return vec[burning:] def diagnostics(self, bins=60, clear=True): if clear: pylab.clf() pylab.subplot(3,1,1) pylab.hist(self.aprob, bins=bins) pylab.title("Acceptation") pylab.subplot(3,1,2) pylab.plot(self.vec) pylab.title("proposition") pylab.subplot(3,1,3) y, x, _ = pylab.hist(self.vec, bins, density=True, lw=0.5, ec="k") M1 = max(y) # this normalisation is an approximation/hack pylab.plot(self.Xtarget, self.Ytarget/ (max(self.Ytarget)/M1), "-ro") pylab.title("simulated (blue) and target (red) distributions") def check(self, bins=60): y, x, _ = pylab.hist(self.vec, bins, density=True, lw=0.5, ec="k") M1 = max(y) # this normalisation is an approximation/hack pylab.plot(self.Xtarget, self.Ytarget/ (max(self.Ytarget)/M1), "-ro") pylab.title("simulated (blue) and target (red) distributions")
Python
0.000026
@@ -610,57 +610,60 @@ ab%0A%0A -from sequana import logger%0Alogger.name = +import colorlog%0Alogger = colorlog.getLogger( __name__ %0A%0A%0A_ @@ -658,16 +658,19 @@ __name__ +)%0A%0A %0A%0A%0A__all
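The simulate method above is Metropolis-Hastings with a Gaussian proposal and a target density interpolated from (Xtarget, Ytarget). Stripped of the class plumbing and boundary handling, the textbook acceptance step looks like this (here the chain state is recorded every iteration):

import math
import random

def metropolis_hastings(target, x0, step, n):
    # `target` is an unnormalised density; the normalisation constant
    # cancels in the acceptance ratio target(xprime) / target(x).
    x, chain = x0, []
    for _ in range(n):
        xprime = x + random.gauss(0, step)
        if random.uniform(0, 1) < min(1.0, target(xprime) / target(x)):
            x = xprime
        chain.append(x)
    return chain

# e.g. sampling a unit Gaussian target:
chain = metropolis_hastings(lambda x: math.exp(-x * x / 2), 0.0, 1.0, 10000)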
59b95edb27089eb8f6842b9861945310ec71029b
use packaging.version
py/desidatamodel/test/datamodeltestcase.py
py/desidatamodel/test/datamodeltestcase.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst # -*- coding: utf-8 -*- """Utility class used by other tests. """ import os import tempfile import unittest import logging import shutil from astropy import __version__ as astropyVersion from desiutil.log import log from desiutil.test.test_log import NullMemoryHandler DM = 'DESIDATAMODEL' class DataModelTestCase(unittest.TestCase): @classmethod def setUpClass(cls): cls.astropyVersion = float('.'.join(astropyVersion.split('.')[0:2])) cls.maxDiff = None cls.data_dir = tempfile.mkdtemp() if DM in os.environ: cls.old_env = os.environ[DM] else: cls.old_env = None os.environ[DM] = os.path.dirname( # root/ os.path.dirname( # py/ os.path.dirname( # desidatamodel/ os.path.dirname(__file__)))) # test/ cls.doc_dir = os.path.join(os.environ[DM], 'doc') @classmethod def tearDownClass(cls): if cls.old_env is None: del os.environ[DM] else: os.environ[DM] = cls.old_env shutil.rmtree(cls.data_dir) def setUp(self): # Replace the log handler with something that writes to memory. self.cache_level = log.level root_logger = logging.getLogger(log.name.rsplit('.', 1)[0]) while len(root_logger.handlers) > 0: h = root_logger.handlers[0] h.flush() self.cache_handler = h fmt = h.formatter root_logger.removeHandler(h) mh = NullMemoryHandler() mh.setFormatter(fmt) root_logger.addHandler(mh) log.setLevel(logging.DEBUG) def tearDown(self): root_logger = logging.getLogger(log.name.rsplit('.', 1)[0]) while len(root_logger.handlers) > 0: h = root_logger.handlers[0] h.flush() root_logger.removeHandler(h) root_logger.addHandler(self.cache_handler) log.setLevel(self.cache_level) self.cache_level = None self.cache_handler = None def assertLog(self, logger, order=-1, message=''): """Asserts that the `message` is at line `order` in the log buffer. """ root_logger = logging.getLogger(logger.name.rsplit('.', 1)[0]) handler = root_logger.handlers[0] record = handler.buffer[order] self.assertEqual(record.getMessage(), message) def assertInLog(self, logger, message=''): """Asserts that the `message` is one of the lines in the log buffer. """ root_logger = logging.getLogger(logger.name.rsplit('.', 1)[0]) handler = root_logger.handlers[0] ok = False for record in handler.buffer: if record.getMessage() == message: ok = True break if not ok: self.assertTrue(ok, f'Not found in log messages: {message}') def badUnitMessage(self, unit): """Returns a string that can be used to match errors related to bad units. """ m = "'{0}' did not parse as fits unit: At col {1:d}, Unit 'ergs' not supported by the FITS standard. Did you mean erg?".format(unit, unit.index('ergs')) if self.astropyVersion >= 4: m += " If this is meant to be a custom unit, define it with 'u.def_unit'. To have it recognized inside a file reader or other code, enable it with 'u.add_enabled_units'. For details, see http://docs.astropy.org/en/latest/units/combining_and_defining.html" if self.astropyVersion >= 4.1: m = m.replace('http', 'https') return m
Python
0.000001
@@ -193,16 +193,46 @@ t shutil +%0Afrom packaging import version %0A%0Afrom a @@ -502,22 +502,21 @@ n = -float('.'.join +version.parse (ast @@ -530,25 +530,8 @@ sion -.split('.')%5B0:2%5D) )%0A @@ -3404,17 +3404,36 @@ sion %3E= -4 +version.parse('4.0') :%0A @@ -3732,11 +3732,28 @@ %3E= +version.parse(' 4.1 +') :%0A
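The swap above from float('.'.join(v.split('.')[0:2])) to packaging.version.parse matters as soon as a component has a different number of digits: parse compares release tuples, while floats compare decimals.

from packaging import version

assert version.parse('4.10') > version.parse('4.9')   # correct ordering
assert float('4.10') < float('4.9')                   # 4.1 < 4.9: the old bug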
07a4cb667e702a1cbb758a3761ec41b89fa98313
Add options to python script
python/test.py
python/test.py
#!/usr/bin/env python # Copyright (C) 2010 Red Hat, Inc. # # This is free software; you can redistribute it and/or modify it # under the terms of the GNU Lesser General Public License as # published by the Free Software Foundation; either version 2.1 of # the License, or (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this software; if not, write to the Free # Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA, or see the FSF site: http://www.fsf.org. opts = { 'host' : 'localhost', 'port' : 8080, 'impl' : "dummy", } opts['uri'] = 'http://%(host)s:%(port)s/rhevm-api-%(impl)s-war/' % opts import http import xmlfmt import yamlfmt import jsonfmt links = http.HEAD_for_links(opts) for fmt in [xmlfmt, yamlfmt, jsonfmt]: print "=== ", fmt.MEDIA_TYPE, " ===" for host in fmt.parseHostCollection(http.GET(opts, links['hosts'], fmt.MEDIA_TYPE)): print fmt.parseHost(http.GET(opts, host.link.href, fmt.MEDIA_TYPE)) for vm in fmt.parseVmCollection(http.GET(opts, links['vms'], fmt.MEDIA_TYPE)): print fmt.parseVM(http.GET(opts, vm.link.href, fmt.MEDIA_TYPE)) foo_vm = fmt.VM() foo_vm.name = 'foo' foo_vm = fmt.parseVM(http.POST(opts, links['vms'], foo_vm.dump(), fmt.MEDIA_TYPE)) bar_host = fmt.Host() bar_host.name = 'bar' bar_host = fmt.parseHost(http.POST(opts, links['hosts'], bar_host.dump(), fmt.MEDIA_TYPE)) print http.POST(opts, foo_vm.link.href + "/start", type = fmt.MEDIA_TYPE) print http.GET(opts, foo_vm.link.href, type = fmt.MEDIA_TYPE) foo_vm.name = 'bar' print http.PUT(opts, foo_vm.link.href, foo_vm.dump(), fmt.MEDIA_TYPE) bar_host.name = 'foo' print http.PUT(opts, bar_host.link.href, bar_host.dump(), fmt.MEDIA_TYPE) print http.DELETE(opts, foo_vm.link.href) print http.DELETE(opts, bar_host.link.href)
Python
0.000002
@@ -817,214 +817,562 @@ g.%0A%0A -opts = %7B%0A 'host' : 'localhost',%0A 'port' : 8080,%0A 'impl' : %22dummy%22,%0A%7D%0Aopts%5B'uri'%5D = 'http://%25(host)s:%25(port)s/rhevm-api-%25(impl)s-war/' %25 opts%0A%0Aimport http%0Aimport xmlfmt%0Aimport yamlfmt%0Aimport jsonfmt +import http%0Aimport xmlfmt%0Aimport yamlfmt%0Aimport jsonfmt%0Aimport sys%0Aimport getopt%0A%0Aopts = %7B%0A 'host' : 'localhost',%0A 'port' : 8080,%0A 'impl' : %22dummy%22,%0A%7D%0A%0Aif len(sys.argv) %3E 1:%0A options, oargs = getopt.getopt(sys.argv%5B1:%5D, %22h:p:i:%22, %5B%22host=%22, %22port=%22, %22impl=%22%5D)%0A for opt, a in options:%0A if opt in (%22-h%22, %22--host%22):%0A opts%5B'host'%5D = a%0A if opt in (%22-p%22, %22--port%22):%0A opts%5B'port'%5D = a%0A if opt in (%22-i%22, %22--impl%22):%0A opts%5B'impl'%5D = a%0A%0A%0Aopts%5B'uri'%5D = 'http://%25(host)s:%25(port)s/rhevm-api-%25(impl)s/' %25 opts %0A%0Ali
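The options added above use getopt's paired short/long spec, where a trailing ':' or '=' marks an option that takes a value. The pattern in miniature:

import getopt
import sys

opts = {'host': 'localhost', 'port': 8080}
options, leftover = getopt.getopt(sys.argv[1:], 'h:p:', ['host=', 'port='])
for opt, value in options:
    if opt in ('-h', '--host'):
        opts['host'] = value
    elif opt in ('-p', '--port'):
        opts['port'] = int(value)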
4656f7834f2c56f9dffcb775a5c9833304a3a55f
Fix doctests with Python 2
pyuca/utils.py
pyuca/utils.py
""" utilities for formatting the datastructures used in pyuca. Useful mostly for debugging output. """ from __future__ import unicode_literals def hexstrings2int(hexstrings): """ list of hex strings to list of integers >>> hexstrings2int(["0000", "0001", "FFFF"]) [0, 1, 65535] """ return [int(hexstring, 16) for hexstring in hexstrings] def int2hexstrings(number_list): """ list of integers to list of 4-digit hex strings >>> int2hexstrings([0, 1, 65535]) ['0000', '0001', 'FFFF'] """ return ["{:04X}".format(n) for n in number_list] def format_collation_elements(collation_elements): """ format collation element array (list of list of integer weights) >>> format_collation_elements([[1, 2, 3], [4, 5]]) '[0001.0002.0003], [0004.0005]' >>> format_collation_elements(None) """ if collation_elements is None: return None else: return ", ".join( "[" + ".".join( int2hexstrings(collation_element) ) + "]" for collation_element in collation_elements ) def format_sort_key(sort_key): """ format sort key (list of integers) with | level boundaries >>> format_sort_key([1, 0, 65535]) '0001 | FFFF' """ return " ".join( ("{:04X}".format(x) if x else "|") for x in sort_key )
Python
0.000056
@@ -542,16 +542,20 @@ return %5B +str( %22%7B:04X%7D%22 @@ -564,16 +564,17 @@ ormat(n) +) for n i @@ -719,32 +719,36 @@ ights)%0A%0A %3E%3E%3E +str( format_collation @@ -777,16 +777,17 @@ %5B4, 5%5D%5D) +) %0A '%5B0 @@ -1214,24 +1214,28 @@ ies%0A %3E%3E%3E +str( format_sort_ @@ -1248,24 +1248,25 @@ , 0, 65535%5D) +) %0A '0001 %7C
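The str(...) wrappers added above exist because pyuca returns unicode strings, and under Python 2 their repr carries a u prefix that the doctest's expected output does not match. The behaviour being papered over:

value = u'FFFF'
print(repr(value))        # Python 2: u'FFFF'   Python 3: 'FFFF'
print(repr(str(value)))   # 'FFFF' on both, for ASCII-only content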
7e3d913c6a669d075a8b50995abbfaed0d550a49
Refactor, fix incorrect spelling
pyactive/tests/test_relation_belongs_to.py
pyactive/tests/test_relation_belongs_to.py
# -*- coding: utf-8 -*- from ..record import ActiveRecord from ..relation import belongs_to, BelongsTo from pyactive.utils import ColumnNotInColumns from datetime import datetime import unittest import fudge class Person(ActiveRecord): __columns__ = ['id', 'created_at', 'updated_at'] class RelationBelongsToTestCase(unittest.TestCase): def test_belongs_to_relation_init(self): class User(ActiveRecord): __columns__ = ['id', 'created_at', 'updated_at'] class Phone(ActiveRecord): __columns__ = ['id', 'created_at', 'updated_at', 'user_id'] r = BelongsTo(target_class=User, owner_class=Phone, foreign_key='user_id', owner_attr='user') self.assertTrue(r.owner is Phone) self.assertEqual('user_id', r.foreign_key) self.assertTrue(r.target is User) self.assertEqual('id', r.target_pk_column) self.assertEqual('user', r.owner_attr) def test_belogns_to_relation_init_without_owner_attr(self): class User(ActiveRecord): __columns__ = ['id', 'created_at', 'updated_at'] class Phone(ActiveRecord): __columns__ = ['id', 'created_at', 'updated_at', 'user_id'] r = BelongsTo(target_class=User, owner_class=Phone, foreign_key='user_id') self.assertTrue(r.owner is Phone) self.assertEqual('user_id', r.foreign_key) self.assertTrue(r.target is User) self.assertEqual('id', r.target_pk_column) self.assertEqual('user', r.owner_attr) def test_belongs_to_relation_init_without_foreign_key(self): class User(ActiveRecord): __columns__ = ['id', 'created_at', 'updated_at'] class Phone(ActiveRecord): __columns__ = ['id', 'created_at', 'updated_at', 'user_id'] r = BelongsTo(target_class=User, owner_class=Phone, owner_attr='user') self.assertTrue(r.owner is Phone) self.assertEqual('user_id', r.foreign_key) self.assertTrue(r.target is User) self.assertEqual('id', r.target_pk_column) self.assertEqual('user', r.owner_attr) def test_belongs_to_relation_init_should_raise_exception_if_relation_foreign_key_not_in_columns(self): class User(ActiveRecord): __columns__ = ['id', 'created_at', 'updated_at'] class Phone(ActiveRecord): __columns__ = ['id', 'created_at', 'updated_at'] r = BelongsTo(target_class=User, owner_class=Phone) self.assertRaises(ColumnNotInColumns, lambda: r.foreign_key) def test_belongs_to_relation_init_with_target_classpath(self): class Phone(ActiveRecord): __columns__ = ['id', 'created_at', 'updated_at', 'person_id'] r = BelongsTo(target_class="pyactive.tests.test_relation_belongs_to.Person", owner_class=Phone, owner_attr='user') self.assertTrue(r.owner is Phone) self.assertEqual('person_id', r.foreign_key) self.assertTrue(r.target is Person) self.assertEqual('id', r.target_pk_column) self.assertEqual('user', r.owner_attr) @fudge.patch('pyactive.record.ar.Criteria') def test_phone_belongs_to_user_relation(self, Criteria): class User(ActiveRecord): __columns__ = ['id', 'name', 'created_at', 'updated_at'] class Phone(ActiveRecord): __columns__ = ['id', 'created_at', 'updated_at', 'user_id'] belongs_to(User) Criteria.is_callable().returns_fake()\ .expects('from_').returns_fake()\ .expects('where').with_args(id=(1, )).returns_fake()\ .expects('first').returns(Phone(id=1, created_at=datetime.now(), updated_at=datetime.now(), user_id=10)) p = Phone.find(1) Criteria.is_callable().returns_fake()\ .expects('from_').returns_fake()\ .expects('where').with_args(id=10).returns_fake()\ .expects('first').returns(User(id=10, name='py', created_at=datetime.now(), updated_at=datetime.now())) u = p.user self.assertEqual('py', u.name) self.assertEqual(10, u.id) self.assertTrue(isinstance(u.created_at, datetime)) 
self.assertTrue(isinstance(u.updated_at, datetime))
Python
0.000012
@@ -963,18 +963,18 @@ est_belo -g n +g s_to_rel @@ -4304,8 +4304,64 @@ time))%0A%0A + %0Aif __name__ == '__main__':%0A unittest.main()%0A
9d1a44dd85b452430f90e1d5eb2400c9869934b6
use get_latest() instead of _latest() for #393
pycqed/instrument_drivers/pq_parameters.py
pycqed/instrument_drivers/pq_parameters.py
from qcodes.instrument.parameter import ManualParameter from qcodes.utils.validators import Validator, Strings class InstrumentParameter(ManualParameter): """ Args: name (string): the name of the instrument that one wants to add. instrument (Optional[Instrument]): the "parent" instrument this parameter is attached to, if any. initial_value (Optional[string]): starting value, the only invalid value allowed, and None is only allowed as an initial value, it cannot be set later **kwargs: Passed to Parameter parent class """ def get_instr(self): """ Returns the instance of the instrument with the name equal to the value of this parameter. """ instrument_name = self.get() # note that _instrument refers to the instrument this parameter belongs # to, while the instrument_name is the instrument that is the value # of this parameter. return self._instrument.find_instrument(instrument_name) def set_validator(self, vals): """ Set a validator `vals` for this parameter. Args: vals (Validator): validator to set """ if vals is None: self._vals = Strings() elif isinstance(vals, Validator): self._vals = vals else: raise TypeError('vals must be a Validator') class ConfigParameter(ManualParameter): # TODO: move this to qcodes as a pull request """ Define one parameter that reflects a manual configuration setting. Args: name (string): the local name of this parameter instrument (Optional[Instrument]): the instrument this applies to, if any. initial_value (Optional[string]): starting value, the only invalid value allowed, and None is only allowed as an initial value, it cannot be set later **kwargs: Passed to Parameter parent class """ def __init__(self, name, instrument=None, initial_value=None, **kwargs): super().__init__(name=name, **kwargs) self._instrument = instrument # if the instrument does not have _config_changed attribute creates it if not hasattr(self._instrument, '_config_changed'): self._instrument._config_changed = True self._meta_attrs.extend(['instrument', 'initial_value']) if initial_value is not None: self.validate(initial_value) self._save_val(initial_value) def set(self, value): """ Validate and saves value. If the value is different from the latest value it sets the Args: value (any): value to validate and save """ self.validate(value) if value != self._latest()['value']: self._instrument._config_changed = True self._save_val(value) def get(self): """ Return latest value""" return self._latest()['value']
Python
0
@@ -2812,24 +2812,27 @@ lue != self. +get _latest()%5B'v @@ -2828,25 +2828,16 @@ latest() -%5B'value'%5D :%0A @@ -2987,16 +2987,19 @@ rn self. +get _latest( @@ -3003,14 +3003,5 @@ st() -%5B'value'%5D %0A
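The diff above routes reads through get_latest() rather than indexing the private _latest() dict, but the underlying pattern is unchanged: compare against the last saved value and flag the owner only when a set really changes something. As a standalone sketch (this shows the pattern, not the qcodes API):

class TrackedValue(object):
    def __init__(self, initial=None):
        self._latest = initial
        self.changed = False

    def set(self, value):
        # Only flip the dirty flag when the value actually differs.
        if value != self._latest:
            self.changed = True
        self._latest = value

    def get(self):
        return self._latest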
a23374939583b3954baa1418f12ce309442d31ff
Mark certain resources as uncompressible
pyforge/pyforge/lib/widgets/form_fields.py
pyforge/pyforge/lib/widgets/form_fields.py
from pylons import c from pyforge.model import User from formencode import validators as fev import ew class MarkdownEdit(ew.InputField): template='genshi:pyforge.lib.widgets.templates.markdown_edit' validator = fev.UnicodeString() params=['name','value','show_label'] show_label=True name=None value=None def resources(self): yield ew.resource.JSLink('js/jquery.markitup.pack.js') yield ew.resource.JSLink('js/jquery.markitup.markdown.js') yield ew.resource.JSLink('js/sf_markitup.js') yield ew.resource.CSSLink('css/markitup.css') yield ew.resource.CSSLink('css/markitup_markdown.css') yield ew.resource.CSSLink('css/markitup_sf.css') class UserTagEdit(ew.InputField): template='genshi:pyforge.lib.widgets.templates.user_tag_edit' validator = fev.UnicodeString() params=['name','user_tags', 'className', 'show_label'] show_label=True name=None user_tags=None className='' def resources(self): yield ew.resource.JSLink('js/jquery.tag.editor.js') class LabelEdit(ew.InputField): template='genshi:pyforge.lib.widgets.templates.label_edit' validator = fev.UnicodeString() params=['name', 'className', 'show_label', 'value'] show_label=True name=None value=None className='' def resources(self): yield ew.resource.JSLink('js/jquery.tag.editor.js') class ProjectUserSelect(ew.InputField): template='genshi:pyforge.lib.widgets.templates.project_user_select' params=['name', 'value', 'size', 'all', 'users', 'show_label'] show_label=True name=None value=None size=None all=False def __init__(self, **kw): self.users = User.query.find({'_id':{'$in':[role.user_id for role in c.project.roles]}}).all() if not isinstance(self.value, list): self.value=[self.value] super(ProjectUserSelect, self).__init__(**kw) class AttachmentList(ew.Widget): template='genshi:pyforge.lib.widgets.templates.attachment_list' params=['attachments','edit_mode'] attachments=None edit_mode=None class SubmitButton(ew.SubmitButton): attrs={'class':'ui-state-default ui-button ui-button-text'}
Python
0.999999
@@ -408,24 +408,40 @@ tup.pack.js' +, compress=False )%0A yi @@ -599,24 +599,40 @@ arkitup.css' +, compress=False )%0A yi @@ -682,16 +682,32 @@ own.css' +, compress=False )%0A
2b5e94f6c301932eb9387bba9a80414a714e2b38
Tidy up the references
pygraphc/abstraction/ClusterAbstraction.py
pygraphc/abstraction/ClusterAbstraction.py
class ClusterAbstraction(object): """Get cluster abstraction based on longest common substring [jtjacques2010]_. References ---------- .. [jtjacques2010] jtjacques, Longest common substring from more than two strings - Python. http://stackoverflow.com/questions/2892931/longest-common-substring-from-more-than-two-strings-python. """ @staticmethod def dp_lcs(graph, clusters): """The processed string are preprocessed message from raw event log messages. Parameters ---------- graph : graph A graph to be processed. clusters : dict[list] Dictionary containing a list of node identifier per cluster. Returns ------- abstraction : dict[str] Dictionary of abstraction string per cluster. """ abstraction = {} for cluster_id, nodes in clusters.iteritems(): data = [] for node_id in nodes: data.append(graph.node[node_id]['preprocessed_event']) abstraction[cluster_id] = ClusterAbstraction.lcs(data) return abstraction @staticmethod def lcs(data): """Get longest common substring from multiple string. Parameters ---------- data : list[str] List of string to be processed. Returns ------- substr : str A single string as longest common substring. """ substr = '' if len(data) > 1 and len(data[0]) > 0: for i in range(len(data[0])): for j in range(len(data[0]) - i + 1): if j > len(substr) and all(data[0][i:i + j] in x for x in data): substr = data[0][i:i + j] return substr
Python
0.019729
@@ -245,20 +245,16 @@ %0A - http://s
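ClusterAbstraction.lcs above is the brute-force longest-common-substring: every substring of the first string is tested for membership in all the others. An equivalent standalone form, scanning candidate lengths longest-first per start index so the first hit wins:

def longest_common_substring(strings):
    best = ''
    if strings and strings[0]:
        first = strings[0]
        for i in range(len(first)):
            # Only candidates longer than the current best are worth testing.
            for j in range(len(first) - i, len(best), -1):
                candidate = first[i:i + j]
                if all(candidate in s for s in strings):
                    best = candidate
                    break
    return best

assert longest_common_substring(['interview', 'overview', 'preview']) == 'view'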
1bd5d7179c49004f47e169512a784248979fbea3
Update compile_engine.py (#4393)
python/tvm/relay/backend/compile_engine.py
python/tvm/relay/backend/compile_engine.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Backend code generation engine.""" from __future__ import absolute_import from ..base import register_relay_node, NodeBase from ... import target as _target from .. import expr as _expr from . import _backend @register_relay_node class CachedFunc(NodeBase): """Low-level tensor function to back a relay primitive function. """ @register_relay_node class CCacheKey(NodeBase): """Key in the CompileEngine. Parameters ---------- source_func : tvm.relay.Function The source function. target : tvm.Target The target we want to run the function on. """ def __init__(self, source_func, target): self.__init_handle_by_constructor__( _backend._make_CCacheKey, source_func, target) @register_relay_node class CCacheValue(NodeBase): """Value in the CompileEngine, including usage statistics. """ def _get_cache_key(source_func, target): if isinstance(source_func, _expr.Function): if isinstance(target, str): target = _target.create(target) if not target: raise ValueError("Need target when source_func is a Function") return CCacheKey(source_func, target) if not isinstance(source_func, CCacheKey): raise TypeError("Expect source_func to be CCacheKey") return source_func @register_relay_node class CompileEngine(NodeBase): """CompileEngine to get lowered code. """ def __init__(self): raise RuntimeError("Cannot construct a CompileEngine") def lower(self, source_func, target=None): """Lower a source_func to a CachedFunc. Parameters ---------- source_func : Union[tvm.relay.Function, CCacheKey] The source relay function. target : tvm.Target The target platform. Returns ------- cached_func: CachedFunc The result of lowering. """ # pylint: disable=broad-except try: key = _get_cache_key(source_func, target) return _backend._CompileEngineLower(self, key) except Exception: import traceback msg = traceback.format_exc() msg += "Error during compile func\n" msg += "--------------------------\n" msg += source_func.astext(show_meta_data=False) msg += "--------------------------\n" raise RuntimeError(msg) def lower_shape_func(self, source_func, target=None): key = _get_cache_key(source_func, target) return _backend._CompileEngineLowerShapeFunc(self, key) def jit(self, source_func, target=None): """JIT a source_func to a tvm.Function. Parameters ---------- source_func : Union[tvm.relay.Function, CCacheKey] The source relay function. target : tvm.Target The target platform. Returns ------- cached_func: CachedFunc The result of lowering. """ key = _get_cache_key(source_func, target) return _backend._CompileEngineJIT(self, key) def clear(self): """clear the existing cached functions""" _backend._CompileEngineClear(self) def items(self): """List items in the cache. Returns ------- item_list : List[Tuple[CCacheKey, CCacheValue]] The list of items. 
""" res = _backend._CompileEngineListItems(self) assert len(res) % 2 == 0 return [(res[2*i], res[2*i+1]) for i in range(len(res) // 2)] def dump(self): """Return a string representation of engine dump. Returns ------- dump : str The dumped string representation """ items = self.items() res = "====================================\n" res += "CompilerEngine dump, %d items cached\n" % len(items) for k, v in items: res += "------------------------------------\n" res += "target={}\n".format(k.target) res += "use_count={}\n".format(v.use_count) res += "func_name={}\n".format(v.cached_func.func_name) res += k.source_func.astext() + "\n" res += "===================================\n" return res def get(): """Get the global compile engine. Returns ------- engine : tvm.relay.backend.CompileEngine The compile engine. """ return _backend._CompileEngineGlobal()
Python
0
@@ -3713,36 +3713,35 @@ -------%0A -cach +jit ed_func: CachedF @@ -3729,34 +3729,36 @@ jited_func: -CachedFunc +tvm.Function %0A @@ -3768,32 +3768,38 @@ e result of -lowering +jited function .%0A %22%22
ac61b2f99f91a274572e96be8f0136871288f1bb
update timer to be able to measure time multiple times
proso/util.py
proso/util.py
import re import importlib import time _timers = {} def timer(name): now = time.clock() if name in _timers: diff = now - _timers[name] return diff _timers[name] = now def instantiate(classname, *args, **kwargs): matched = re.match('(.*)\.(\w+)', classname) if matched is None: raise Exception('can instantiate only class with packages: %s' % classname) module = importlib.import_module(matched.groups()[0]) return getattr(module, matched.groups()[1])(*args, **kwargs)
Python
0
@@ -84,15 +84,30 @@ ime. -clock() +time()%0A diff = None %0A @@ -166,28 +166,8 @@ me%5D%0A - return diff%0A @@ -186,16 +186,32 @@ %5D = now%0A + return diff%0A %0A%0Adef in
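For illustration, the post-diff timer() can be reconstructed directly from the hunks above: the function now stamps every call and returns the time elapsed since the previous call with the same name (None on first use), which is what lets one timer be read repeatedly. A minimal runnable sketch, assuming the module-level _timers dict from the original file:

import time

_timers = {}

def timer(name):
    # time.time() is wall-clock time; the removed time.clock() measured
    # CPU time on some platforms, which made repeated readings unreliable
    now = time.time()
    diff = None
    if name in _timers:
        diff = now - _timers[name]
    _timers[name] = now    # refresh on every call so the timer is reusable
    return diff

timer("load")            # first call: returns None
time.sleep(0.1)
print(timer("load"))     # roughly 0.1, and "load" can keep being measured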
c85db50907f21495ea5927a0f2bdc88d4ba4e5b9
fix import order
relengapi/blueprints/slaveloan/__init__.py
relengapi/blueprints/slaveloan/__init__.py
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. import flask_login import logging import sqlalchemy as sa from sqlalchemy import asc from flask import Blueprint from flask import g from flask import render_template from relengapi import apimethod from relengapi import p from relengapi.blueprints.slaveloan import task_groups from relengapi.blueprints.slaveloan.slave_mappings import slave_patterns from relengapi.blueprints.slaveloan.slave_mappings import slave_to_slavetype from relengapi.util import tz from werkzeug.exceptions import BadRequest from werkzeug.exceptions import InternalServerError from relengapi.blueprints.slaveloan import rest from relengapi.blueprints.slaveloan import bugzilla from relengapi.blueprints.slaveloan.model import History from relengapi.blueprints.slaveloan.model import Humans from relengapi.blueprints.slaveloan.model import Loans from relengapi.blueprints.slaveloan.model import Machines logger = logging.getLogger(__name__) bp = Blueprint('slaveloan', __name__, template_folder='templates', static_folder='static') p.slaveloan.admin.doc("Administer Slaveloans for all users") @bp.record def init_blueprint(state): bugzilla.init_app(state.app) ################## # RESTful APIs # ################## @bp.route('/loans/') @p.slaveloan.admin.require() @apimethod([rest.Loan]) def get_loans(): "Get the list of all `active` loans you can see" # XXX: Use permissions to filter if not an admin loans = Loans.query.filter(Loans.machine_id.isnot(None)) return [l.to_wsme() for l in loans.all()] @bp.route('/loans/<int:loanid>') @p.slaveloan.admin.require() @apimethod(rest.Loan, int) def get_loan(loanid): "Get the details of a loan, by id" # XXX: Use permissions to ensure admin | loanee l = Loans.query.get(loanid) return l.to_wsme() @bp.route('/loans/<int:loanid>/history') @p.slaveloan.admin.require() @apimethod([rest.HistoryEntry], int) def get_loan_history(loanid): "Get the history associated with this loan" # XXX: Use permissions to ensure admin | loanee histories = History.query \ .filter(History.loan_id == loanid) \ .order_by(asc(History.timestamp)) return [h.to_wsme() for h in histories.all()] @bp.route('/loans/all') @p.slaveloan.admin.require() @apimethod([rest.Loan]) def get_all_loans(): "Get the list of all loans you can see" # XXX: Use permissions to filter if not an admin loans = Loans.query return [l.to_wsme() for l in loans.all()] @bp.route('/loans/new', methods=['POST']) @p.slaveloan.admin.require() @apimethod(rest.Loan, body=rest.LoanAdminRequest) def new_loan_from_admin(body): "Creates a new loan entry" if not body.status: raise BadRequest("Missing Status Field") if not body.ldap_email: raise BadRequest("Missing LDAP E-Mail") if not body.bugzilla_email: raise BadRequest("Missing Bugzilla E-Mail") if body.status != 'PENDING': if not body.fqdn: raise BadRequest("Missing Machine FQDN") if not body.ipaddress: raise BadRequest("Missing Machine IP Address") session = g.db.session('relengapi') try: if body.status != 'PENDING': m = Machines.as_unique(session, fqdn=body.fqdn, ipaddress=body.ipaddress) h = Humans.as_unique(session, ldap=body.LDAP, bugzilla=body.bugzilla) except sa.exc.IntegrityError: raise InternalServerError("Integrity Error from Database, please retry.") if body.status != 'PENDING': l = Loans(status=body.status, human=h, machine=m) else: l = Loans(status=body.status, human=h) history = History(for_loan=l, 
timestamp=tz.utcnow(), msg="Adding to slave loan tool via admin interface") session.add(l) session.add(history) session.commit() return l.to_wsme() @bp.route('/loans/request', methods=['POST']) @p.slaveloan.admin.require() @apimethod(rest.Loan, body=rest.LoanRequest) def new_loan_request(body): "User Loan Requesting, returns the id of the loan" if not body.ldap_email: raise BadRequest("Missing LDAP E-Mail") if not body.requested_slavetype: raise BadRequest("Missing slavetype") slavetype = slave_to_slavetype(body.requested_slavetype) if not slavetype: raise BadRequest("Unsupported slavetype") if not body.bugzilla_email: # Set bugzilla e-mail to ldap e-mail by default body.bugzilla_email = body.ldap_email session = g.db.session('relengapi') try: h = Humans.as_unique(session, ldap=body.ldap_email, bugzilla=body.bugzilla_email) except sa.exc.IntegrityError: raise InternalServerError("Integrity Error from Database, please retry.") if body.loan_bug_id: l = Loans(status="PENDING", human=h, bug_id=body.loan_bug_id) else: l = Loans(status="PENDING", human=h) history = History(for_loan=l, timestamp=tz.utcnow(), msg="Requesting loan for slavetype %s (original: '%s')" % (slavetype, body.requested_slavetype)) session.add(l) session.add(history) session.commit() chain_of_stuff = task_groups.generate_loan(loanid=l.id, slavetype=slavetype) chain_of_stuff.delay() return l.to_wsme() @bp.route('/machine/classes') @apimethod({unicode: [unicode]}) def get_machine_classes(): """ A mapping of what you'll get with a given loan, and globs of the slave types associated. Returns a mapping keyed on type of loan against slave-name globs that it corresponds to e.g.:: { "b-2008-ix": [ "b-2008-ix-*", "b-2008-sm-*", "w64-ix-*" ], } Where the above would tell you we are loaning a 'b-2008-ix' machine for slaves which match any of the globs in the array.""" return slave_patterns() ################## # User Interface # ################## @bp.route('/') @flask_login.login_required def root(): return render_template('slaveloan_root.html') @bp.route('/details/<int:id>') @flask_login.login_required @p.slaveloan.admin.require() def loan_details(id): g.loanid = id return render_template('slaveloan_details.html') @bp.route('/admin/') @flask_login.login_required @p.slaveloan.admin.require() def admin(): return render_template('slaveloan_admin.html')
Python
0.000001
@@ -792,20 +792,24 @@ import -rest +bugzilla %0Afrom re @@ -840,32 +840,28 @@ loan import -bugzilla +rest %0Afrom releng
ce1350bb42028ad29356af275ab5b90257ccf0cb
fix import
yr/__init__.py
yr/__init__.py
from yr import YR
Python
0.000001
@@ -1,13 +1,14 @@ from +. yr impor
1cfc885597f14282245c68179922e27e3974a26f
use environment var for file location
publish-ci.py
publish-ci.py
import requests import json import os # import tarfile # def make_tarfile(output_filename, source_dir): # with tarfile.open(output_filename, "w:gz") as tar: # tar.add(source_dir, arcname=os.path.basename(source_dir)) uri = 'https://zenodo.org/api/deposit/depositions' access_token = os.environ['ZENODO_API_KEY'] headers = {"Content-Type": "application/json"} # login response = requests.get(uri, params={'access_token': access_token }) # get env # data will be sent as a parameter to the request data = { 'filename': '/data/artefacts/gmp-6.1.0-generic-x86_64-centos6.tar.gz' } # TODO - load from file metadata = { 'metadata': { 'upload_type': 'software', 'publication_type': 'softwaredocumentation', 'title': 'GMP build for CODE-RADE CI phase', 'creators': [ { 'name': 'Bruce Becker', 'affiliation': 'EGI Foundation', 'orcid': '0000-0002-6607-7145' } ], 'description': 'See the README', 'access_right': 'open', 'license': 'Apache-2.0', 'prereserve_doi': 'true', 'communities': 'code-rade' } } # check if json is present if os.path.isfile('zenodo.json'): print("file is there") # Check that DOI has been registered and that url is valid with open('zenodo.json') as deposition: zenodo = json.load(deposition) id = zenodo['id'] print 'id is ',id # Check that this is the right ID else: # deposit the file print("no deposition yet") # create deposition create = requests.post(uri, params={'access_token': access_token}, json={}, headers=headers) create.json() with open('zenodo.json', 'w') as deposition: json.dump(create.json(), deposition) id = create.json['id'] # files is an array of files to be sent as parameters to the request files = {'file': open(os.environ['TARBALL'], 'rb')} deposit = requests.post(uri + '/%s/files' % id, params={'access_token': access_token}, data=data, files=files) print(deposit.json()) # update with metadata meta = requests.put(uri + '/%s' % id, params={'access_token': access_token}, data=json.dumps(metadata), headers=headers) print(meta.json())
Python
0
@@ -529,65 +529,29 @@ e': -'/data/artefacts/gmp-6.1.0-generic-x86_64-centos6.tar.gz' +os.environ%5B'TARBALL'%5D %7D%0A#
32f308d3697c72a6655df38eedd23ebb71d95d40
Fix typos, works in linux now.
pyglet/lib.py
pyglet/lib.py
#!/usr/bin/env python '''Functions for loading dynamic libraries. These extend and correct ctypes functions. ''' __docformat__ = 'restructuredtext' __version__ = '$Id: $' import os import sys import ctypes import ctypes.util class LibraryLoader(object): def load_library(self, *names, **kwargs): '''Find and load a library. More than one name can be specified, they will be tried in order. Platform-specific library names (given as kwargs) are tried first. Raises ImportError if library is not found. ''' platform_names = kwargs.get(self.platform, []) if type(platform_names) in (str, unicode): platform_names = (platform_names,) elif type(platform_names) == list: platform_names = tuple(list) for name in platform_names + names: path = self.find_library(name) if path: return ctypes.cdll.LoadLibrary(path) raise ImportError('Library "%s" not found.' % names[0]) find_library = ctypes.util.find_library platform = sys.platform if platform == 'cygwin': platform = 'win32' class MachOLibraryLoader(LibraryLoader): def __init__(self): if 'LD_LIBRARY_PATH' in os.environ: self.ld_library_path = os.environ['LD_LIBRARY_PATH'].split(':') else: self.ld_library_path = [] if 'DYLD_LIBRARY_PATH' in os.environ: self.dyld_library_path = os.environ['DYLD_LIBRARY_PATH'].split(':') else: self.dyld_library_path = [] if 'DYLD_FALLBACK_LIBRARY_PATH' in os.environ: self.dyld_fallback_library_path = \ os.environ['DYLD_FALLBACK_LIBRARY_PATH'].split(':') else: self.dyld_fallback_library_path = [ os.path.expanduser('~/lib'), '/usr/local/lib', '/usr/lib'] def find_library(self, path): '''Implements the dylib search as specified in Apple documentation: http://developer.apple.com/documentation/DeveloperTools/Conceptual/DynamicLibraries/Articles/DynamicLibraryUsageGuidelines.html ''' libname = os.path.basename(path) if '/' in path: search_path = ( [os.path.join(p, libname) \ for p in self.dyld_library_path] + [path] + [os.path.join(p, libname) \ for p in self.dyld_fallback_library_path]) else: search_path = ( [os.path.join(p, libname) \ for p in self.ld_library_path] + [os.path.join(p, libname) \ for p in self.dyld_library_path] + [path] + [os.path.join(p, libname) \ for p in self.dyld_fallback_library_path]) for path in search_path: if os.path.exists(path): return path return None if sys.platform == 'darwin': loader = MachOLibraryLoader() else: loader = LibraryLoader() load_library = loader.load_library
Python
0.999999
@@ -791,20 +791,30 @@ = tuple( -list +platform_names )%0A @@ -1051,16 +1051,35 @@ ibrary = + lambda self, name: ctypes. @@ -1095,16 +1095,22 @@ _library +(name) %0A%0A pl
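The second hunk here fixes a classic descriptor pitfall: a plain function assigned as a class attribute becomes a bound method, so calling loader.find_library(name) would have passed the loader instance as the library name. Wrapping it in a lambda that takes self explicitly restores the intended signature. A small standalone sketch of the pitfall, where find is a hypothetical stand-in used instead of ctypes.util.find_library:

def find(name):
    return "found: " + str(name)

class BadLoader(object):
    find_library = find    # plain function attribute -> becomes a method

class GoodLoader(object):
    # the lambda absorbs `self` and forwards only the real argument,
    # mirroring the fix in the diff
    find_library = lambda self, name: find(name)

print(BadLoader().find_library())          # the instance itself arrives as `name`
print(GoodLoader().find_library("gmp"))    # found: gmp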
c424efd76f5c6949729d17d131333ce6ec8103f8
Add Event model
app/models.py
app/models.py
from app import db from flask_security import RoleMixin # Define associations friends = db.Table('friends', db.Column('friend_id', db.Integer, db.ForeignKey('user.id')), db.Column('friended_id', db.Integer, db.ForeignKey('user.id')) ) roles_users = db.Table('roles_users', db.Column('user_id', db.Integer(), db.ForeignKey('user.id')), db.Column('role_id', db.Integer(), db.ForeignKey('role.id'))) # Define models class Role(db.Model, RoleMixin): id = db.Column(db.Integer(), primary_key=True) name = db.Column(db.String(80), unique=True) description = db.Column(db.String(255)) class User(db.Model): id = db.Column(db.Integer, primary_key=True) google_id = db.Column(db.String, unique=True) roles = db.relationship('Role', secondary=roles_users, backref=db.backref('users', lazy='dynamic')) connections = db.relationship('Connection', backref=db.backref('user', lazy='joined'), cascade="all", uselist=False) active = False friended = db.relationship('User', secondary=friends, primaryjoin=(friends.c.friend_id == id), secondaryjoin=(friends.c.friended_id == id), backref=db.backref('friends', lazy='dynamic'), lazy='dynamic') def friend(self, user): if not self.is_friend(user): self.friended.append(user) db.session.commit() return self def unfriend(self, user): if self.is_friend(user): self.friended.remove(user) db.session.commit() return self def is_friend(self, user): return self.friended.filter(friends.c.friended_id == user.id).count() > 0 def get_user(self): return {self.id:self.connections.full_name} def get_name(self): return self.connections.full_name def has_role(self, role_check): return role_check in self.roles def is_active(self): return True def get_id(self): return self.id def is_authenticated(self): return True def is_anonymous(self): return False def __init__(self, google_id, active, roles): self.google_id = google_id self.active = active self.roles = roles def __repr__(self): return "<Google ID {}>".format(self.google_id) class Connection(db.Model): id = db.Column(db.Integer, primary_key=True) user_id = db.Column(db.Integer, db.ForeignKey('user.id')) provider_id = db.Column(db.String(255)) full_name = db.Column(db.String(255)) provider_user_id = db.Column(db.String(255)) access_token = db.Column(db.String(255)) secret = db.Column(db.String(255)) display_name = db.Column(db.String(255)) profile_url = db.Column(db.String(512)) image_url = db.Column(db.String(512)) rank = db.Column(db.Integer) db.create_all()
Python
0
@@ -3065,16 +3065,325 @@ teger)%0A%0A +class Event(db.Model):%0A id = db.Column(db.Integer, primary_key=True)%0A description = db.Column(db.String(255))%0A longitude = db.Column(db.Float(precision='3,8'))%0A latitude = db.Column(db.Float(precision='3,8'))%0A start_datetime = db.Column(db.DateTime)%0A end_datetime = db.Column(db.DateTime)%0A%0A%0A db.creat
e4880658f92c0b55c883b136405fa9a2a9d8c8dc
Define update_commits method.
app/models.py
app/models.py
from datetime import datetime from app import slack, redis, app from app.redis import RedisModel class Channel(RedisModel): __prefix__ = '#' @staticmethod def load_from_slack(): """Update channel list from slack""" slack_response = slack.channels.list() if not slack_response.successful: app.logger.error('Error loading channel list. Server returned %s' % slack_response.error) return False # Add channel to list and save for channel in slack_response.body.get('channels', []): name = channel.get('name') entity = Channel(channel.get('name')) entity.slack_id = channel.get('id') return True class User(RedisModel): __prefix__ = '@' @property def commits_updated(self): if 'commits_updated' in self: return datetime.strptime(self['commits_updated'], "%Y-%m-%dT%H:%M:%S.%fZ") return None @commits_updated.setter def commits_updated(self, value): self['commits_updated'] = datetime.strftime(value, "%Y-%m-%dT%H:%M:%S.%fZ") @staticmethod def load_from_slack(include_bots=False, include_deleted=False): """Update user list from slack""" slack_response = slack.users.list() if not slack_response.successful: app.logger.error('Error loading user list. Server returned %s' % slack_response.error) return False # Add channel to list and save for user in slack_response.body.get('members', []): if user.get('is_bot') and not include_bots: continue if user.get('deleted') and not include_deleted: continue entity = User(user.get('name')) entity.slack_id = user.get('id') return True def load_data_from_slack(): """Load data from slack. To be called on application start""" Channel.load_from_slack() User.load_from_slack()
Python
0
@@ -1820,16 +1820,1904 @@ n True%0A%0A + def update_commits(self, commits=1):%0A %22%22%22Update the number of commits%22%22%22%0A if not 'commits_updated' in self:%0A # Start from 0%0A self.commits_updated = datetime.now()%0A self.commits_in_last_day = 0%0A self.commits_in_last_week = 0%0A self.commits_in_last_month = 0%0A self.commits_in_last_year = 0%0A self.commits_total = 0%0A self.days = 1%0A%0A # We will check the dates%0A now = datetime.now()%0A updated = self.commits_updated%0A%0A # Save the difference%0A delta = now - updated%0A%0A # If more than one day has passed since last commit, reset daily commit count%0A if delta.days %3E 0:%0A self.commits_in_last_day = 0%0A%0A # And increase the number of days counting%0A self.incrby('days', 1)%0A%0A # If the week has changed between commits, reset weekly commit count%0A if abs(now.isocalendar()%5B1%5D - updated.isocalendar()%5B1%5D) %3E 0:%0A # Week changed%0A self.commits_in_last_week = 0%0A%0A # If the month changed, reset monthly commit count%0A if abs(now.month - updated.month) %3E 0:%0A self.commits_in_last_month = 0%0A%0A # If the year changed, reset yearly commit count%0A if now.year - updated.year %3E 0:%0A self.commits_in_last_week = 0 # In case there has been no activity in an exact year%0A self.commits_in_last_month = 0%0A self.commits_in_last_year = 0%0A%0A # Increase count. Use incrby for efficiency%0A self.incrby('commits_in_last_day', commits)%0A self.incrby('commits_in_last_week', commits)%0A self.incrby('commits_in_last_month', commits)%0A self.incrby('commits_in_last_year', commits)%0A self.incrby('commits_total', commits)%0A%0A # Change update date%0A self.commits_updated = now%0A%0A%0A def load
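The inserted update_commits() resets each rolling counter once the matching calendar boundary has passed since the stored commits_updated timestamp: timedelta.days for the daily window, isocalendar() week numbers for the weekly one, and plain month/year comparisons for the rest. A condensed sketch of just that boundary logic, using bare datetimes instead of the Redis-backed model (crossed_boundaries is a hypothetical helper):

from datetime import datetime

def crossed_boundaries(updated, now):
    # one flag per rolling window, mirroring the checks in update_commits
    delta = now - updated
    return {
        "day":   delta.days > 0,
        "week":  abs(now.isocalendar()[1] - updated.isocalendar()[1]) > 0,
        "month": abs(now.month - updated.month) > 0,
        "year":  now.year - updated.year > 0,
    }

# 2015-03-01 is the last day of ISO week 9; 2015-03-02 opens week 10
print(crossed_boundaries(datetime(2015, 3, 1, 12), datetime(2015, 3, 2, 13)))
# {'day': True, 'week': True, 'month': False, 'year': False}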
437f50f768f3c250364e33289403164ade464509
Add a default option for the field 'field' of the social network app model
app/models.py
app/models.py
import connectors.models from django.db import models class SocialNetworkApp(models.Model): name = models.CharField(max_length=50) url = models.URLField(null=True, blank=True) connector = models.OneToOneField(connectors.models.SocialNetworkConnector) blocked = models.DateTimeField(null=True, editable=False, default=None) app_id = models.CharField(max_length=50) app_secret = models.CharField(max_length=50, null=True, blank=True) page_id = models.CharField(max_length=50, null=True, blank=True) access_token = models.CharField(max_length=300, null=True) page_token = models.CharField(max_length=300, null=True, blank=True) callback_real_time_updates = models.URLField(null=True, blank=True) object_real_time_updates = models.CharField(max_length=100, null=True, blank=True) field_real_time_updates = models.CharField(max_length=50, null=True, blank=True) token_real_time_updates = models.CharField(max_length=100, null=True, editable=False) subscribed_read_time_updates = models.BooleanField(default=False, editable=False) last_real_time_update_sig = models.CharField(max_length=100, null=True, editable=False) batch_requests = models.BooleanField(default=False) max_batch_requests = models.IntegerField(null=True, blank=True) def __unicode__(self): return self.name class ConsultationPlatform(models.Model): name = models.CharField(max_length=50) url = models.URLField(null=True, blank=True) connector = models.OneToOneField(connectors.models.Connector) def __unicode__(self): return self.name class Initiative(models.Model): external_id = models.IntegerField(editable=False) name = models.CharField(max_length=50, editable=False) platform = models.ForeignKey(ConsultationPlatform, editable=False) social_network = models.ManyToManyField(SocialNetworkApp, blank=True) hashtag = models.CharField(unique=True, max_length=14, null=True, help_text="Max length 14 characters (do not include '#')") url = models.URLField(editable=False) users = models.IntegerField(editable=False, default=0) ideas = models.IntegerField(editable=False, default=0) votes = models.IntegerField(editable=False, default=0) comments = models.IntegerField(editable=False, default=0) active = models.BooleanField(default=False) language = models.CharField(max_length=5, default='en', choices=(('en', 'English'), ('es', 'Spanish'), ('it', 'Italian'),)) def __unicode__(self): return self.name class Campaign(models.Model): external_id = models.IntegerField(editable=False) name = models.CharField(max_length=100) initiative = models.ForeignKey(Initiative) hashtag = models.CharField(max_length=14, null=True, help_text="Max length 14 characters (do not include '#')") def __unicode__(self): return self.name class Location(models.Model): country = models.CharField(max_length=50) city = models.CharField(max_length=50) latitude = models.FloatField(null=True) longitude = models.FloatField(null=True) def __unicode__(self): return self.county + ', ' + self.city class Author(models.Model): external_id = models.CharField(max_length=50) screen_name = models.CharField(max_length=100) name = models.CharField(max_length=100, null=True) bio = models.TextField(null=True) language = models.CharField(max_length=10, null=True) location = models.ForeignKey(Location, null=True) zipcode = models.CharField(max_length=10, null=True, blank=True) national_id = models.CharField(max_length=20, null=True, blank=True) address = models.CharField(max_length=200, null=True, blank=True) email = models.EmailField(max_length=254, null=True, blank=True) friends = models.IntegerField(editable=False, 
default=0) followers = models.IntegerField(editable=False, default=0) groups = models.IntegerField(editable=False, default=0) posts_count = models.IntegerField(editable=False, default=0) url = models.URLField(null=True, blank=True) channel = models.CharField(max_length=50, choices=(('consultation_platform', 'Consultation Platform'), ('social_network', 'Social Network'),)) social_network = models.ForeignKey(SocialNetworkApp, null=True) consultation_platform = models.ForeignKey(ConsultationPlatform, null=True) # Property to save any other information payload = models.TextField(null=True, editable=False) def __unicode__(self): return self.screen_name class BaseContent(models.Model): # Id Fields (after the idea is synchronized in both platforms it will have two ids) sn_id = models.CharField(max_length=100, null=True) cp_id = models.CharField(max_length=100, null=True) # Common Properties datetime = models.DateTimeField(null=True) author = models.ForeignKey(Author) # Context initiative = models.ForeignKey(Initiative) campaign = models.ForeignKey(Campaign) # Original Source source = models.CharField(max_length=50, choices=(('consultation_platform', 'Consultation Platform'), ('social_network', 'Social Network'),)) source_consultation = models.ForeignKey(ConsultationPlatform, null=True) source_social = models.ForeignKey(SocialNetworkApp, null=True) # Property to save any other information payload = models.TextField(null=True, editable=False) # Flags is_new = models.BooleanField(default=True) has_changed = models.BooleanField(default=False) exist = models.BooleanField(default=True) sync = models.BooleanField(default=False) class Meta: abstract = True class Idea(BaseContent): title = models.CharField(max_length=255, null=True) text = models.TextField() url = models.URLField(null=True) location = models.ForeignKey(Location, null=True) # Social Network Metrics re_posts = models.IntegerField(default=0) # e.g. Share in Facebook, RT in Twitter bookmarks = models.IntegerField(default=0) # e.g. Favourite in Twitter # Additional Metric positive_votes = models.IntegerField(default=0) negative_votes = models.IntegerField(default=0) comments = models.IntegerField(default=0) class Comment(BaseContent): text = models.TextField() url = models.URLField(null=True) location = models.ForeignKey(Location, null=True) parent = models.CharField(max_length=10, choices=(('idea','Idea'),('comment','Comment'),)) parent_idea = models.ForeignKey(Idea, null=True) parent_comment = models.ForeignKey('Comment', null=True) # Additional Metric positive_votes = models.IntegerField(default=0) negative_votes = models.IntegerField(default=0) comments = models.IntegerField(default=0) class Vote(BaseContent): value = models.IntegerField(choices=((1,'Positive'), (-1,'Negative'),), default=1) parent = models.CharField(max_length=10, choices=(('idea','Idea'),('comment','Comment'),)) parent_idea = models.ForeignKey(Idea, null=True) parent_comment = models.ForeignKey(Comment, null=True)
Python
0.000003
@@ -810,24 +810,40 @@ , blank=True +, default='page' )%0A field_ @@ -911,24 +911,40 @@ , blank=True +, default='feed' )%0A token_
99b65f7308a4b5719f5cf2e15200767af6780775
deploy keynote images
pytx/files.py
pytx/files.py
import os from django.conf import settings JS_HEAD = [] JS = [ # 'raven.min.js', # 'plugins/vue.min.js', # 'showdown.min.js', 'pytexas.js', ] CSS = [ 'vuetify.min.css', 'global.css', 'pytexas.css', ] IMAGES = [ 'img/atx.svg', 'img/banner80.png', 'img/icon.svg', 'img/icons/about.svg', 'img/icons/blog.svg', 'img/icons/chat.svg', 'img/icons/talks.svg', 'img/icons/community.svg', 'img/icons/sponsors.svg', 'img/icons/venue.svg', 'img/icons/external.svg', 'img/icons/external-white.svg', 'img/icons/background.png', 'img/social/about.me.png', 'img/social/facebook.png', 'img/social/github.png', 'img/social/google.png', 'img/social/linkedin.png', 'img/social/twitter.png', 'img/social/website.png', 'img/apl/library1.png', 'img/apl/library2.png', 'img/keynote/emily.jpg', 'img/keynote/adrienne.jpg', ] FONTS = [ 'Roboto-Regular.woff2', 'Roboto-Bold.woff2', 'Roboto-Slab-Regular.woff2', 'Roboto-Slab-Bold.woff2', 'MaterialIcons-Regular.woff2', ] MD = [] MD_PATH = settings.FRONTEND_MD for root, dirs, files in os.walk(MD_PATH): for f in files: path = os.path.join(root, f) path = path.replace(MD_PATH, '') path = path[1:] MD.append(path) def tpl_files(): tpls = [] base_dir = settings.FRONTEND_TEMPLATES for root, dirs, files in os.walk(base_dir): for file in files: if file.endswith('.html'): fullpath = os.path.join(root, file) relpath = fullpath.replace(base_dir + '/', '') relpath = relpath.replace('/', '-') relpath = relpath[:-5] with open(fullpath, 'r') as fh: tpls.append({'path': relpath, 'content': fh.read()}) return tpls if settings.DEBUG: for i, f in enumerate(JS): if '.min.' in f: JS[i] = f.replace('.min.', '.')
Python
0
@@ -807,127 +807,8 @@ ng', -%0A%0A 'img/apl/library1.png',%0A 'img/apl/library2.png',%0A%0A 'img/keynote/emily.jpg',%0A 'img/keynote/adrienne.jpg', %0A%5D%0A%0A
e9314b02c482314efeb7e36ecf3f6613f9a99adb
fix fetch loop blocking server
app/server.py
app/server.py
import logging import os import sys import requests from flask import Flask from flask import send_file, send_from_directory from gevent import sleep, spawn from api import load_api from blockchain import web3_client from settings import SOURCE_ROOT from storage import Cache log = logging.getLogger(__name__) console_handler = logging.StreamHandler(sys.stderr) def setup_logging(): root_logger = logging.getLogger() root_logger.addHandler(console_handler) root_logger.setLevel(logging.DEBUG) # Disable requests logging logging.getLogger("requests").propagate = False def setup_routes(app): @app.route('/') def index(): return send_file(os.path.join(SOURCE_ROOT, 'static', 'index.html')) @app.route('/<path:path>') def static_files(path): return send_from_directory(os.path.join(SOURCE_ROOT, 'static'), path) @app.after_request def after_request(response): response.headers.add('Access-Control-Allow-Origin', '*') response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization') response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE') return response def fetch_nodes(): while True: try: nodes = requests.get('http://blockchain.daocloud.io/nodes.json').json() w3 = web3_client() for n in nodes: w3.admin.addPeer(n) log.info('fetched nodes: %s' % ', '.join(nodes)) sleep(30) except Exception: log.error('Fail to fetch nodes.json') sleep(10) def create_app(name=None): setup_logging() app = Flask(name or 'app') app.config.root_path = os.path.dirname(os.path.abspath(__file__)) app.config.from_pyfile('settings.py') Cache.init() load_api(app) setup_routes(app) spawn(fetch_nodes()) return app if __name__ == '__main__': app = create_app() app.run('0.0.0.0', 8000, True, use_reloader=True)
Python
0
@@ -122,40 +122,8 @@ tory -%0Afrom gevent import sleep, spawn %0A%0Afr @@ -1169,24 +1169,111 @@ ch_nodes():%0A + from time import sleep%0A from threading import Thread%0A%0A def fetch_loop():%0A while Tr @@ -1288,13 +1288,21 @@ + try:%0A + @@ -1393,16 +1393,20 @@ + w3 = web @@ -1412,24 +1412,67 @@ b3_client()%0A + peers = w3.admin.peers%0A @@ -1487,16 +1487,20 @@ nodes:%0A + @@ -1527,16 +1527,73 @@ Peer(n)%0A + if not len(peers) == len(nodes):%0A @@ -1661,15 +1661,23 @@ + + sleep( -3 +6 0)%0A @@ -1683,16 +1683,20 @@ + + except E @@ -1705,16 +1705,20 @@ eption:%0A + @@ -1755,24 +1755,28 @@ odes.json')%0A + @@ -1785,10 +1785,80 @@ eep( -10 +5)%0A%0A t = Thread(target=fetch_loop)%0A t.setDaemon(True)%0A t.start( )%0A%0A%0A @@ -2112,14 +2112,8 @@ -spawn( fetc @@ -2121,17 +2121,16 @@ _nodes() -) %0A ret
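The bug fixed here is subtle: spawn(fetch_nodes()) invoked the endless loop immediately and handed its (never produced) return value to spawn, so create_app() blocked forever. The diff replaces it with a daemon thread started inside fetch_nodes(). A minimal sketch of that pattern, with a stand-in body in place of the requests/web3 calls:

from time import sleep
from threading import Thread

def fetch_nodes():
    def fetch_loop():
        while True:
            try:
                print("syncing peers...")   # placeholder for the real work
                sleep(60)
            except Exception:
                print("fetch failed, retrying shortly")
                sleep(5)

    t = Thread(target=fetch_loop)
    t.setDaemon(True)   # daemon thread dies together with the main process
    t.start()           # returns immediately, so app startup is not blocked

fetch_nodes()
print("create_app() continues while the loop runs in the background")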
69f24cd7a1936fb7dc4cfb03e3e97997332f633e
add portforward_get method
akanda/horizon/client.py
akanda/horizon/client.py
Python
0.000001
@@ -0,0 +1,338 @@ +import requests%0A%0Adef portforward_get(request):%0A headers = %7B%0A %22User-Agent%22 : %22python-quantumclient%22,%0A %22Content-Type%22 : %22application/json%22,%0A %22Accept%22 : %22application/json%22,%0A %22X-Auth-Token%22 : request.user.token.id%0A %7D%0A r = requests.get('http://0.0.0.0/v2.0/dhportforward.json', headers=headers)%0A r.json%0A
5ae16add0ad711340d92983814eb4c0eff524933
Fix video regex matching whitespace
alexBot/cogs/video_dl.py
alexBot/cogs/video_dl.py
import logging import re from functools import partial import discord from discord.errors import DiscordException from discord.ext import commands import os import shutil import asyncio import subprocess import math from ..tools import Cog, timing from youtube_dl import YoutubeDL, DownloadError log = logging.getLogger(__name__) REGEXES = [ re.compile(r'https?://vm\.tiktok\.com/.{6,}/'), re.compile(r'https?://(?:w{3}\.)tiktok.com/@.*/video/\d{18,20}\??[a-zA-Z0-9#-_!*\(\),]*'), re.compile(r'https?://(?:v\.)?redd\.it/.{6,}'), re.compile(r'https?://(?:\w{,32}\.)?reddit\.com\/(?:r\/\w+\/)?comments\/.{6,}'), re.compile(r'https?://twitter.com\/[a-zA-Z0-9#-_!*\(\),]{0,20}/status/\d{0,25}\??[a-zA-Z0-9#-_!*\(\),]*'), ] TARGET_SHRINK_SIZE = (8 * 10 ** 6 - 128 * 1000) * 8 # 8 MB - 128 KB in bits MAX_VIDEO_LENGTH = 5 * 60 # 5 Minutes AUDIO_BITRATE = 64 * 1000 # 64 Kbits BUFFER_CONSTANT = 20 # Magic number, see https://unix.stackexchange.com/a/598360 FFPROBE_CMD = 'ffprobe -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 in.mp4' FFMPEG_CMD = 'ffmpeg -i in.mp4 -y -b:v {0} -maxrate:v {0} -b:a {1} -maxrate:a {1} -bufsize:v {2} {3}.mp4' class NotAVideo(Exception): pass class Video_DL(Cog): active = False encode_lock = asyncio.Lock() # TODO: convert to ~asyncio.Condition() in the future for better responce in emojis? @staticmethod def download_video(url, id): ytdl = YoutubeDL({'outtmpl': f'{id}.mp4'}) try: data = ytdl.extract_info(url, download=True) except DownloadError: raise NotAVideo(False) try: if data['ext'] not in ['mp4', 'gif', 'm4a', 'mov']: raise NotAVideo(data['url']) except KeyError: pass return REGEXES[3].sub('', data['title']) @Cog.listener() async def on_message(self, message: discord.Message): if message.guild is None or message.author == self.bot.user: return if not (await self.bot.db.get_guild_data(message.guild.id)).config.tikTok: return matches = None for regex in REGEXES: matches = regex.match(message.content) if matches: break if matches is None: return match = matches.group(0) log.info(f'collecting {match} for {message.author}') async with message.channel.typing(): try: if match: await message.channel.trigger_typing() try: await message.add_reaction('⌛') except discord.Forbidden: pass task = partial(self.download_video, match, message.id) try: title = await self.bot.loop.run_in_executor(None, task) except NotAVideo as e: if e.args[0]: await message.reply(e, mention_author=False) try: await message.add_reaction('✅') except DiscordException: pass return if os.path.getsize(f'{message.id}.mp4') > 8000000: try: await message.add_reaction('🪄') except discord.Forbidden: pass async with self.encode_lock: task = partial(self.transcode_shrink, message.id) await self.bot.loop.run_in_executor(None, task) # file is MESSAGE.ID.mp4, need to create discord.File and upload it to channel then delete out.mp4 file = discord.File(f'{message.id}.mp4', 'vid.mp4') await message.reply(title, file=file, mention_author=False) try: await message.add_reaction('✅') except DiscordException: pass except Exception as e: log.warn(f'Exception occurred processing video {e}') try: await message.add_reaction('❌') except discord.Forbidden: await message.channel.send('Something broke') finally: await message.remove_reaction('⌛', self.bot.user) if os.path.exists(f'{message.id}.mp4'): os.remove(f'{message.id}.mp4') @staticmethod @timing(log=log) def transcode_shrink(id): shutil.copyfile(f'{id}.mp4', 'in.mp4') os.remove(f'{id}.mp4') try: video_length = 
float(subprocess.check_output(FFPROBE_CMD.split(' ')).decode("utf-8")) if video_length > MAX_VIDEO_LENGTH: raise commands.CommandInvokeError('Video is too large.') target_total_bitrate = TARGET_SHRINK_SIZE / video_length buffer_size = math.floor(TARGET_SHRINK_SIZE / BUFFER_CONSTANT) target_video_bitrate = target_total_bitrate - AUDIO_BITRATE command_formatted = FFMPEG_CMD.format( str(target_video_bitrate), str(AUDIO_BITRATE), str(buffer_size), str(id) ) subprocess.check_call(command_formatted.split(' ')) except Exception as e: log.warn(f'Exception occurred transcoding video {e}') finally: if os.path.exists('in.mp4'): os.remove('in.mp4') def setup(bot): bot.add_cog(Video_DL(bot))
Python
0.998861
@@ -379,17 +379,37 @@ ok%5C.com/ -. +%5Ba-zA-Z0-9#-_!*%5C(%5C),%5D %7B6,%7D/'), @@ -547,17 +547,37 @@ edd%5C.it/ -. +%5Ba-zA-Z0-9#-_!*%5C(%5C),%5D %7B6,%7D'),%0A @@ -652,17 +652,37 @@ mments%5C/ -. +%5Ba-zA-Z0-9#-_!*%5C(%5C),%5D %7B6,%7D'),%0A
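The wildcard .{6,} matches spaces, so a URL followed by ordinary prose on the same line could bleed into the captured link; the explicit character class excludes whitespace. A two-regex illustration of the difference (the text value is made up):

import re

loose = re.compile(r'https?://vm\.tiktok\.com/.{6,}/')
tight = re.compile(r'https?://vm\.tiktok\.com/[a-zA-Z0-9#-_!*\(\),]{6,}/')

text = 'https://vm.tiktok.com/abc watch this /'
print(loose.match(text))   # matches -- '.' swallowed the spaces
print(tight.match(text))   # None -- the class stops at the first space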
2641b2a1c3d438144191c3a088e2c9b2b777a74c
Implement cloudforms license
awx/main/tests/functional/core/test_licenses.py
awx/main/tests/functional/core/test_licenses.py
# Copyright (c) 2015 Ansible, Inc. # All Rights Reserved. import time import pytest from datetime import datetime from awx.main.models import Host from awx.main.task_engine import TaskEnhancer @pytest.mark.django_db def test_license_writer(inventory, admin): task_enhancer = TaskEnhancer( company_name='acmecorp', contact_name='Michael DeHaan', contact_email='michael@ansibleworks.com', license_date=25000, # seconds since epoch instance_count=500) data = task_enhancer.enhance() Host.objects.bulk_create( [ Host( name='host.%d' % n, inventory=inventory, created_by=admin, modified=datetime.now(), created=datetime.now()) for n in range(12) ] ) assert data['instance_count'] == 500 assert data['contact_name'] == 'Michael DeHaan' assert data['contact_email'] == 'michael@ansibleworks.com' assert data['license_date'] == 25000 assert data['license_key'] == "11bae31f31c6a6cdcb483a278cdbe98bd8ac5761acd7163a50090b0f098b3a13" vdata = task_enhancer.validate_enhancements() assert vdata['available_instances'] == 500 assert vdata['current_instances'] == 12 assert vdata['free_instances'] == 488 assert vdata['date_warning'] is True assert vdata['date_expired'] is True assert vdata['license_date'] == 25000 assert vdata['time_remaining'] < 0 assert vdata['valid_key'] is True assert vdata['compliant'] is False assert vdata['subscription_name'] @pytest.mark.django_db def test_expired_licenses(): task_enhancer = TaskEnhancer( company_name='Tower', contact_name='Tower Admin', contact_email='tower@ansible.com', license_date=int(time.time() - 3600), instance_count=100, trial=True) task_enhancer.enhance() vdata = task_enhancer.validate_enhancements() assert vdata['compliant'] is False assert vdata['grace_period_remaining'] < 0 task_enhancer = TaskEnhancer( company_name='Tower', contact_name='Tower Admin', contact_email='tower@ansible.com', license_date=int(time.time() - 2592001), instance_count=100, trial=False) task_enhancer.enhance() vdata = task_enhancer.validate_enhancements() assert vdata['compliant'] is False assert vdata['grace_period_remaining'] < 0 task_enhancer = TaskEnhancer( company_name='Tower', contact_name='Tower Admin', contact_email='tower@ansible.com', license_date=int(time.time() - 3600), instance_count=100, trial=False) task_enhancer.enhance() vdata = task_enhancer.validate_enhancements() assert vdata['compliant'] is False assert vdata['grace_period_remaining'] > 0
Python
0.000828
@@ -2849,8 +2849,536 @@ g'%5D %3E 0%0A +%0A%0A@pytest.mark.django_db%0Adef test_cloudforms_license(mocker):%0A with mocker.patch('awx.main.task_engine.TaskEnhancer._check_cloudforms_subscription', return_value=True):%0A task_enhancer = TaskEnhancer()%0A vdata = task_enhancer.validate_enhancements()%0A assert vdata%5B'compliant'%5D is True%0A assert vdata%5B'subscription_name'%5D == %22Cloudforms License%22%0A assert vdata%5B'available_instances'%5D == 9999999%0A assert vdata%5B'license_type'%5D == 'enterprise'%0A assert vdata%5B'features'%5D%5B'ha'%5D is True%0A
b8f09b57289a5ae1749a33fa9141bb4b3fb67df9
Add an organisation package to quickstart
quickstart.py
quickstart.py
#!/usr/bin/env python # IATI Data Quality, tools for Data QA on IATI-formatted publications # by Mark Brough, Martin Keegan, Ben Webb and Jennifer Smith # # Copyright (C) 2013 Publish What You Fund # # This programme is free software; you may redistribute and/or modify # it under the terms of the GNU Affero General Public License v3.0 """ This script is to quickly get started with this tool, by: 1) creating DB 2) populating the list of packages from the Registry (will download basic data about all packages) 3) setting 3 to "active" """ #import warnings #warnings.filterwarnings('error') import iatidq import iatidq.dqfunctions import iatidq.dqimporttests import iatidq.dqdownload import iatidq.dqcodelists import iatidq.dqruntests import iatidq.dqindicators import iatidq.dqorganisations import iatidq.dqaggregationtypes import iatidq.dqtests import iatidq.dqprocessing import iatidq.inforesult import iatidq.setup import iatidq.dqregistry as dqregistry import optparse import sys which_packages = [ (u'worldbank-tz', True), (u'unops-tz', True), (u'dfid-tz', True), (u'unitedstates-tz', True) ] def refresh(options): pkg_names = None if options.package_name: pkg_names = [options.package_name] elif options.minimal: pkg_names = [i[0] for i in which_packages] if pkg_names is not None: [ dqregistry.refresh_package_by_name(name) for name in pkg_names ] else: dqregistry.refresh_packages() def activate_packages(options): dqregistry.activate_packages(which_packages, clear_revision_id=True) def drop_all(options): iatidq.db.drop_all() def init_db(options): iatidq.db.create_all() iatidq.dqimporttests.hardcodedTests() def enroll_tests(options): assert options.filename filename = options.filename.decode() result = iatidq.dqimporttests.importTestsFromFile( filename=filename, level=options.level) if not result: print "Error importing" def clear_revisionid(options): iatidq.dqfunctions.clear_revisions() def import_codelists(options): iatidq.dqcodelists.importCodelists() def download(options): if options.minimal: for package_name, _ in which_packages: iatidq.dqdownload.run(package_name=package_name) else: iatidq.dqdownload.run() def import_indicators(options): if options.filename: iatidq.dqindicators.importIndicatorsFromFile("pwyf2013", options.filename) else: iatidq.dqindicators.importIndicators() def import_organisations(options): if options.filename: iatidq.dqorganisations.importOrganisationPackagesFromFile(options.filename) else: print "Error: please provide a filename" def create_aggregation_types(options): iatidq.setup.create_aggregation_types(options) def create_inforesult_types(options): iatidq.setup.create_inforesult_types(options) def enqueue_test(options): assert options.package_name assert options.filename iatidq.dqruntests.enqueue_package_for_test(options.filename, options.package_name) def aggregate_results(options): assert options.runtime_id assert options.package_id iatidq.dqprocessing.aggregate_results(options.runtime_id, options.package_id) def setup(options): iatidq.setup.setup(options) commands = { "drop_db": (drop_all, "Delete DB"), "init_db": (init_db, "Initialise DB"), "enroll_tests": (enroll_tests, "Enroll a CSV file of tests"), "clear_revisionid": (clear_revisionid, "Clear CKAN revision ids"), "import_codelists": (import_codelists, "Import codelists"), "download": (download, "Download packages"), "import_indicators": ( import_indicators, "Import indicators. Will try to assign indicators to existing tests."), "import_organisations": ( import_organisations, "Import organisations. 
Will try to create and assign organisations " "to existing packages."), "setup": (setup, """Quick setup. Will init db, add tests, add codelists, add indicators, refresh package data from Registry."""), "enqueue_test": (enqueue_test, "Set a package to be tested (with --package)"), "refresh": (refresh, "Refresh"), "activate_packages": (activate_packages, "Mark all packages as active"), "create_aggregation_types": (create_aggregation_types, "Create basic aggregation types."), "aggregate_results": (aggregate_results, "Trigger result aggregation"), "create_inforesult_types": (create_inforesult_types, "Create basic infroresult types.") } def main(): p = optparse.OptionParser() for k, v in commands.iteritems(): handler, help_text = v option_name = "--" + k.replace("_", "-") p.add_option(option_name, dest=k, action="store_true", default=False, help=help_text) p.add_option("--runtime-id", dest="runtime_id", type=int, help="Runtime id (integer)") p.add_option("--package-id", dest="package_id", type=int, help="Package id (integer)") p.add_option("--level", dest="level", type="int", default=1, help="Test level (e.g., 1 == Activity)") p.add_option("--minimal", dest="minimal", action="store_true", default=False, help="Operate on a minimal set of packages") p.add_option("--package", dest="package_name", help="Set name of package to be tested") p.add_option("--filename", dest="filename", help="Set filename of data to test") p.add_option("--local-folder", dest="local_folder", help="Set local folder where data to test is stored") options, args = p.parse_args() for mode, handler_ in commands.iteritems(): handler, _ = handler_ if getattr(options, mode, None): handler(options) return usage() def usage(): print "You need to specify which mode to run under" sys.exit(1) if __name__ == '__main__': main()
Python
0
@@ -1133,24 +1133,49 @@ dstates-tz', + True),%0A (u'dfid-org', True)%0A %5D
db52119430c2d62a82759ada121a5e7d1e1f82ef
Update redis_cola.py
redis_cola.py
redis_cola.py
__author__ = 'mariosky' import redis import os import json HOST = os.environ['REDIS_HOST'] PORT = os.environ['REDIS_PORT'] WORKER_HEARTBEAT_INTERVAL = 1 #Time a worker waits for a Task before unblocking to send a heartbeat #TODO: Connection Exception r = redis.Redis(host=HOST, port=PORT) class Task: def __init__(self, **kwargs): self.id = kwargs['id'] self.method = kwargs.get('method', None) self.params = kwargs.get('params', {}) self.state = kwargs.get('state', 'created') self.expire = kwargs.get('expire', None) self.result = None self.__dict__.update(kwargs) def enqueue(self, app_name): pipe = r.pipeline() if pipe.rpush('%s:task_queue' % app_name, self.id): self.state = 'submitted' message = json.dumps(self.__dict__) pipe.set(self.id, message) pipe.execute() return True else: return False def put_result(self, worker): pipe = r.pipeline() if pipe.zrem('%s:pending_set' % worker.cola.app_name, '%s:%s' % (worker.id, self.id)): self.state = 'completed' message = json.dumps(self.__dict__) pipe.set(self.id, message) pipe.sadd('%s:result_set' % worker.cola.app_name, self.id) pipe.execute() return True else: return None def get_result(self, app_name, as_dict = False): if r.sismember('%s:result_set' % app_name, self.id): _dict = eval(r.get(self.id)) self.__dict__.update(_dict) if as_dict: return self.__dict__ else: return self else: return None def __repr__(self): return self.id +" method:"+ str(self.method) +", params:" + str(self.params) def as_dict(self): return self.__dict__ class Cola: def __init__(self, name): self.app_name = name self.task_counter = self.app_name+':task_counter' self.pending_set = self.app_name+':pending_set' self.task_queue = self.app_name+':task_queue' self.result_set = self.app_name+':result_set' self.worker_set = self.app_name+':worker_set' def initialize(self): r.flushall() r.setnx(self.task_counter,0) def enqueue(self, **kwargs): if kwargs['id'] is None: kwargs['id'] = "%s:task:%s" % (self.app_name, r.incr(self.task_counter)) t = Task(**kwargs) t.enqueue(self.app_name) return kwargs['id'] def get_dead_workers(self): workers = r.smembers(self.worker_set) dead = [] for w in workers: if r.get(w): pass else: r.srem(self.worker_set,w) dead.append(w) return dead def get_workers(self): pattern = '%s:worker:*' % (self.app_name) return r.keys(pattern) @staticmethod def get_all_workers(): pattern = '*:worker:*' return r.keys(pattern) class Worker: def __init__(self, worker_id, cola): self.cola = cola self.id = '%s:worker:%s' % (cola.app_name, worker_id) r.sadd(self.cola.worker_set, self.id) def pull_task(self, time_out=WORKER_HEARTBEAT_INTERVAL): #Pop task from queue #This is a blocking operation #task is a tuple (queue_name, task_id) task = r.blpop(self.cola.task_queue, time_out) if task: #Get Task Details _task = r.get(task[1]) #Get Time_stamp time_stamp =r.time()[0] #Store task in pending_set ordered by time # zadd NOTE: The order of arguments differs from that of the official ZADD command. r.zadd(self.cola.pending_set, '%s:%s' % (self.id, task[1]), time_stamp) # Return a Task object _task = json.loads(_task) return Task(**eval(_task)) #If there is no task to do return None else: return None def send_heartbeat(self, timeout = WORKER_HEARTBEAT_INTERVAL + 12): pipe = r.pipeline() pipe.set(self.id, 1) pipe.expire(self.id, timeout) pipe.execute()
Python
0.000001
@@ -51,16 +51,28 @@ ort json +%0Aimport time %0A%0AHOST = @@ -260,16 +260,18 @@ ception%0A +%0A%0A r = redi @@ -300,16 +300,189 @@ t=PORT)%0A +redis_ready = False%0Awhile not redis_ready:%0A try:%0A redis_ready = r.ping()%0A except:%0A print(%22waiting for redis%22)%0A time.sleep(3)%0A%0Aprint(%22redis alive%22) %0A%0Aclass
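The added block is a readiness gate: keep issuing PING until Redis answers, sleeping between attempts so a worker launched before the database comes up does not crash on import. A self-contained sketch of the same wait-for-dependency loop, assuming a local Redis and catching the specific connection error rather than the original bare except:

import time
import redis

r = redis.Redis(host="localhost", port=6379)

ready = False
while not ready:
    try:
        ready = r.ping()    # True once the server accepts connections
    except redis.ConnectionError:
        print("waiting for redis")
        time.sleep(3)

print("redis alive")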
ad79fab7d18b1a31ba06f46e91d573dd3898cca2
fix update points
fantasydota/scripts/update_leaderboard_points.py
fantasydota/scripts/update_leaderboard_points.py
import transaction from fantasydota.lib.account import add_achievement, team_swap_all from fantasydota.lib.general import match_link from sqlalchemy import and_ from fantasydota.lib.constants import MULTIPLIER from fantasydota.lib.session_utils import make_session from fantasydota.models import Result, LeagueUser, League, LeagueUserDay, \ TeamHero, Game def add_results_to_user(session, userq, userq_day, new_results, league, team_size, game_id): picks = 0 bans = 0 heroes = [x[0] for x in session.query(TeamHero.hero_id).filter(and_(TeamHero.league == league.id, TeamHero.user_id == userq.user_id)).filter( TeamHero.active.is_(True)).all()] hero_count = len(heroes) match = None for result in new_results: # This check necessary in-case multiple matches come in at once # think I can get away without ordering query by match id # because get_tournament_data never running in parallel # not possible to get mixed results from two matches if match != result.match_id: picks = 0 bans = 0 match = result.match_id if userq.late_start == 0 or (userq.late_start == 2 and userq.late_start_tstamp > result.start_tstamp): if result.hero in heroes: res = result.result_str user_id = userq.user_id if "p" in res: picks += 1 userq.picks += 1 userq_day.picks += 1 if "w" in res: userq.wins += 1 userq_day.wins += 1 if "b" in res: bans += 1 userq.bans += 1 userq_day.bans += 1 if game_id == 1: to_add = league.multiplier * ((0.5 ** (team_size - hero_count)) * Result.result_to_value(res)) elif game_id == 2: to_add = league.multiplier * ((0.5 ** (team_size - hero_count)) * Result.result_to_value_pubg(res)) print "addin %s points to %s" % (to_add, user_id) userq.points += to_add userq_day.points += to_add # Despite looping over all results in match. with equals these can only be awarded once per match if picks == 3: add_achievement(session, 'Three of a Kind', userq.user_id, match_link(match)) if picks + bans == 5: add_achievement(session, 'Full House', userq.user_id, match_link(match)) def update_league_points(session, league): league_id = league.id new_results = session.query(Result).filter(Result.applied == 1). \ filter(Result.tournament_id == league_id).all() for userq in session.query(LeagueUser).filter(LeagueUser.league == league_id).all(): team_size = session.query(Game.team_size).filter(Game.id == league.game).first()[0] game = league.game userq_day = session.query(LeagueUserDay).filter(and_(LeagueUserDay.user_id == userq.user_id, LeagueUserDay.league == userq.league, LeagueUserDay.day == league.current_day )).first() add_results_to_user(session, userq, userq_day, new_results, league, team_size, game) for res in new_results: res.applied = 2 def main(): with transaction.manager: session = make_session() for league in session.query(League).all(): team_swap_all(session, league.id) session.flush() update_league_points(session, league) transaction.commit() if __name__ == "__main__": main()
Python
0
@@ -1268,9 +1268,9 @@ amp -%3E +%3C res
b37814280dc06dbf8aefec4490f6b73a47f05c1a
Simplify python3 unicode fixer and make it replace all occurrences of __unicode__ with __str__.
custom_fixers/fix_alt_unicode.py
custom_fixers/fix_alt_unicode.py
# Taken from jinja2. Thanks, Armin Ronacher. # See also http://lucumr.pocoo.org/2010/2/11/porting-to-python-3-a-guide from lib2to3 import fixer_base from lib2to3.fixer_util import Name, BlankLine class FixAltUnicode(fixer_base.BaseFix): PATTERN = """ func=funcdef< 'def' name='__unicode__' parameters< '(' NAME ')' > any+ > """ def transform(self, node, results): name = results['name'] name.replace(Name('__str__', prefix=name.prefix))
Python
0.000015
@@ -148,55 +148,8 @@ ase%0A -from lib2to3.fixer_util import Name, BlankLine%0A %0A%0Acl @@ -206,240 +206,136 @@ = %22 -%22%22%0A func=funcdef%3C 'def' name='__unicode__'%0A parameters%3C '(' NAME ')' %3E any+ %3E%0A %22%22%22%0A%0A def transform(self, node, results):%0A name = results%5B'name'%5D%0A name.replace(Name('__str__', prefix=name.prefix)) +'__unicode__'%22%0A%0A def transform(self, node, results):%0A new = node.clone()%0A new.value = '__str__'%0A return new %0A
8f4918a63e312309e835c3a9fc0513ddd6b4bbc1
test restore resnet
restore_resnet.py
restore_resnet.py
__author__ = 'Mohammad' import tensorflow as tf sess = tf.Session() #First let's load meta graph and restore weights saver = tf.train.import_meta_graph('data/tensorflow-resnet-pretrained-20160509/ResNet-L152.meta') saver.restore(sess, 'data/tensorflow-resnet-pretrained-20160509/ResNet-L152.ckpt') for i in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='scale5'): print i.name # i.name if you want just a name # # Access saved Variables directly # print(sess.run('bias:0')) # # This will print 2, which is the value of bias that we saved # # # Now, let's access and create placeholders variables and # # create feed-dict to feed new data # # graph = tf.get_default_graph() # w1 = graph.get_tensor_by_name("w1:0") # w2 = graph.get_tensor_by_name("w2:0") # feed_dict ={w1:13.0,w2:17.0} # # #Now, access the op that you want to run. # op_to_restore = graph.get_tensor_by_name("op_to_restore:0") # # print sess.run(op_to_restore,feed_dict) # #This will print 60 which is calculated
Python
0
@@ -652,18 +652,16 @@ data%0A#%0A -# graph = @@ -683,18 +683,16 @@ graph()%0A -# w1 = gra @@ -718,12 +718,16 @@ me(%22 -w1:0 +scale5/x %22)%0A#
caba041d4297cf7c64a6eef50ddc147331092f26
Implement utils.game_state_to_xml() to export a game to XML
fireplace/utils.py
fireplace/utils.py
import os.path from importlib import import_module from pkgutil import iter_modules # Autogenerate the list of cardset modules _cards_module = os.path.join(os.path.dirname(__file__), "cards") CARD_SETS = [cs for _, cs, ispkg in iter_modules([_cards_module]) if ispkg] # Dict of registered custom cards, by id. for @custom_card _custom_cards = {} class CardList(list): def __contains__(self, x): for item in self: if x is item: return True return False def __getitem__(self, key): ret = super().__getitem__(key) if isinstance(key, slice): return self.__class__(ret) return ret def __int__(self): # Used in Kettle to easily serialize CardList to json return len(self) def contains(self, x): "True if list contains any instance of x" for item in self: if x == item: return True return False def index(self, x): for i, item in enumerate(self): if x is item: return i raise ValueError def remove(self, x): for i, item in enumerate(self): if x is item: del self[i] return raise ValueError def exclude(self, *args, **kwargs): if args: return self.__class__(e for e in self for arg in args if e is not arg) else: return self.__class__(e for k, v in kwargs.items() for e in self if getattr(e, k) != v) def filter(self, **kwargs): return self.__class__(e for k, v in kwargs.items() for e in self if getattr(e, k, 0) == v) def random_draft(hero, exclude=[]): """ Return a deck of 30 random cards from the \a hero's collection """ import random from . import cards from .deck import Deck from hearthstone.enums import CardType, Rarity deck = [] collection = [] hero = cards.db[hero] for card in cards.db.keys(): if card in exclude: continue cls = cards.db[card] if not cls.collectible: continue if cls.type == CardType.HERO: # Heroes are collectible... continue if cls.card_class and cls.card_class != hero.card_class: continue collection.append(cls) while len(deck) < Deck.MAX_CARDS: card = random.choice(collection) if card.rarity == Rarity.LEGENDARY and card.id in deck: continue elif deck.count(card.id) < Deck.MAX_UNIQUE_CARDS: deck.append(card.id) return deck def custom_card(cls): _custom_cards[cls.__name__] = cls return cls def get_script_definition(id): """ Find and return the script definition for card \a id """ for cardset in CARD_SETS: module = import_module("fireplace.cards.%s" % (cardset)) if hasattr(module, id): return getattr(module, id)
Python
0.000002
@@ -77,16 +77,89 @@ modules%0A +from xml.etree import ElementTree%0Afrom hearthstone.enums import CardType%0A %0A%0A# Auto @@ -2579,12 +2579,715 @@ module, id)%0A +%0A%0Adef entity_to_xml(entity):%0A%09e = ElementTree.Element(%22Entity%22)%0A%09for tag, value in entity.tags.items():%0A%09%09if value and not isinstance(value, str):%0A%09%09%09te = ElementTree.Element(%22Tag%22)%0A%09%09%09te.attrib%5B%22enumID%22%5D = str(int(tag))%0A%09%09%09te.attrib%5B%22value%22%5D = str(int(value))%0A%09%09%09e.append(te)%0A%09return e%0A%0A%0Adef game_state_to_xml(game):%0A%09tree = ElementTree.Element(%22HSGameState%22)%0A%09tree.append(entity_to_xml(game))%0A%09for player in game.players:%0A%09%09tree.append(entity_to_xml(player))%0A%09for entity in game.all_entities:%0A%09%09if entity.type in (CardType.GAME, CardType.PLAYER):%0A%09%09%09# Serialized those above%0A%09%09%09continue%0A%09%09e = entity_to_xml(entity)%0A%09%09e.attrib%5B%22CardID%22%5D = entity.id%0A%09%09tree.append(e)%0A%0A%09return ElementTree.tostring(tree)%0A
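entity_to_xml() and game_state_to_xml() use the plain xml.etree.ElementTree API: build Element objects, set string values in .attrib, append children, and serialize with tostring(). A tiny standalone example of the same calls on made-up entity data (the enumID/value pair is illustrative):

from xml.etree import ElementTree

root = ElementTree.Element("HSGameState")
entity = ElementTree.Element("Entity")
entity.attrib["CardID"] = "EX1_001"    # attribute values must be strings

tag = ElementTree.Element("Tag")
tag.attrib["enumID"] = "45"            # e.g. GameTag.HEALTH
tag.attrib["value"] = "2"
entity.append(tag)

root.append(entity)
print(ElementTree.tostring(root))
# b'<HSGameState><Entity CardID="EX1_001"><Tag enumID="45" value="2" /></Entity></HSGameState>'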
5632447a202ef3a83e5b96d11cbbc653fafac99b
Use os.getlogin to get login user name.
ibus/common.py
ibus/common.py
# vim:set et sts=4 sw=4: # # ibus - The Input Bus # # Copyright (c) 2007-2008 Huang Peng <shawn.p.huang@gmail.com> # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the # Free Software Foundation, Inc., 59 Temple Place, Suite 330, # Boston, MA 02111-1307 USA __all__ = ( "IBUS_ADDR", "IBUS_IFACE", "IBUS_NAME", "IBUS_PATH", "IBUS_CONFIG_IFACE", "IBUS_ENGINE_FACTORY_IFACE", "IBUS_ENGINE_IFACE", "IBUS_PANEL_IFACE", "default_reply_handler", "default_error_handler", "DEFAULT_ASYNC_HANDLERS" ) import os import sys import getpass display = os.environ["DISPLAY"] if "." not in display: display += ".0" IBUS_ADDR = "unix:path=/tmp/ibus-%s/ibus-%s" % (getpass.getuser(), display.replace(":", "-")) # IBUS_ADDR = "tcp:host=localhost,port=7799" IBUS_IFACE = "org.freedesktop.IBus" IBUS_PATH = "/org/freedesktop/IBus" IBUS_NAME = "org.freedesktop.IBus" IBUS_CONFIG_IFACE = "org.freedesktop.IBus.Config" IBUS_ENGINE_FACTORY_IFACE = "org.freedesktop.IBus.EngineFactory" IBUS_ENGINE_IFACE = "org.freedesktop.IBus.Engine" IBUS_PANEL_IFACE = "org.freedesktop.IBus.Panel" def default_reply_handler( *args): pass def default_error_handler(e): print >> sys.stderr, e DEFAULT_ASYNC_HANDLERS = { "reply_handler" : default_reply_handler, "error_handler" : default_error_handler }
Python
0
@@ -1184,23 +1184,8 @@ sys -%0Aimport getpass %0A%0Adi @@ -1311,23 +1311,19 @@ %25 ( -getpass.getuser +os.getlogin (),
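Unescaped, the two hunks drop the getpass import and switch the user lookup, so the address line becomes (line wrapping approximate):

    IBUS_ADDR = "unix:path=/tmp/ibus-%s/ibus-%s" % (os.getlogin(), display.replace(":", "-"))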
a97947b3fb718eb764b982e895da7e21ddf6809a
Debug print removed from Package
idl/Package.py
idl/Package.py
from idl.IDLError import IDLError from idl.TypeGetter import TypeGetter class Package(): def __init__(self, env, parent, name): TypeGetter.__init__(self) self._env = env self._parent = parent self._modules = [] self._name = name self._children = [] @property def children(self): ''' List of all children (infinite depth) of this package. ''' packages = [self] res = [] while packages: package = packages.pop(0) res.append(package) packages += package._children return res @property def types(self): ''' List of types exposed by modules contained in this package. ''' res = [] for module in self._modules: res += module.types return res @property def env(self): ''' Parent environment of this package. ''' return self._env @property def modules(self): ''' List of modules contained in this package. ''' return self._modules @property def dependencies(self): ''' List of type dependencies this package is dependant on. ''' res = [] todo = [self] while todo: package = todo.pop(0) print('proc %r' % package.name) for module in package.modules: for i in module.dependencies: if i not in res: res.append(i) todo += package._children return res @property def name(self): ''' Name of this package. ''' return self._name @property def path(self): ''' Path of this package. ''' path = [] package = self while package: if package.parent: path.append(package.name) package = package.parent path.reverse() return path def getModule(self, name): ''' Gets a module object with the given name. ''' for i in self._modules: if i.name == name: return i return None @property def parent(self): ''' Parent package. ''' return self._parent @property def packageStr(self): ''' Package string (e.g. com.example.packge) ''' return '.'.join( self.path ) def getChild(self, arg): ''' Gets a child package by name or path. ''' if isinstance(arg, str): return self.getChildByName(arg) elif isinstance(arg, list): return self.getChildByPath(arg) else: raise RuntimeError('Invalid child search parameter %s' % str(arg)) def isBase(self, path): ''' Checks if given path is in base of this package. e.g. for package 'com.test.package', 'com.test' is in base ''' if len(path) < len(self.path): return False else: return path[:len(self.path)] == self.path def getChildByPath(self, path): ''' Gets a child packge by path ''' # Copy list path = [i for i in path] package = self while path: name = path.pop(0) package = package.getChild(name) if not package: return None return package def getChildByName(self, name): ''' Gets a child package by name. ''' for child in self._children: if child.name == name: return child return None def resolvePath(self, path): ''' Resolves an entity path in the context of this package. ''' # Package package = self.getPackageByPath(path) if package: return package # Type typeObj = self.getTypeByPath(path) if typeObj: return typeObj # It's neither return None def getTypeByPath(self, path): ''' Gets a chlid type by path. ''' if len(path) == 1: # At least two path components necessary (i.e. module.type) return None package = self.getPackageByPath(path[:-1]) if not package: return None else: for typeObj in package.types: if typeObj.name == path[-1]: return typeObj return None def getPackageByPath(self, path): ''' Gets a child package by path. ''' return self.getChildByPath(path) def _createChildTree(self, path): ''' Creates a child tree structure. If a child already exists it simply returns a reference to it. 
''' # Copy list path = [i for i in path] package = self while path: name = path.pop(0) newPackage = package.getChild(name) if not newPackage: newPackage = package._createChildPackage(name) package = newPackage return package def _addModule(self, module): ''' Adds a new module to the list of modules. ''' # Duplicate name check for i in self._modules: if i.name == module.name: raise IDLError('Module named %r already exists in package %r' % (module.name, self.packageStr)) module._setPackage(self) self._modules.append( module ) def _createChildPackage(self, name): ''' Creates a child package with a given name. ''' package = Package(self.env, self, name) if self.getChild(name): raise RuntimeError('Package with name %r already exists' % name) self._children.append( package ) return package
Python
0
@@ -1543,65 +1543,8 @@ %0A - print('proc %25r' %25 package.name)%0A %0A
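The single hunk decodes to the deletion of one debug line (plus its surrounding blank line) from the dependencies property:

    print('proc %r' % package.name)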
b08a8fa6132d3533421088f617342abd094187be
make it possible to run tests independent of the current folder
scarce/testing/tools.py
scarce/testing/tools.py
import inspect import os import tables as tb from collections import OrderedDict import numpy as np import itertools FIXTURE_FOLDER = 'fixtures' def _call_function_with_args(function, **kwargs): ''' Calls the function with the given kwargs and returns the result in a numpy array ''' # Create all combinations of arguments from list parameters # This is ugly but avoids recursion call_values = [] fixed_arguments = [] fixed_arguments_pos = [] for index, values in enumerate(kwargs.values()): if isinstance(values, list): call_values.extend([values]) else: fixed_arguments.append(values) fixed_arguments_pos.append(index) call_values = list(itertools.product(*call_values)) data = [] # Call functions with all parameter combinations for call_value in call_values: actual_call_value = list(call_value) for index, fixed_arg_pos in enumerate(fixed_arguments_pos): actual_call_value.insert(fixed_arg_pos, fixed_arguments[index]) call_args = {key: value for key, value in zip(kwargs.keys(), actual_call_value)} data.append(function(**call_args)) return data def create_fixture(function, **kwargs): ''' Calls the function with the given kwargs values and stores the result. Numpy arrays are given as one parameter, lists parameters are looped with repeated function calls. ''' # Check if all parameters are defined func_args = inspect.getargspec(function)[0] if not all([a in kwargs for a in func_args]): raise RuntimeError('Not all function arguments values defined') data = _call_function_with_args(function, **kwargs) # Store function return values in compressed pytable array data = np.array(data) with tb.open_file(os.path.join(FIXTURE_FOLDER, '%s.h5' % str(function.__name__)), 'w') as out_file: data_array = out_file.create_carray(out_file.root, name='Data', title='%s return values' % function.__name__, atom=tb.Atom.from_dtype(data.dtype), shape=data.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False)) data_array[:] = data def check_with_fixture(function, **kwargs): ''' Calls the function with the given kwargs values and compares the result with the fixture. Numpy arrays are given as one parameter, lists parameters are looped with repeated function calls. ''' with tb.open_file(os.path.join(FIXTURE_FOLDER, '%s.h5' % str(function.__name__)), 'r') as in_file: data_fixture = in_file.root.Data[:] data = np.array(_call_function_with_args(function, **kwargs)) return np.allclose(data_fixture, data)
Python
0
@@ -42,44 +42,8 @@ tb%0A -from collections import OrderedDict%0A impo @@ -79,26 +79,203 @@ ls%0A%0A -FIXTURE_FOLDER = ' +import scarce%0A%0A# Get package path%0Apackage_path = os.path.dirname(scarce.__file__) # Get the absoulte path of the online_monitor installation%0AFIXTURE_FOLDER = os.path.join(package_path, 'testing/ fixt @@ -279,16 +279,18 @@ ixtures' +)%0A %0A%0Adef _c @@ -897,29 +897,25 @@ ll_values))%0A - %0A + data = %5B @@ -916,21 +916,17 @@ ta = %5B%5D%0A - %0A + # Ca @@ -1326,25 +1326,17 @@ _args))%0A - %0A + retu @@ -1464,21 +1464,17 @@ result.%0A - %0A + Nump @@ -1793,21 +1793,17 @@ fined')%0A - %0A + data @@ -1854,17 +1854,9 @@ gs)%0A - %0A + @@ -2115,18 +2115,18 @@ ='Data', - %0A + @@ -2242,18 +2242,18 @@ .dtype), - %0A + @@ -2546,21 +2546,17 @@ ixture.%0A - %0A + Nump @@ -2810,21 +2810,17 @@ Data%5B:%5D%0A - %0A + data @@ -2877,20 +2877,16 @@ wargs))%0A - %0A ret
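Expanded, the main hunk replaces the hard-coded FIXTURE_FOLDER = 'fixtures' with a path anchored to the installed package; the remaining hunks drop the now-unused OrderedDict import and strip trailing whitespace. The new header block decodes to (comment text, including the spelling "absoulte", is verbatim from the hunk; indentation reconstructed):

    import scarce

    # Get package path
    package_path = os.path.dirname(scarce.__file__) # Get the absoulte path of the online_monitor installation
    FIXTURE_FOLDER = os.path.join(package_path, 'testing/fixtures')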
eb4cda636a0b0ceb5312b161e97ae5f8376c9f8e
Change biolookup test to work around service bug
indra/tests/test_biolookup_client.py
indra/tests/test_biolookup_client.py
from indra.databases import biolookup_client def test_lookup_curie(): curie = 'pubchem.compound:40976' res = biolookup_client.lookup_curie(curie) assert res['name'] == '(17R)-13-ethyl-17-ethynyl-17-hydroxy-11-' \ 'methylidene-2,6,7,8,9,10,12,14,15,16-decahydro-1H-' \ 'cyclopenta[a]phenanthren-3-one', res def test_lookup(): res = biolookup_client.lookup('FPLX', 'ERK') assert res['name'] == 'ERK', res def test_get_name(): res = biolookup_client.get_name('CHEBI', 'CHEBI:408174') assert res == 'arformoterol', res
Python
0
@@ -389,19 +389,20 @@ up(' -FPLX', 'ERK +HGNC', '1097 ')%0A @@ -431,11 +431,12 @@ == ' -ERK +BRAF ', r
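Decoded, the two hunks repoint the lookup test at a different entry, so the test afterwards reads:

    def test_lookup():
        res = biolookup_client.lookup('HGNC', '1097')
        assert res['name'] == 'BRAF', res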
563cd9183231f0087ace974bb67bd1123ba93bb2
label is singular
notes/tests.py
notes/tests.py
import datetime from django.core.urlresolvers import reverse from django.utils import timezone from django.test import TestCase from .models import Note, Label class NoteMethodTests(TestCase): def test_was_published_recently_with_future_note(self): """ was_published_recently() should return False for notes whose pub_date is in the future. """ time = timezone.now() + datetime.timedelta(days=30) future_note = Note(pub_date=time) self.assertEqual(future_note.was_published_recently(), False) def test_was_published_recently_with_old_note(self): """ was_published_recently() should return False for notes whose pub_date is older than one day. """ time = timezone.now() - datetime.timedelta(days=30) old_note = Note(pub_date=time) self.assertEqual(old_note.was_published_recently(), False) def test_was_published_recently_with_recent_note(self): """ was_published_recently() should return True for notes whose pub_date is within the last day. """ time = timezone.now() - datetime.timedelta(hours=5) recent_note = Note(pub_date=time) self.assertEqual(recent_note.was_published_recently(), True) class NoteIndexViewTests(TestCase): def test_index_view_with_no_notes(self): """ If no notes exist, an appropriate message should be displayed. """ response = self.client.get(reverse('notes:index')) self.assertEqual(response.status_code, 200) self.assertContains(response, "No notes are available.") self.assertQuerysetEqual(response.context['latest_notes_list'], []) def test_index_view_with_future_note(self): """ Index page should display note if it was published in the past """ create_note(note_title="You will read this.", days=30) response = self.client.get(reverse('notes:index')) self.assertEqual(response.status_code, 200) self.assertQuerysetEqual(response.context['latest_notes_list'], []) def test_index_view_with_past_note(self): """ Index page should display note if it was published in the past """ create_note(note_title="Something about the past.", days=-30) response = self.client.get(reverse('notes:index')) self.assertEqual(response.status_code, 200) self.assertQuerysetEqual( response.context['latest_notes_list'], ['<Note: Something about the past.>'] ) def test_index_view_with_past_and_future_notes(self): """ Index page should display note if it was published in the past """ create_note(note_title="Something about the past.", days=-30) create_note(note_title="A future note.", days=30) response = self.client.get(reverse('notes:index')) self.assertEqual(response.status_code, 200) self.assertQuerysetEqual( response.context['latest_notes_list'], ['<Note: Something about the past.>'] ) def test_index_view_with_two_past_notes(self): """ Index page should display note if it was published in the past """ create_note(note_title="Thing 1", days=-2) create_note(note_title="Thing 2", days=-3) response = self.client.get(reverse('notes:index')) self.assertEqual(response.status_code, 200) self.assertQuerysetEqual( response.context['latest_notes_list'], ['<Note: Thing 1>', '<Note: Thing 2>'] ) class NoteDetailViewTests(TestCase): def test_detail_view_not_found(self): """ The detail view of a note whose pub_date is in the future should return a 404 """ future_note = create_note(note_title="You're in the future, man!", days=30) response = self.client.get(reverse('notes:detail', args=(future_note.id,))) self.assertEqual(response.status_code, 404) def test_detail_view_with_past_note(self): """ The detail view of a note whose pub_date is in the past should display its title and text """ past_note = create_note(note_title="Hogwarts, 
a history", days=-5) response = self.client.get(reverse('notes:detail', args=(past_note.id,))) self.assertEqual(response.status_code, 200) self.assertContains(response, past_note.note_title, status_code=200) class NoteLabelListViewTests(TestCase): def test_label_view_not_found(self): """ If given label doesn't exist return a 404 """ response = self.client.get(reverse('notes:label', args=("DoesntExist",))) self.assertEqual(response.status_code, 404) def test_label_view_no_notes_found(self): """ If there aren't any notes associated with a certain label an appropriate message should be displayed. """ new_label = create_label("Journal") response = self.client.get(reverse('notes:label', args=(new_label.text,))) self.assertEqual(response.status_code, 200) self.assertContains(response, "No notes are available.") self.assertQuerysetEqual(response.context['latest_notes_list'], []) def test_label_view_past_notes_found(self): """ The list view of notes associated with a given label """ new_label = create_label(label_text="Journal") past_note_one = create_note_with_labels(note_title="Dear diary", days=-5, label=new_label.id) past_note_two = create_note_with_labels(note_title="February 24th, 2016", days=-4, label=new_label.id) response = self.client.get(reverse('notes:label', args=(new_label.text,))) self.assertContains(response, past_note_one.note_title, status_code=200) self.assertContains(response, past_note_two.note_title, status_code=200) self.assertQuerysetEqual(response.context['latest_notes_list'], ['<Note: February 24th, 2016>', '<Note: Dear diary>']) def create_note(note_title, days): """ Creates a Note with the given `note_title` and published the given number of `days` offset to now (negative for notes published in the past, positive for notes that have yet to be published). """ time = timezone.now() + datetime.timedelta(days=days) return Note.objects.create(note_title=note_title, pub_date=time) def create_note_with_labels(note_title, days, label): """ Creates a Note with the given `note_title`, associated with given `label`, and published the given number of `days` offset to now (negative for notes published in the past, positive for notes that have yet to be published). """ time = timezone.now() + datetime.timedelta(days=days) note = Note.objects.create(note_title=note_title, pub_date=time) note.labels.add(label) return note def create_label(label_text): """ Creates a Label with the given `text` """ return Label.objects.create(text=label_text)
Python
0.999981
@@ -5066,33 +5066,32 @@ _note_with_label -s (note_title=%22Dea @@ -5163,33 +5163,32 @@ _note_with_label -s (note_title=%22Feb @@ -5987,17 +5987,16 @@ th_label -s (note_ti
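The three hunks decode to a plain rename, dropping the trailing 's' at both call sites and at the definition, which becomes:

    def create_note_with_label(note_title, days, label):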
0c0c20229d91e183af61c2e243f50054336520f2
Handle title and alignment in reporter.
indra/tools/reading/util/reporter.py
indra/tools/reading/util/reporter.py
from reportlab.lib.enums import TA_JUSTIFY from reportlab.lib.pagesizes import letter from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Image from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle from reportlab.lib.units import inch class Reporter(object): def __init__(self, name): self.styles = getSampleStyleSheet() self.styles.add(ParagraphStyle(name="Justify", alignment=TA_JUSTIFY)) self.story = [] self.name = name self.sections = {} return def add_section(self, section_name): if section_name in self.sections: raise ValueError("Section %s already exists." % section_name) self.sections[section_name] = [] def make_report(self, sections_first=True, section_header_params=None): full_story = [] # Set the default section header parameters if section_header_params is None: section_header_params = {'style': 'h1', 'fontsize': 14} # Merge the sections and the rest of the story. if sections_first: full_story += self._make_sections(**section_header_params) full_story += self.story else: full_story += self.story full_story += self._make_sections(**section_header_params) doc = SimpleDocTemplate(self.name + '.pdf', pagesize=letter, rightMargin=72, leftMargin=72, topMargin=72, bottomMargin=18) doc.build(full_story) return doc def _make_sections(self, **section_hdr_params): sect_story = [] for section_name, section_story in self.sections.items(): title, title_sp = self._preformat_text(section_name + '-'*40, **section_hdr_params) sect_story += [title, title_sp] + section_story return sect_story def _preformat_text(self, text, style='Normal', space=None, fontsize=12): if space is None: space=(1,12) ptext = '<font size=%d>%s</font>' % (fontsize, text) para = Paragraph(ptext, self.styles[style]) sp = Spacer(*space) return para, sp def add_story_text(self, text, *args, **kwargs): # Pull down some kwargs. section_name = kwargs.pop('section', None) # Actually do the formatting. para, sp = self._preformat_text(text, *args, **kwargs) # Select the appropriate list to update if section_name is None: relevant_list = self.story else: relevant_list = self.sections[section_name] # Add the new content to list. relevant_list.append(para) relevant_list.append(sp) return def add_story_image(self, image_path, width=None, height=None, section=None): if width is not None: width = width*inch if height is not None: height = height*inch im = Image(image_path, width, height) if section is None: self.story.append(im) else: self.sections[section].append(im)
Python
0
@@ -504,21 +504,122 @@ elf. -sections = %7B%7D +title = name%0A self.sections = %7B%7D%0A return%0A%0A def set_title(self, title):%0A self.title = title %0A @@ -927,18 +927,149 @@ story = -%5B%5D +list(self._preformat_text(self.title, style='Title',%0A fontsize=18, alignment='center')) %0A%0A @@ -1222,16 +1222,76 @@ ize': 14 +,%0A 'alignment': 'center' %7D%0A%0A @@ -1986,16 +1986,114 @@ tems():%0A + line = '-'*20%0A section_head_text = '%25s %25s %25s' %25 (line, section_name, line)%0A @@ -2147,21 +2147,17 @@ ion_ -name + '-'*40 +head_text ,%0A @@ -2389,16 +2389,58 @@ tsize=12 +,%0A alignment='left' ):%0A @@ -2505,37 +2505,96 @@ t = +( '%3C -font size=%25d%3E%25s%3C/font%3E' %25 ( +para alignment=%5C%22%25s%5C%22%3E%3Cfont size=%25d%3E%25s%3C/font%3E%3C/para%3E'%0A %25 (alignment, font @@ -2604,16 +2604,17 @@ e, text) +) %0A
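Unescaped, the hunks give Reporter a title: __init__ stores self.title = name, a set_title() setter is added, make_report() seeds full_story with list(self._preformat_text(self.title, style='Title', fontsize=18, alignment='center')), the default section header params gain 'alignment': 'center', and section headers are framed with dashes via line = '-'*20 and section_head_text = '%s %s %s' % (line, section_name, line). The reworked helper decodes to (continuation-line indentation approximate):

    def _preformat_text(self, text, style='Normal', space=None, fontsize=12,
                        alignment='left'):
        if space is None:
            space=(1,12)
        ptext = ('<para alignment=\"%s\"><font size=%d>%s</font></para>'
                 % (alignment, fontsize, text))
        para = Paragraph(ptext, self.styles[style])
        sp = Spacer(*space)
        return para, sp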
d65abc2f75f327508683ee5329aa49aff61f702f
use sys.stdout.isatty instead of os.isatty
nutils/core.py
nutils/core.py
# -*- coding: utf8 -*- # # Module CORE # # Part of Nutils: open source numerical utilities for Python. Jointly developed # by HvZ Computational Engineering, TU/e Multiscale Engineering Fluid Dynamics, # and others. More info at http://nutils.org <info@nutils.org>. (c) 2014 """ The core module provides a collection of low level constructs that have no dependencies on other nutils modules. Primarily for internal use. """ import sys, functools, os globalproperties = { 'nprocs': 1, 'outrootdir': '~/public_html', 'outdir': '.', 'verbose': 4, 'richoutput': os.isatty( sys.stdout.fileno() ), 'htmloutput': True, 'pdb': False, 'imagetype': 'png', 'symlink': False, 'recache': False, 'dot': False, 'profile': False, 'selfcheck': False, } if os.access( '/run/shm', os.W_OK ): globalproperties['shmdir'] = '/run/shm' for nutilsrc in ['~/.config/nutils/config', '~/.nutilsrc']: nutilsrc = os.path.expanduser( nutilsrc ) if not os.path.isfile( nutilsrc ): continue try: with open(nutilsrc) as rc: exec( rc.read(), {}, globalproperties ) except: exc_value, frames = sys.exc_info() exc_str = '\n'.join( [ repr(exc_value) ] + [ str(f) for f in frames ] ) print( 'Skipping .nutilsrc: {}'.format(exc_str) ) break _nodefault = object() def getprop( name, default=_nodefault, frame=None ): """Access a semi-global property. The use of global variables is discouraged, as changes can go unnoticed and lead to abscure failure. The getprop mechanism makes local variables accesible (read only) from nested scopes, but not from the encompassing scope. >>> def f(): >>> print getprop('myval') >>> >>> def main(): >>> __myval__ = 2 >>> f() Args: name (str): Property name, corresponds to __name__ local variable. default: Optional default value. Returns: The object corresponding to the first __name__ encountered in a higher scope. If none found, return default. If no default specified, raise NameError. """ key = '__%s__' % name if frame is None: frame = sys._getframe(1) while frame: if key in frame.f_locals: return frame.f_locals[key] frame = frame.f_back if name in globalproperties: return globalproperties[name] if default is _nodefault: raise NameError( 'property %r is not defined' % name ) return default def index( items ): """Index of the first nonzero item. Args: items: Any iterable object Returns: The index of the first item for which bool(item) returns True. """ for i, item in enumerate(items): if item: return i raise ValueError def single_or_multiple( f ): """ Method wrapper, converts first positional argument to tuple: tuples/lists are passed on as tuples, other objects are turned into tuple singleton. Return values should match the length of the argument list, and are unpacked if the original argument was not a tuple/list. >>> class Test: >>> @single_or_multiple >>> def square( args ): >>> return [ v**2 for v in args ] >>> >>> T = Test() >>> a = T.square( 2 ) # 4 >>> a, b = T.square( [2,3] ) # (4,9) Args: f: Method that expects a tuple as first positional argument, and that returns a list/tuple of the same length. Returns: Wrapped method. """ @functools.wraps( f ) def wrapped( self, arg0, *args, **kwargs ): ismultiple = isinstance( arg0, (list,tuple) ) arg0mod = tuple(arg0) if ismultiple else (arg0,) retvals = f( self, arg0mod, *args, **kwargs ) if not ismultiple: retvals, = retvals return retvals return wrapped # vim:shiftwidth=2:softtabstop=2:expandtab:foldmethod=indent:foldnestmax=2
Python
0.000003
@@ -566,19 +566,8 @@ ut': - os.isatty( sys @@ -578,17 +578,15 @@ out. -fileno() +isatty( ),%0A
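Expanded, the two hunks rewrite the terminal-detection entry as (spacing approximate):

    'richoutput': sys.stdout.isatty(),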
fbbad4269e28d7763e79a52104566f53100323a8
Allow any iterable to be handled as Component.
astm/codec.py
astm/codec.py
# -*- coding: utf-8 -*- # # Copyright (C) 2012 Alexander Shorin # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. # STX = '\x02' ETX = '\x03' EOT = '\x04' ENQ = '\x05' ACK = '\x06' LF = '\x0A' CR = '\x0D' NAK = '\x15' CRLF = CR + LF RECORD_SEP = '\x0D' # \r FIELD_SEP = '\x7C' # | REPEAT_SEP = '\x5C' # \ COMPONENT_SEP = '\x5E' # ^ ESCAPE_SEP = '\x26' # & def decode(data): """Common ASTM decoding function that tries to guess which kind of data it handles. If `data` starts with STX character (``0x02``) than probably it is full ASTM message with checksum and other system characters. If `data` starts with digit character (``0-9``) than probably it is frame of records leading by his sequence number. No checksum is expected in this case. Otherwise it counts `data` as regular record structure. :param data: ASTM data object. :type data: str :return: List of ASTM records. :rtype: list """ if data[0] == STX: # may be decode message \x02...\x03CS\r\n seq, records, cs = decode_message(data) return records if data[0].isdigit(): # may be decode frame \d... seq, records = decode_frame(data) return records return decode_record(data) def decode_message(message): """Decodes complete ASTM message that is sended or received due communication routines. It should contains checksum that would be additionally verified. :param message: ASTM message. :type message: str :returns: Tuple of three elements: * :class:`int` frame sequence number. * :class:`list` of records. * :class:`str` checksum. :raises: * :exc:`ValueError` if ASTM message is malformed. * :exc:`AssertionError` if checksum verification fails. """ if not (message[0] == STX and message[-2:] == CRLF): raise ValueError('Malformed ASTM message. Expected that it will started' ' with %x and followed by %x%x characters. Got: %r' ' ' % (ord(STX), ord(CR), ord(LF), message)) stx, frame_cs = message[0], message[1:-2] frame, cs = frame_cs[:-2], frame_cs[-2:] ccs = make_checksum(frame) assert cs == ccs, 'Checksum failure: expected %r, calculated %r' % (cs, ccs) seq, records = decode_frame(frame) return seq, records, cs def decode_frame(frame): """Decodes ASTM frame: list of records followed by sequence number.""" if not frame[0].isdigit(): raise ValueError('Malformed ASTM frame. Expected leading seq number %r' '' % frame) if frame.endswith(CR + ETX): frame = frame[:-2] seq, records = int(frame[0]), frame[1:] return seq, [decode_record(record) for record in records.split(RECORD_SEP)] def decode_record(record): """Decodes ASTM record message.""" fields = [] for item in record.split(FIELD_SEP): if REPEAT_SEP in item: item = decode_repeated_component(item) elif COMPONENT_SEP in item: item = decode_component(item) fields.append(item) return fields def decode_component(field): """Decodes ASTM field component.""" return field.split(COMPONENT_SEP) def decode_repeated_component(component): """Decodes ASTM field repeated component.""" return [decode_component(item) for item in component.split(REPEAT_SEP)] def encode(records): """Encodes list of records into single ASTM message. If you need to get each record as standalone message use :func:`iter_encode` instead. :param records: List of ASTM records. :type records: list :return: ASTM complete message with checksum and other control characters. :rtype: str """ return encode_message(1, records) def iter_encode(records): """Emits sequential ASTM messages for single package. 
:yields: ASTM complete message with """ for idx, record in enumerate(records): yield encode_message(idx + 1, [record]) def encode_message(seq, records): """Encodes ASTM message. :param seq: Frame sequence number. :type seq: int :param records: List of ASTM records. :type records: list :return: ASTM complete message with checksum and other control characters. :rtype: str """ data = RECORD_SEP.join(encode_record(record) for record in records) data = ''.join((str(seq), data , CR, ETX)) return ''.join([STX, data, make_checksum(data), CR, LF]) def encode_record(record): """Encodes single ASTM record. :param record: ASTM record. Each :class:`str`-typed item counted as field value, one level nested :class:`list` counted as components and second leveled - as repeated components. :type record: list :returns: Encoded ASTM record. :rtype: str """ fields = [] _append = fields.append for field in record: if field is None: _append('') elif isinstance(field, (list, tuple)): _append(encode_component(field)) else: _append(field) return FIELD_SEP.join(fields) def encode_component(component): """Encodes ASTM record field components.""" items = [] _append = items.append for item in component: if isinstance(item, (list, tuple)): return encode_repeated_component(component) elif item is None: _append('') else: _append(item) return COMPONENT_SEP.join(items).rstrip(COMPONENT_SEP) def encode_repeated_component(components): """Encodes repeated components.""" return REPEAT_SEP.join(encode_component(item) for item in components) def make_checksum(message): """Calculates checksum for specified message. :param message: ASTM message. :type message: str :returns: Checksum value that is actually byte sized integer in hex base :rtype: str """ return hex(sum(ord(i) for i in message) & 0xFF)[2:].upper().zfill(2)
Python
0
@@ -210,16 +210,50 @@ ion.%0A#%0A%0A +from collections import Iterable%0A%0A STX = '%5C @@ -5060,21 +5060,37 @@ if -field is None +isinstance(field, basestring) :%0A @@ -5099,34 +5099,37 @@ _append( -'' +field )%0A elif i @@ -5145,30 +5145,25 @@ (field, -(list, tup +Iterab le) -) :%0A @@ -5203,33 +5203,47 @@ eld))%0A el -s +if field is Non e:%0A _ @@ -5245,30 +5245,77 @@ _append( +'')%0A else:%0A _append(unicode( field) +) %0A return @@ -5520,22 +5520,86 @@ em, -(list, tup +basestring):%0A _append(item)%0A elif isinstance(item, Iterab le) -) :%0A @@ -5733,29 +5733,39 @@ _append( +unicode( item) +)%0A %0A return
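Decoded, the diff adds from collections import Iterable at the top and reorders the type checks so strings are matched before generic iterables; the encode_record() loop becomes the following (encode_component() receives the mirror-image change, with non-string, non-iterable items coerced via unicode()):

    for field in record:
        if isinstance(field, basestring):
            _append(field)
        elif isinstance(field, Iterable):
            _append(encode_component(field))
        elif field is None:
            _append('')
        else:
            _append(unicode(field))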
28c0e714149839351a6b67a504356f223563cb2f
add mode
obztak/auto.py
obztak/auto.py
#!/usr/bin/env python """ Base class for calling obztak through the AUTOOBS named pipe mechanism. Based on the Scheduler class by Eric Neilsen TODO: - Replace ConfigParser with Yaml for consistency """ import os from datetime import datetime, timedelta import logging import subprocess from ConfigParser import ConfigParser import json import tempfile from obztak.field import SISPI_DICT class AutoObz(object): """ Automated obztak scheduler """ def __init__(self, config_fname): self.configure(config_fname) def configure(self, config_fname): """Configure the auto object""" config = ConfigParser() config.read(config_fname) self.stale_time_delta = timedelta(0,config.getfloat('timeouts', 'fifo')) self.output_fname = config.get('paths', 'outbox') self.queue_fname = config.get('paths', 'current_queue') self.previous_queue_fname = config.get('paths', 'previous_queue') self.in_progress_fname = config.get('paths', 'inprogress') self.fifo_fname = config.get('paths', 'fifo') self.chunk = 10 self.min_queue_len = 30 self.min_queue_time = 70 def make_script(self): """Create the observing script""" with open(self.queue_fname, 'r') as fp: sispi_queue = json.load(fp) logging.debug("Found %d exposure(s) on the SISPI/OCS queue." % len(sispi_queue)) with open(self.in_progress_fname, 'r') as fp: in_progress = json.load(fp) for exp in in_progress: if exp is None: continue for k,v in SISPI_DICT.items(): exp.setdefault(k,v) exp['date'] = datetime.utcnow().strftime('%Y/%m/%d %H:%M:%S') tmp = tempfile.NamedTemporaryFile(suffix='.json') json.dump(in_progress, tmp) tmp.flush() # Generic time for in-progress exposure delay = (90 + 30)*len(in_progress) # Sum the exposure times for the current queue delay += sum([q['expTime']+30 for q in sispi_queue]) logging.debug("Total queue time: %g seconds"%delay) # If we don't want to add anything, return an empty list if len(sispi_queue) >= self.min_queue_len or delay/60. >= self.min_queue_time: logging.info("Queue contains %d exposures and a runtime of %d minutes; waiting..." 
% (len(sispi_queue),delay//60)) # Add an empty script so AUTOOBS knows the scheduler "passed" with open(self.output_fname, 'w') as fp: json.dump([], fp, indent=4) os.chmod(self.output_fname, 0o666) return logging.info(datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')) start = datetime.utcnow() + timedelta(seconds=delay) utc = start.strftime('%Y-%m-%dT%H:%M:%S') params = dict(utc=utc,output=self.output_fname,chunk=self.chunk, current=self.queue_fname, previous=self.previous_queue_fname, progress=tmp.name) # Schedule the next chunk of exposures cmd = "schedule_chunk -k %(chunk)i --utc %(utc)s -o %(output)s"%params cmd += " -c %(progress)s"%params # need to be first (hacked) cmd += " -c %(previous)s -c %(current)s"%params #cmd += " -m wide" # hardcoded for now logging.info(cmd) # Generate the script logging.info("Calling scheduler") subprocess.check_call(cmd, shell=True) def __call__(self): """Execute the loop to check the fifo""" logging.info("Scheduler starting") while True: # open block until something is sent to the fifo # (should by sent by obstac) logging.info("Waiting for AUTOOBS") with open(self.fifo_fname, 'r') as fp: time_string = fp.readline().strip() logging.info("Triggered by AUTOOBS") if len(time_string) == 0: continue try: queue_time = datetime.strptime(time_string,'%Y-%m-%d %H:%M:%S') except ValueError: logging.info("Invalid marker in FIFO: %s" % time_string) continue marker_age = datetime.now()-queue_time if marker_age > self.stale_time_delta: logging.info("FIFO has time %s, more than %s ago; not calling scheduler"% (time_string, str(self.stale_time_delta))) continue new_sispi_script = self.make_script()
Python
0.000001
@@ -1106,16 +1106,41 @@ nk = 10%0A + self.mode = None%0A @@ -3084,24 +3084,40 @@ ess=tmp.name +, mode=self.mode )%0A%0A # @@ -3370,26 +3370,49 @@ -#cmd += %22 -m wide%22 +if self.mode: cmd += %22 -m %25(mode)%22%25params # h @@ -3428,16 +3428,17 @@ for now%0A +%0A
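The hunks decode to a new mode attribute threaded through to the scheduler command; only the changed pieces are shown, with "..." standing for the unchanged keyword arguments, and the format string reproduced exactly as the hunk has it (no trailing conversion character):

    self.chunk = 10
    self.mode = None
    ...
    params = dict(..., progress=tmp.name, mode=self.mode)
    ...
    if self.mode: cmd += " -m %(mode)"%params # hardcoded for now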
7fd2060f2241bcff6849d570406dc057b9c7f8d1
Fix the message string
satchless/cart/views.py
satchless/cart/views.py
# -*- coding: utf-8 -*- from django.contrib import messages from django.shortcuts import get_object_or_404, redirect from django.template.response import TemplateResponse from django.utils.translation import ugettext from django.views.decorators.http import require_POST from . import models from . import forms def cart(request, typ, form_class=forms.EditCartItemForm): cart = models.Cart.objects.get_or_create_from_request(request, typ) cart_item_forms = [] for item in cart.items.all(): form = form_class(data=request.POST or None, instance=item, prefix='%s-%i'%(typ, item.id)) if request.method == 'POST' and form.is_valid(): messages.success(request, ugettext("Cart's content updated successfully.")) form.save() return redirect(request.get_full_path()) cart_item_forms.append(form) templates = [ 'satchless/cart/%s/view.html' % typ, 'satchless/cart/view.html' ] return TemplateResponse(request, templates, { 'cart': cart, 'cart_item_forms': cart_item_forms, }) @require_POST def remove_item(request, typ, item_pk): cart = models.Cart.objects.get_or_create_from_request(request, typ) item = get_object_or_404(cart.items, pk=item_pk) cart.set_quantity(item.variant, 0) return redirect('satchless-cart-view', typ=typ)
Python
1
@@ -209,16 +209,21 @@ ugettext + as _ %0Afrom dj @@ -756,32 +756,23 @@ -ugettext +_ (%22Cart -'s content upd @@ -767,16 +767,22 @@ content +s were updated
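Expanded, the hunks alias the translation function and reword the message:

    from django.utils.translation import ugettext as _
    ...
    messages.success(request, _("Cart contents were updated successfully."))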
80691fa6d517b39a6656a2afc0635f485fd49974
add dependencies (#18406)
var/spack/repos/builtin/packages/gconf/package.py
var/spack/repos/builtin/packages/gconf/package.py
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Gconf(AutotoolsPackage): """GConf is a system for storing application preferences.""" homepage = "https://projects.gnome.org/gconf/" url = "http://ftp.gnome.org/pub/gnome/sources/GConf/3.2/GConf-3.2.6.tar.xz" version('3.2.6', sha256='1912b91803ab09a5eed34d364bf09fe3a2a9c96751fde03a4e0cfa51a04d784c') depends_on('glib@2.14.0:') depends_on('libxml2') # TODO: add missing dependencies # gio-2.0 >= 2.31.0 # gthread-2.0 # gmodule-2.0 >= 2.7.0 # gobject-2.0 >= 2.7.0 # dbus-1 >= 1.0.0 # dbus-glib-1 >= 0.74
Python
0.000001
@@ -540,24 +540,66 @@ a04d784c')%0A%0A + depends_on('pkgconfig', type='build')%0A depends_ @@ -647,186 +647,137 @@ 2')%0A -%0A -# TODO: add missing dependencies%0A # gio-2.0 %3E= 2.31.0%0A # gthread-2.0%0A # gmodule-2.0 %3E= 2.7.0%0A # gobject-2.0 %3E= 2.7.0%0A # dbus-1 %3E= 1.0.0%0A # dbus-glib-1 %3E= 0.74 +depends_on('dbus')%0A depends_on('dbus-glib')%0A depends_on('orbit2')%0A depends_on('perl-xml-parser', type=('build', 'run')) %0A
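The two hunks decode to a build-time pkgconfig dependency inserted ahead of glib, and the TODO comment block replaced by concrete dependencies, leaving the list as:

    depends_on('pkgconfig', type='build')
    depends_on('glib@2.14.0:')
    depends_on('libxml2')
    depends_on('dbus')
    depends_on('dbus-glib')
    depends_on('orbit2')
    depends_on('perl-xml-parser', type=('build', 'run'))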
bd0960cda8a66b843035935c7caa9f20b38b4d0d
Add 0.16.0 and address test suite issues (#27604)
var/spack/repos/builtin/packages/gpgme/package.py
var/spack/repos/builtin/packages/gpgme/package.py
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Gpgme(AutotoolsPackage): """GPGME is the standard library to access GnuPG functions from programming languages.""" homepage = "https://www.gnupg.org/software/gpgme/index.html" url = "https://www.gnupg.org/ftp/gcrypt/gpgme/gpgme-1.12.0.tar.bz2" executables = ['^gpgme-config$'] version('1.12.0', sha256='b4dc951c3743a60e2e120a77892e9e864fb936b2e58e7c77e8581f4d050e8cd8') depends_on('gnupg', type='build') depends_on('libgpg-error', type='build') depends_on('libassuan', type='build') @classmethod def determine_version(cls, exe): return Executable(exe)('--version', output=str, error=str).rstrip()
Python
0
@@ -475,17 +475,17 @@ pgme-1.1 -2 +6 .0.tar.b @@ -527,16 +527,113 @@ fig$'%5D%0A%0A + version('1.16.0', sha256='6c8cc4aedb10d5d4c905894ba1d850544619ee765606ac43df7405865de29ed0')%0A vers @@ -730,256 +730,1427 @@ -depends_on('gnupg', type='build')%0A depends_on('libgpg-error', type='build')%0A depends_on('libassuan', type='build')%0A%0A @classmethod%0A def determine_version(cls, exe):%0A return Executable(exe)('--version', output=str, error=str).rstrip( +# https://dev.gnupg.org/T5509 - New test t-edit-sign test crashes with GCC 11.1.0%0A patch(%0A 'https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gpgme.git;a=commitdiff_plain;h=81a33ea5e1b86d586b956e893a5b25c4cd41c969;hp=e8e055e682f8994d62012574e1c8d862ca72a35d',%0A sha256='b934e3cb0b3408ad27990d97b594c89801a4748294e2eb5804a455a312821411',%0A when='@1.16.0',%0A )%0A%0A depends_on('gnupg', type='build')%0A depends_on('libgpg-error', type='build')%0A depends_on('libassuan', type='build')%0A%0A @classmethod%0A def determine_version(cls, exe):%0A return Executable(exe)('--version', output=str, error=str).rstrip()%0A%0A def configure_args(self):%0A %22%22%22Fix the build when incompatible Qt libraries are installed on the host%22%22%22%0A return %5B'--enable-languages=cpp'%5D%0A%0A def setup_build_environment(self, env):%0A %22%22%22Build tests create a public keyring in ~/.gnupg if $HOME is not redirected%22%22%22%0A if self.run_tests:%0A env.set('HOME', self.build_directory)%0A env.prepend_path('LD_LIBRARY_PATH', self.spec%5B'libgpg-error'%5D.prefix.lib)%0A%0A @property%0A def make_tests(self):%0A %22%22%22Use the Makefile's tests variable to control if the build tests shall run%22%22%22%0A return 'tests=tests' if self.run_tests else 'tests='%0A%0A def build(self, spec, prefix):%0A make(self.make_tests)%0A%0A def install(self, spec, prefix):%0A make(self.make_tests, 'install' )%0A
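Decoded, the diff bumps the url to gpgme-1.16.0.tar.bz2, registers version 1.16.0, keeps the existing depends_on lines and determine_version(), and adds a patch plus test-aware build logic. The large hunk unescapes to roughly the following (indentation reconstructed):

    version('1.16.0', sha256='6c8cc4aedb10d5d4c905894ba1d850544619ee765606ac43df7405865de29ed0')

    # https://dev.gnupg.org/T5509 - New test t-edit-sign test crashes with GCC 11.1.0
    patch(
        'https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gpgme.git;a=commitdiff_plain;h=81a33ea5e1b86d586b956e893a5b25c4cd41c969;hp=e8e055e682f8994d62012574e1c8d862ca72a35d',
        sha256='b934e3cb0b3408ad27990d97b594c89801a4748294e2eb5804a455a312821411',
        when='@1.16.0',
    )

    def configure_args(self):
        """Fix the build when incompatible Qt libraries are installed on the host"""
        return ['--enable-languages=cpp']

    def setup_build_environment(self, env):
        """Build tests create a public keyring in ~/.gnupg if $HOME is not redirected"""
        if self.run_tests:
            env.set('HOME', self.build_directory)
            env.prepend_path('LD_LIBRARY_PATH', self.spec['libgpg-error'].prefix.lib)

    @property
    def make_tests(self):
        """Use the Makefile's tests variable to control if the build tests shall run"""
        return 'tests=tests' if self.run_tests else 'tests='

    def build(self, spec, prefix):
        make(self.make_tests)

    def install(self, spec, prefix):
        make(self.make_tests, 'install')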
cd7b72e67a3af4184ccaf3e3dce231c227392f45
Update Keras.py
History/Nesterov-Accelerated-Gradient/Keras.py
History/Nesterov-Accelerated-Gradient/Keras.py
import keras from keras.datasets import mnist from keras.initializers import RandomUniform from keras.layers import Dense from keras.models import Sequential from keras.optimizers import SGD batch_size = 128 epochs = 30 num_classes = 10 (x_train, y_train), (x_test, y_test) = mnist.load_data() x_train = x_train.reshape(60000, 784) x_test = x_test.reshape(10000, 784) x_train = x_train.astype('float32') x_test = x_test.astype('float32') x_train /= 255 x_test /= 255 # convert class vectors to binary class matrices y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) model = Sequential() model.add(Dense(512, activation='relu', input_shape=(784,), kernel_initializer=RandomUniform(minval=-0.01, maxval=0.01, seed=None), bias_initializer='zeros')) model.add(Dense(512, activation='relu', kernel_initializer=RandomUniform(minval=-0.01, maxval=0.01, seed=None), bias_initializer='zeros')) model.add(Dense(num_classes, activation='softmax', kernel_initializer=RandomUniform(minval=-0.01, maxval=0.01, seed=None), bias_initializer='zeros')) model.summary() model.compile(loss='categorical_crossentropy', optimizer=SGD(lr=0.1, momentum = 0.9, nesterov=True), metrics=['accuracy']) history = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, shuffle=False, validation_data=(x_test, y_test))
Python
0
@@ -1406,19 +1406,17 @@ momentum - = += 0.9, nes @@ -1697,8 +1697,10 @@ y_test)) +%0D%0A
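The change is cosmetic plus a line ending: the keyword argument loses its surrounding spaces, and the %0D%0A appends a CRLF at end of file. The optimizer line decodes to:

    optimizer=SGD(lr=0.1, momentum=0.9, nesterov=True),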
d3047f76711fd04a069cf17ab7f3f6b6ac54e131
fix error
chainer/functions/loss/sigmoid_cross_entropy.py
chainer/functions/loss/sigmoid_cross_entropy.py
import numpy import chainer from chainer.backends import cuda from chainer import function_node from chainer.functions.activation import sigmoid from chainer import utils from chainer.utils import type_check class SigmoidCrossEntropy(function_node.FunctionNode): """Sigmoid activation followed by a sigmoid cross entropy loss.""" ignore_label = -1 def __init__(self, normalize=True, reduce='mean'): self.normalize = normalize if reduce not in ('mean', 'no'): raise ValueError( "only 'mean' and 'no' are valid for 'reduce', but '%s' is " 'given' % reduce) self.reduce = reduce self.count = None def check_type_forward(self, in_types): type_check.name(in_types, ('x', 't')) x_type, t_type = in_types type_check.expect( x_type.dtype == numpy.float32, t_type.dtype.kind == 'i', x_type.shape == t_type.shape ) def forward(self, inputs): self.retain_inputs((0, 1)) xp = cuda.get_array_module(*inputs) x, t = inputs self.ignore_mask = (t != self.ignore_label) # stable computation of the cross entropy. loss = -( self.ignore_mask * (x * (t - (x >= 0)) - xp.log1p(xp.exp(-xp.abs(x))))) if not self.reduce == 'mean': return utils.force_array(loss.astype(x.dtype)), if self.normalize: count = xp.maximum(1, self.ignore_mask.sum()) else: count = max(1, len(x)) self.count = count return utils.force_array( xp.divide(xp.sum(loss), self.count, dtype=x.dtype)), def backward(self, inputs, grad_outputs): x, t = self.get_retained_inputs() gy, = grad_outputs return SigmoidCrossEntropyGrad( self.reduce, self.count, self.ignore_mask, t.data).apply((x, gy)) class SigmoidCrossEntropyGrad(function_node.FunctionNode): """Sigmoid cross entropy gradient function.""" def __init__(self, reduce, count, ignore_mask, t): self.reduce = reduce self.count = count self.ignore_mask = ignore_mask self.t = t def forward(self, inputs): self.retain_inputs((0, 1)) xp = cuda.get_array_module(*inputs) x, gy = inputs y, = sigmoid.Sigmoid().forward((x,)) if self.reduce == 'mean': gx = xp.divide( gy * self.ignore_mask * (y - self.t), self.count, dtype=y.dtype) else: gx = (gy * self.ignore_mask * (y - self.t)).astype(y.dtype) return gx, None def backward(self, indexes, grad_outputs): ggx, _ = grad_outputs x, gy = self.get_retained_inputs() y = chainer.functions.sigmoid(x) yp = y * (1 - y) gx = yp * chainer.functions.broadcast_to(gy, yp.shape) ggy = y - self.t.astype(y.dtype) gx *= self.ignore_mask * ggx ggy *= self.ignore_mask * ggx if self.reduce == 'mean': gx /= self.count ggy = chainer.functions.sum(ggy) / self.count return gx, ggy def sigmoid_cross_entropy(x, t, normalize=True, reduce='mean'): """Computes cross entropy loss for pre-sigmoid activations. Args: x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \ :class:`cupy.ndarray`): A variable object holding a matrix whose (i, j)-th element indicates the unnormalized log probability of the j-th unit at the i-th example. t (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \ :class:`cupy.ndarray`): A variable object holding a matrix whose (i, j)-th element indicates a signed integer vector of ground truth labels 0 or 1. If ``t[i, j] == -1``, corresponding ``x[i, j]`` is ignored. Loss is zero if all ground truth labels are ``-1``. normalize (bool): Variable holding a boolean value which determines the normalization constant. If true, this function normalizes the cross entropy loss across all instances. If else, it only normalizes along a batch size. reduce (str): Variable holding a ``str`` which determines whether to reduce the shape of the input. 
If it is ``'mean'``, it computes the sum of cross entropy and normalize it according to ``normalize`` option. If is is ``'no'``, this function computes cross entropy for each instance and does not normalize it (``normalize`` option is ignored). In this case, the loss value of the ignored instance, which has ``-1`` as its target value, is set to ``0``. Returns: Variable: A variable object holding an array of the cross entropy. If ``reduce`` is ``'mean'``, it is a scalar array. If ``reduce`` is ``'no'``, the shape is same as ``x``. .. note:: This function is differentiable only by ``x``. .. admonition:: Example >>> x = np.array([[-2.0, 3.0, 0.5], [5.0, 2.0, -0.5]]).\ astype(np.float32) >>> x array([[-2. , 3. , 0.5], [ 5. , 2. , -0.5]], dtype=float32) >>> t = np.array([[0, 1, 0], [1, 1, -1]]).astype(np.int32) >>> t array([[ 0, 1, 0], [ 1, 1, -1]], dtype=int32) >>> F.sigmoid_cross_entropy(x, t) variable(0.25664714) >>> F.sigmoid_cross_entropy(x, t, normalize=False) variable(0.64161783) >>> y = F.sigmoid_cross_entropy(x, t, reduce='no') >>> y.shape (2, 3) >>> y.data array([[ 0.126928 , 0.04858735, 0.974077 ], [ 0.00671535, 0.126928 , -0. ]], dtype=float32) """ return SigmoidCrossEntropy(normalize, reduce).apply((x, t))[0]
Python
0.000002
@@ -749,12 +749,12 @@ eck. -name +args (in_
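The single hunk swaps "name" for "args" in the type-check call, which afterwards decodes to:

    type_check.args(in_types, ('x', 't'))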
891207f20edfeadaf7f56ff90942f45732cfd0bb
fix building with mkl (#24338)
var/spack/repos/builtin/packages/kaldi/package.py
var/spack/repos/builtin/packages/kaldi/package.py
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * from os.path import join from fnmatch import fnmatch import os class Kaldi(Package): # Does not use Autotools """Kaldi is a toolkit for speech recognition written in C++ and licensed under the Apache License v2.0. Kaldi is intended for use by speech recognition researchers.""" homepage = "https://github.com/kaldi-asr/kaldi" git = "https://github.com/kaldi-asr/kaldi.git" version('master') version('2019-09-29', commit='6ffde4b41c58de778245149690927d592cd5956a') version('2019-07-29', commit='7637de77e0a77bf280bef9bf484e4f37c4eb9475') version('2018-07-11', commit='6f2140b032b0108bc313eefdca65151289642773') version('2015-10-07', commit='c024e8aa0a727bf76c91a318f76a1f8b0b59249e') variant('shared', default=True, description='build shared libraries') variant('double', default=False, description='build with double precision floats') variant('cuda', default=False, description='build with CUDA') depends_on('blas') depends_on('cuda', when='+cuda') depends_on('sph2pipe', type='run') depends_on('sctk', type='run') depends_on('speex', type='run') depends_on('openfst@1.4.1-patch', when='@2015-10-07') depends_on('openfst@1.6.0:', when='@2018-07-11') depends_on('openfst@1.6.0:', when='@2019-07-29') depends_on('openfst@1.6.7:1.7.3', when='@2019-09-29:') depends_on('cub', when='@2019-07-29:') patch('openfst-1.4.1.patch', when='@2015-10-07') # Change process of version analysis when using Fujitsu compiler. patch('fujitsu_fix_version_analysis.patch', when='@2018-07-11:%fj') def install(self, spec, prefix): configure_args = ['--fst-root=' + spec['openfst'].prefix] configure_args.append('--fst-version=' + str(spec['openfst'].version)) configure_args.append('--speex-root=' + spec['speex'].prefix) if '~shared' in spec: configure_args.append('--static') else: configure_args.append('--shared') if '^openblas' in spec: configure_args.append('--mathlib=OPENBLAS') configure_args.append('--openblas-root=' + spec['blas'].prefix) if '+openmp' in spec['blas'].variants: configure_args.append('--threaded-math=yes') elif '^atlas' in spec: configure_args.append('--mathlib=ATLAS') configure_args.append('--atlas-root=' + spec['blas'].prefix) if '+pthread' in spec['blas'].variants: configure_args.append('--threaded-atlas') elif '^intel-parallel-studio' in spec or '^intel-mkl' in spec: configure_args.append('--mathlib=MKL') configure_args.append('--mkl-root=' + spec['blas'].prefix) if '+openmp' in spec['blas'].variants: configure_args.append('--mkl-threading=iomp') if '+cuda' in spec: configure_args.append('--use-cuda=yes') configure_args.append('--cudatk-dir=' + spec['cuda'].prefix) if spec.satisfies('@2019-07-29:'): configure_args.append('--cub-root=' + spec['cub'].prefix.include) with working_dir("src"): configure(*configure_args) make() mkdirp(prefix.bin) for root, dirs, files in os.walk('.'): for name in files: if name.endswith("." + dso_suffix) \ or name.endswith(".cc") \ or name.endswith(".pptx"): continue if "configure" == name: continue if os.access(join(root, name), os.X_OK): install(join(root, name), prefix.bin) mkdir(prefix.lib) for root, dirs, files in os.walk('lib'): for name in files: if name.endswith("." 
+ dso_suffix): fpath = join(root, name) src = os.readlink(fpath) install(src, prefix.lib) for root, dirs, files in os.walk('.'): for name in files: if fnmatch(name, '*.h'): mkdirp(join(prefix.include, root.strip("./"))) install(join(root, name), join(prefix.include, root.strip("./"))) egs_dir = join(prefix, 'egs') install_tree('egs', egs_dir)
Python
0
@@ -2961,32 +2961,36 @@ c%5B'blas'%5D.prefix +.mkl )%0A if
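Decoded, the single hunk points the MKL root at the mkl subdirectory of the blas prefix:

    configure_args.append('--mkl-root=' + spec['blas'].prefix.mkl)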
219ec7659b06aece8a738198799893de5f28c2b2
Add to/from string methods for Box
rain/engine.py
rain/engine.py
from ctypes import CFUNCTYPE, POINTER from ctypes import Structure from ctypes import byref from ctypes import c_char_p from ctypes import c_int from ctypes import c_uint16 from ctypes import c_uint32 from ctypes import c_uint64 from ctypes import c_uint8 from ctypes import c_void_p from ctypes import cast import llvmlite.binding as llvm class Box(Structure): _fields_ = [("type", c_uint8), ("data", c_uint64), ("size", c_uint32)] Arg = POINTER(Box) class Engine: def __init__(self, ll_file=None, llvm_ir=None): llvm.initialize() llvm.initialize_native_target() llvm.initialize_native_asmprinter() # yes, even this one # Create a target machine representing the host target = llvm.Target.from_default_triple() target_machine = target.create_target_machine() # And an execution engine with a backing module if ll_file: self.main_mod = self.compile_file(ll_file) elif llvm_ir: self.main_mod = self.compile_ir(llvm_ir) else: self.main_mod = self.compile_ir('') self.engine = llvm.create_mcjit_compiler(self.main_mod, target_machine) def add_lib(self, *libs): for lib in libs: llvm.load_library_permanently(lib) def compile_file(self, ll_file): with open(ll_file) as tmp: return self.compile_ir(tmp.read()) def compile_ir(self, llvm_ir): # Create a LLVM module object from the IR mod = llvm.parse_assembly(llvm_ir) mod.verify() return mod def link_file(self, *additions): self.link_ir(*(self.compile_file(add) for add in additions)) def link_ir(self, *additions): for add in additions: self.main_mod.link_in(add) def set_main_mod(self, mod): self.main_mod = mod self.engine.add_module(mod) def finalize(self): self.engine.finalize_object() def get_func(self, name, *types): func_typ = CFUNCTYPE(*types) func_ptr = self.engine.get_function_address(name) return func_typ(func_ptr) def rain_get_str(self, table_box, key): get = self.get_func('rain_get', Arg, Arg, Arg) ret_box = Box(0, 0, 0) key_str = c_char_p(key.encode("utf-8")) key_box = Box(4, cast(key_str, c_void_p).value, len(key)) get(byref(ret_box), byref(table_box), byref(key_box)) return ret_box def main(self): main = self.get_func('main', c_int, c_int, POINTER(c_char_p)) argc = c_int(1) argv_0 = c_char_p("test".encode("utf-8")) main(argc, byref(argv_0))
Python
0
@@ -459,16 +459,246 @@ nt32)%5D%0A%0A + @classmethod%0A def from_str(cls, string):%0A str_p = c_char_p(string.encode(%22utf-8%22))%0A return cls(4, cast(str_p, c_void_p).value, len(string))%0A%0A def as_str(self):%0A return cast(self.data, c_char_p).value.decode(%22utf-8%22)%0A%0A Arg = PO
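Unescaped, the hunk adds two string helpers inside class Box, right after the _fields_ declaration (indentation reconstructed; the type tag 4 matches the string boxing already used in rain_get_str):

    @classmethod
    def from_str(cls, string):
        str_p = c_char_p(string.encode("utf-8"))
        return cls(4, cast(str_p, c_void_p).value, len(string))

    def as_str(self):
        return cast(self.data, c_char_p).value.decode("utf-8")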
b8c18068c2cc2afe169c750f25318c6ba92e2763
use Spack compilers and remove x86_64 opts from Makefile (#13877)
var/spack/repos/builtin/packages/prank/package.py
var/spack/repos/builtin/packages/prank/package.py
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Prank(Package): """A powerful multiple sequence alignment browser.""" homepage = "http://wasabiapp.org/software/prank/" url = "http://wasabiapp.org/download/prank/prank.source.170427.tgz" version('170427', sha256='623eb5e9b5cb0be1f49c3bf715e5fabceb1059b21168437264bdcd5c587a8859') depends_on('mafft') depends_on('exonerate') depends_on('bpp-suite') # for bppancestor conflicts('%gcc@7.2.0', when='@:150803') def install(self, spec, prefix): with working_dir('src'): make() mkdirp(prefix.bin) install('prank', prefix.bin)
Python
0
@@ -743,16 +743,409 @@ 'src'):%0A +%0A filter_file('gcc', '%7B0%7D'.format(spack_cc),%0A 'Makefile', string=True)%0A filter_file('g++', '%7B0%7D'.format(spack_cxx),%0A 'Makefile', string=True)%0A if not spec.target.family == 'x86_64':%0A filter_file('-m64', '', 'Makefile', string=True)%0A filter_file('-pipe', '', 'Makefile', string=True)%0A%0A
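The hunk decodes to Makefile filtering at the top of the src build, substituting Spack's compiler wrappers and stripping the x86_64-only flags on other targets (indentation reconstructed):

    with working_dir('src'):

        filter_file('gcc', '{0}'.format(spack_cc),
                    'Makefile', string=True)
        filter_file('g++', '{0}'.format(spack_cxx),
                    'Makefile', string=True)
        if not spec.target.family == 'x86_64':
            filter_file('-m64', '', 'Makefile', string=True)
            filter_file('-pipe', '', 'Makefile', string=True)

        make()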
26933550f7a3c195669c61539151c5fedf26aaad
add version 1.0.0 to r-hms (#21045)
var/spack/repos/builtin/packages/r-hms/package.py
var/spack/repos/builtin/packages/r-hms/package.py
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class RHms(RPackage): """Implements an S3 class for storing and formatting time-of-day values, based on the 'difftime' class.""" homepage = "https://cloud.r-project.org/package=hms" url = "https://cloud.r-project.org/src/contrib/hms_0.3.tar.gz" list_url = "https://cloud.r-project.org/src/contrib/Archive/hms" version('0.5.0', sha256='a87872665c3bf3901f597d78c152e7805f7129e4dbe27397051de4cf1a76561b') version('0.3', sha256='9368259cbc1094ce0e4cf61544875ec30088ef690d6667e6b0b564218ab3ff88') depends_on('r-pkgconfig', when='@0.5.0:', type=('build', 'run')) depends_on('r-rlang', when='@0.5.0:', type=('build', 'run')) depends_on('r-vctrs@0.2.0:', when='@0.5.0:', type=('build', 'run'))
Python
0
@@ -242,16 +242,40 @@ %0A %22%22%22 +Pretty Time of Day%0A%0A Implemen @@ -339,21 +339,18 @@ ues, -%0A based +%0A on @@ -574,16 +574,112 @@ e/hms%22%0A%0A + version('1.0.0', sha256='9704e903d724f0911d46e5ad18b469a7ed419c5b1f388bd064fd663cefa6c962')%0A vers @@ -861,16 +861,153 @@ ff88')%0A%0A + depends_on('r-ellipsis', when='@1.0.0:', type=('build', 'run'))%0A depends_on('r-lifecycle', when='@1.0.0:', type=('build', 'run'))%0A depe @@ -1124,32 +1124,32 @@ build', 'run'))%0A - depends_on(' @@ -1184,28 +1184,100 @@ 0:', type=('build', 'run'))%0A + depends_on('r-vctrs@0.2.1:', when='@1.0.0:', type=('build', 'run'))%0A
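Decoded, the hunks retitle the docstring "Pretty Time of Day", rewrap its body, add the 1.0.0 release, and extend the dependency list:

    version('1.0.0', sha256='9704e903d724f0911d46e5ad18b469a7ed419c5b1f388bd064fd663cefa6c962')

    depends_on('r-ellipsis', when='@1.0.0:', type=('build', 'run'))
    depends_on('r-lifecycle', when='@1.0.0:', type=('build', 'run'))
    depends_on('r-vctrs@0.2.1:', when='@1.0.0:', type=('build', 'run'))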
6a68d7b46e039dacc8b136c252fd1ff57e719734
Remove psutil; retrieve cmdline manually.
instana/fsm.py
instana/fsm.py
import subprocess import os import psutil import socket import threading as t import fysom as f import instana.log as l import instana.agent_const as a class Discovery(object): pid = 0 name = None args = None fd = -1 inode = "" def __init__(self, **kwds): self.__dict__.update(kwds) def to_dict(self): kvs = dict() kvs['pid'] = self.pid kvs['name'] = self.name kvs['args'] = self.args kvs['fd'] = self.fd kvs['inode'] = self.inode return kvs class Fsm(object): RETRY_PERIOD = 30 agent = None fsm = None timer = None def __init__(self, agent): l.info("Stan is on the scene. Starting Instana instrumentation.") l.debug("initializing fsm") self.agent = agent self.fsm = f.Fysom({ "initial": "lostandalone", "events": [ ("startup", "*", "lostandalone"), ("lookup", "lostandalone", "found"), ("announce", "found", "announced"), ("ready", "announced", "good2go")], "callbacks": { "onlookup": self.lookup_agent_host, "onannounce": self.announce_sensor, "onchangestate": self.printstatechange}}) def printstatechange(self, e): l.debug('========= (%i#%s) FSM event: %s, src: %s, dst: %s ==========' % \ (os.getpid(), t.current_thread().name, e.event, e.src, e.dst)) def reset(self): self.fsm.lookup() def lookup_agent_host(self, e): if self.agent.sensor.options.agent_host != "": host = self.agent.sensor.options.agent_host else: host = a.AGENT_DEFAULT_HOST h = self.check_host(host) if h == a.AGENT_HEADER: self.agent.set_host(host) self.fsm.announce() return True elif os.path.exists("/proc/"): host = self.get_default_gateway() if host: h = self.check_host(host) if h == a.AGENT_HEADER: self.agent.set_host(host) self.fsm.announce() return True l.warn("Instana Host Agent can't be found. Scheduling retry.") self.schedule_retry(self.lookup_agent_host, e, "agent_lookup") return False def get_default_gateway(self): l.debug("checking default gateway") try: proc = subprocess.Popen( "/sbin/ip route | awk '/default/' | cut -d ' ' -f 3 | tr -d '\n'", shell=True, stdout=subprocess.PIPE) addr = proc.stdout.read() return addr.decode("UTF-8") except Exception as e: l.error(e) return None def check_host(self, host): l.debug("checking host", host) (_, h) = self.agent.request_header( self.agent.make_host_url(host, "/"), "GET", "Server") return h def announce_sensor(self, e): l.debug("announcing sensor to the agent") p = psutil.Process(os.getpid()) s = None d = Discovery(pid=p.pid, name=p.cmdline()[0], args=p.cmdline()[1:]) # If we're on a system with a procfs if os.path.exists("/proc/"): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((self.agent.host, 42699)) path = "/proc/%d/fd/%d" % (p.pid, s.fileno()) d.fd = s.fileno() d.inode = os.readlink(path) (b, _) = self.agent.request_response( self.agent.make_url(a.AGENT_DISCOVERY_URL), "PUT", d) if b: self.agent.set_from(b) self.fsm.ready() l.warn("Host agent available. We're in business. Announced pid: %i (true pid: %i)" % (p.pid, self.agent.from_.pid)) return True else: l.warn("Cannot announce sensor. Scheduling retry.") self.schedule_retry(self.announce_sensor, e, "announce") return False def schedule_retry(self, fun, e, name): l.debug("Scheduling: " + name) self.timer = t.Timer(self.RETRY_PERIOD, fun, [e]) self.timer.daemon = True self.timer.name = name self.timer.start() l.debug('Threadlist: ', str(t.enumerate())) def test_agent(self, e): l.debug("testing communication with the agent") (b, _) = self.agent.head(self.agent.make_url(a.AGENT_DATA_URL)) if not b: self.schedule_retry(self.test_agent, e, "agent test") else: self.fsm.test()
Python
0
@@ -28,22 +28,19 @@ %0Aimport -psutil +sys %0Aimport @@ -3103,56 +3103,326 @@ -p = psutil.Process(os.getpid())%0A s = None +s = None%0A pid = os.getpid()%0A%0A if os.path.isfile(%22/proc/self/cmdline%22):%0A with open(%22/proc/self/cmdline%22) as cmd:%0A cmdinfo = cmd.read()%0A cmdline = cmdinfo.split(%22%5C0%22)%0A else:%0A cmdline = %5Bos.path.basename(sys.executable)%5D%0A cmdline += sys.argv %0A%0A @@ -3445,18 +3445,16 @@ ery(pid= -p. pid,%0A @@ -3477,27 +3477,23 @@ name= -p. cmdline -() %5B0%5D,%0A @@ -3520,19 +3520,15 @@ rgs= -p. cmdline -() %5B1:%5D @@ -3761,26 +3761,24 @@ d/fd/%25d%22 %25 ( -p. pid, s.filen @@ -4145,10 +4145,8 @@ %25 ( -p. pid,
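The diff above drops the psutil dependency by reading the process's own command line straight from procfs, falling back to sys.argv elsewhere. A minimal standalone sketch of that technique (the function name is illustrative):

import os
import sys

def get_cmdline():
    # On Linux, /proc/self/cmdline holds the NUL-separated argument vector.
    if os.path.isfile("/proc/self/cmdline"):
        with open("/proc/self/cmdline") as cmd:
            return cmd.read().split("\0")
    # Off Linux, approximate it from the interpreter name plus argv.
    return [os.path.basename(sys.executable)] + sys.argv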
91cb70a94cd41bb6404fb6f21361bb8a7f01c9d5
Rework thread model
irrexplorer.py
irrexplorer.py
#!/usr/bin/env python

# Copyright (c) 2015, Job Snijders
#
# This file is part of IRR Explorer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
#    this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

from irrexplorer import config
from irrexplorer import nrtm
from threading import Thread
from radix import Radix
from Queue import Queue

databases = config('irrexplorer_config.yml').databases


def connect_nrtm(config):
    feed = nrtm.client(**config)
    for cmd, serial, obj in feed.get():
        if not obj:
            continue
        print obj
        print cmd, serial, len(obj), config['dbname']


def radix_maintainer():


for dbase in databases:
    name = dbase.keys().pop()
    client_config = dict(d.items()[0] for d in dbase[name])
    print client_config
    worker = Thread(target=connect_nrtm, args=(client_config,))
    worker.setDaemon(True)
    worker.start()


"""
from irrexplorer.nrtm import client
a = client(nrtmhost='whois.radb.net', nrtmport=43,
           serial='ftp://ftp.radb.net/radb/dbase/RADB.CURRENTSERIAL',
           dump='ftp://ftp.radb.net/radb/dbase/radb.db.gz', dbase="RADB")
while True:
    for i in a.get():
        print i
"""
Python
0
@@ -1559,16 +1559,37 @@ tabases%0A +nrtm_queue = Queue()%0A %0A%0Adef co @@ -1605,16 +1605,28 @@ m(config +, nrtm_queue ):%0A f @@ -1734,16 +1734,17 @@ ontinue%0A +# @@ -1748,16 +1748,21 @@ print + cmd, obj%0A @@ -1762,30 +1762,40 @@ obj%0A -print +nrtm_queue.put(( cmd, serial, @@ -1799,16 +1799,11 @@ al, -len( obj -) , co @@ -1816,16 +1816,19 @@ dbname'%5D +))%0A %0A%0Adef ra @@ -1842,18 +1842,165 @@ ntainer( +nrtm_queue):%0A import time%0A time.sleep(15)%0A while True:%0A update = nrtm_queue.get()%0A print update%0A nrtm_queue.task_done( ) -: %0A%0A%0Afor d @@ -2193,16 +2193,27 @@ _config, + nrtm_queue ))%0A w @@ -2254,17 +2254,133 @@ tart()%0A%0A -%0A +worker = Thread(target=radix_maintainer, args=(nrtm_queue,))%0Aworker.setDaemon(True)%0Aworker.start()%0A%0Anrtm_queue.join() %0A%0A%22%22%22%0Afr
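The reworked model above pushes NRTM updates from the per-database reader threads onto a shared Queue consumed by a single maintainer thread. A minimal sketch of that producer/consumer handoff (Python 2, matching the file; the payload tuple is illustrative):

from Queue import Queue
from threading import Thread

nrtm_queue = Queue()

def producer():
    nrtm_queue.put(('ADD', 1, 'route-object', 'RADB'))  # stand-in update

def consumer():
    while True:
        update = nrtm_queue.get()   # blocks until a producer enqueues
        print update
        nrtm_queue.task_done()

worker = Thread(target=consumer)
worker.setDaemon(True)
worker.start()
producer()
nrtm_queue.join()  # returns once every queued update is task_done()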
82d180324cc4bf22d7dc13c15b334efdae0f4a1b
Raise error if empty data when expecting data
isp2/packet.py
isp2/packet.py
"""One data packet in Innovate Serial Protocol version 2 (ISP2). For data format specifications, see http://www.innovatemotorsports.com/support/downloads/Seriallog-2.pdf """ import struct class InnovatePacket(object): """An packet in the Innovate Serial Protocol version 2 (ISP2). ISP2 packets are composed of 16 bit words. """ # Define some bitmasks START_MARKER_MASK = 0b1000000000000000 # In a header word, bits 13, 9, and 7 will be 1. HEADER_MASK = START_MARKER_MASK | 0b0010001010000000 RECORDING_TO_FLASH_MASK = 0b0100000000000000 # In header. 1 is is recording. SENSOR_DATA_MASK = 0b0001000000000000 # In header. 1 if data, 0 if reply to command. CAN_LOG_MASK = 0b0000100000000000 # In header. 1 if originating device can do internal logging. AUX_CHANNEL_LOW_MASK = 0b1100000010000000 # The other bits are data from the sensor LM1_HIGH_MASK = START_MARKER_MASK LM1_LOW_MASK = 0b0010001010000000 # First word of LM-1 LC1_HIGH_MASK = 0b0100001000000000 # First of two words from an LC-1, bits always high LC1_LOW_MASK = 0b1010000010000000 # First of two words from an LC-1, bits always low def __init__(self, header=None, data=None, devices=None): self.header = header self.data = data self.devices = devices def _to_words(self, bytestring): """Convert a byte string to a list of words. Each word is an integer. """ if bytestring is None: return None # Each word is two bytes long n_words = len(bytestring)/2 # ISP2 words are big endian, indicated by ">" # ISP2 words are unsigned short, indicated by "H" return struct.unpack(">%dH" % n_words, bytestring) @property def header(self): return self._header @header.setter def header(self, header): """Input header as a bytestring. """ header = self._to_words(header) if header: if len(header) != 1: raise Exception('Header must be exactly one word long.') header = header[0] if not header & self.HEADER_MASK == self.HEADER_MASK: raise Exception('Invalid header') self._header = header ## Data stored in the header ## @property def packet_length(self): """Get the packet length from the header. Packet length is the number of data words after the header. Note that each word is 2 bytes long. """ if self._header: # Packet length is encoded in bit 8 and bits 6-0 # First, get bits 6-0 packet_length = self._header & 0b0000000001111111 # Bit 8 is the 7th (zero-indexed) bit in the length if self._header & 0b0000000100000000: packet_length += 0b10000000 # 128 return packet_length @property def is_recording_to_flash(self): """Return boolean indicating whether the data is being recorded to flash. """ if self._header: return self.header & self.RECORDING_TO_FLASH_MASK == self.RECORDING_TO_FLASH_MASK @property def is_sensor_data(self): """Return True if the packet contains sensor data, False if it is a reply to a command. """ if self.header: return self.header & self.SENSOR_DATA_MASK == self.SENSOR_DATA_MASK @property def can_log(self): """Return boolean indicating whether the originating device can do internal logging. """ if self._header: return self.header & self.CAN_LOG_MASK == self.CAN_LOG_MASK ## The data ## @property def data(self): return self._data @data.setter def data(self, data): """Input data as a bytestring. """ data = self._to_words(data) if self._header and len(data) != self.packet_length: raise Exception('Packet length does not match specification from header') self._data = data def aux_word2aux_channel(self, word): """Strip unused bits from an aux channel word. 
""" # Confirm that this is an aux channel word if not word & self.AUX_CHANNEL_LOW_MASK == 0: raise Exception('Not an aux channel word') # The MSB of each (8-bit) byte in Aux words is zero # First, get bits 6-0 aux_channel = word & 0b0000000001111111 # Now get bits 13-8 and shift them by one aux_channel += (word & 0b0011111100000000) >> 1 return aux_channel
Python
0.000031
@@ -3934,16 +3934,143 @@ s(data)%0A + if not data and self.packet_length:%0A raise Exception('No data in packet, expected %25i' %25 self.packet_length)%0A
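For context on the class above, the 16-bit big-endian words it validates are decoded exactly as its _to_words helper does, via struct; a tiny self-contained example:

import struct

raw = b'\xb2\x82'                    # one ISP2 word as two bytes
(word,) = struct.unpack('>1H', raw)  # '>' big endian, 'H' unsigned short
print(bin(word))                     # 0b1011001010000010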
03f33b099ec9adc480f599338b61214e870fedf6
Update iypm_domain export name to use a valid format
iypm_domain.py
iypm_domain.py
import sys

try:
    from troposphere import Join, Sub, Output, Export
    from troposphere import Parameter, Ref, Template
    from troposphere.route53 import HostedZone
    from troposphere.certificatemanager import Certificate
except ImportError:
    sys.exit('Unable to import troposphere. '
             'Try "pip install troposphere[policy]".')

t = Template()

t.add_description(
    'Template for creating a DNS Zone and SSL Certificate. '
    'Note: Stack creation will block until domain ownership is verified.')

zone_name = t.add_parameter(Parameter(
    'ZoneName',
    Description='The name of the DNS Zone to create (example.com).',
    Type='String'
))

hosted_zone = t.add_resource(HostedZone('DNSZone', Name=Ref(zone_name)))

acm_certificate = t.add_resource(Certificate(
    'Certificate',
    DomainName=Ref(zone_name),
    SubjectAlternativeNames=[Sub('*.${ZoneName}')]
))

t.add_output([
    Output(
        'ZoneId',
        Description='Route53 Zone ID',
        Value=Ref(hosted_zone),
        Export=Export(Sub('${AWS::StackName}-${ZoneName}-R53Zone'))
    ),
    Output(
        'CertificateId',
        Description='ACM Certificate ARN',
        Value=Ref(acm_certificate),
        Export=Export(Sub('${AWS::StackName}-${ZoneName}-CertARN'))
    )
])

print(t.to_json())
Python
0
@@ -1055,28 +1055,16 @@ ckName%7D- -$%7BZoneName%7D- R53Zone' @@ -1238,20 +1238,8 @@ me%7D- -$%7BZoneName%7D- Cert
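The fix drops ${ZoneName} from the Export names because zone names such as example.com contain dots, and CloudFormation export names are, to my understanding, restricted to alphanumerics, colons, and hyphens. A quick illustrative check; the exact pattern here is an assumption, not taken from AWS documentation:

import re

EXPORT_NAME_RE = re.compile(r'^[A-Za-z0-9:-]+$')  # assumed allowed charset

print(bool(EXPORT_NAME_RE.match('mystack-example.com-R53Zone')))  # False, '.' rejected
print(bool(EXPORT_NAME_RE.match('mystack-R53Zone')))              # True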
03b13e38467a94e784c6cac6915203e9a8924580
update for kazoo api change
jones/jones.py
jones/jones.py
""" Copyright 2012 DISQUS Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import collections import json from functools import partial import zkutil class ZNodeMap(object): """Associate znodes with names.""" SEPARATOR = ' -> ' def __init__(self, zk, path): """ zk: KazooClient instance path: znode to store associations """ self.zk = zk self.path = path zk.ensure_path(path) def set(self, name, dest): zmap, version = self._get() zmap[name] = dest self._set(zmap, version) def get(self, name): return self.get_all()[name] def get_all(self): """returns a map of names to destinations.""" zmap, v = self._get() return zmap def delete(self, name): zmap, version = self._get() del zmap[name] self._set(zmap, version) def _get(self): """get and parse data stored in self.path.""" def _deserialize(d): if not len(d): return {} return dict(l.split(self.SEPARATOR) for l in d.split('\n')) data, stat = self.zk.get(self.path) return _deserialize(data), stat.version def _set(self, data, version): """serialize and set data to self.path.""" def _serialize(d): return '\n'.join(self.SEPARATOR.join((k, d[k])) for k in d) self.zk.set(self.path, _serialize(data), version) class Jones(object): """ Glossary: view refers to a node which has has the following algorithm applied for node in root -> env update view with node.config environment a node in the service graph as passed to get/set config, it should identify the node within the service i.e. "production" or "dev/mwhooker" """ def __init__(self, service, zk): self.zk = zk self.service = service self.root = "/services/%s" % service self.conf_path = "%s/conf" % self.root self.view_path = "%s/views" % self.root self.associations = ZNodeMap(zk, "%s/nodemaps" % self.root) self._get_env_path = partial(self._get_path_by_env, self.conf_path) self._get_view_path = partial(self._get_path_by_env, self.view_path) def create_config(self, env, conf): """ Set conf to env under service. pass None to env for root. """ if not isinstance(conf, collections.Mapping): raise ValueError("conf must be a collections.Mapping") self.zk.ensure_path(self.view_path) self._create( self._get_env_path(env), conf ) self._update_view(env) def set_config(self, env, conf, version): """ Set conf to env under service. pass None to env for root. """ if not isinstance(conf, collections.Mapping): raise ValueError("conf must be a collections.Mapping") self._set( self._get_env_path(env), conf, version ) def propogate(src): """Update env's children with new config.""" self._update_view(src) path = self._get_env_path(src) for child in self.zk.get_children(path): if src: child = "%s/%s" % (src, child) propogate(child) propogate(env) def delete_config(self, env, version): self.zk.delete( self._get_env_path(env), version ) self.zk.delete( self._get_view_path(env) ) def get_config(self, hostname): """ Returns a 2-tuple like (version, data). Version must be used with future calls to set_config. 
""" return self._get( self.associations.get(hostname) ) def get_config_by_env(self, env): return self._get( self._get_env_path(env) ) def get_view_by_env(self, env): return self._get( self._get_view_path(env) ) def assoc_host(self, hostname, env): """ Associate a host with an environment. hostname is opaque to Jones. Any string which uniquely identifies a host is acceptable. """ dest = self._get_view_path(env) self.associations.set(hostname, dest) def get_associations(self, env=None): """ Get all the associations for this env, or all if env is None. returns a map of hostnames to environments. """ associations = self.associations.get_all() if not env: return associations return [assoc for assoc in associations if associations[assoc] == self._get_view_path(env)] def delete_association(self, hostname): self.associations.delete(hostname) def exists(self): """Does this service exist in zookeeper""" return self.zk.exists( self._get_env_path(None) ) def delete_all(self): self.zk.recursive_delete(self.root) def get_child_envs(self, env=None): prefix = self._get_env_path(env) envs = zkutil.walk(self.zk, prefix) return map(lambda e: e[len(prefix):], envs) def _flatten_to_root(self, env): """ Flatten values from root down in to new view. """ nodes = env.split('/') # Path through the znode graph from root ('') to env path = [nodes[:n] for n in xrange(len(nodes) + 1)] # Expand path and map it to the root path = map( self._get_env_path, ['/'.join(p) for p in path] ) data = {} for n in path: _, config = self._get(n) data.update(config) return data def _update_view(self, env): if not env: env = '' dest = self._get_view_path(env) if not self.zk.exists(dest): self.zk.ensure_path(dest) self._set(dest, self._flatten_to_root(env)) def _get_path_by_env(self, prefix, env): if not env: return prefix assert env[0] != '/' return '/'.join((prefix, env)) def _get_nodemap_path(self, hostname): return "%s/%s" % (self.nodemap_path, hostname) def _get(self, path): data, metadata = self.zk.get(path) return metadata.version, json.loads(data) def _set(self, path, data, *args, **kwargs): return self.zk.set(path, json.dumps(data), *args, **kwargs) def _create(self, path, data, *args, **kwargs): return self.zk.create(path, json.dumps(data), *args, **kwargs)
Python
0
@@ -5654,18 +5654,8 @@ .zk. -recursive_ dele @@ -5662,24 +5662,40 @@ te(self.root +, recursive=True )%0A%0A def g
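The diff tracks a kazoo API change: the standalone recursive_delete helper was folded into delete as a keyword argument. In isolation:

from kazoo.client import KazooClient

zk = KazooClient(hosts='127.0.0.1:2181')
zk.start()
# Old API: zk.recursive_delete('/services/myservice')
zk.delete('/services/myservice', recursive=True)  # current kazoo spelling
zk.stop()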
0665beccbca954df9a477119bb976441c29dd5eb
fix test
test/sphinxext/test_sphinxext.py
test/sphinxext/test_sphinxext.py
import tempfile
import os

from sequana.sphinxext import snakemakerule
from sphinx.application import Sphinx


def test_doc():
    res = snakemakerule.get_rule_doc("dag")
    res = snakemakerule.get_rule_doc("fastqc")

    try:
        res = snakemakerule.get_rule_doc("dummy")
        assert False
    except FileNotFoundError:
        assert True
    except:
        assert False

    with tempfile.TemporaryDirectory() as tmpdir:
        # Create the conf and index in tmpdir
        with open(tmpdir + os.sep + "index.rst", "w") as fh:
            fh.write(".. snakemakerule:: dag\n")
        with open(tmpdir + os.sep + "conf.py", "w") as fh:
            print(fh.name)
            fh.write("""
import sys, os
import sphinx
sys.path.insert(0, os.path.abspath('sphinxext'))
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    "sequana.sphinxext.snakemakerule"
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = "sequana"
copyright = "2016"
version = '1.0'
release = "1.0"
exclude_patterns = []
add_module_names = False
pygments_style = 'sphinx'
intersphinx_mapping = {}
""")
        # srcdir, confdir, outdir, doctreedir, buildername
        app = Sphinx(tmpdir, tmpdir, tmpdir, tmpdir, "html")
        app.build()
Python
0.000002
@@ -208,16 +208,24 @@ (%22fastqc +_dynamic %22)%0A%0A
da40c9a7c8bc54099e6115cb5e545a4ed51a083c
Remove unused argument
test/test_sqlalchemy_bigquery.py
test/test_sqlalchemy_bigquery.py
# -*- coding: utf-8 -*-

from __future__ import unicode_literals

from sqlalchemy.engine import create_engine
from sqlalchemy.schema import Table, MetaData, Column
from sqlalchemy import types, func, case
from sqlalchemy.sql import expression, select, literal_column
from sqlalchemy.exc import NoSuchTableError
from sqlalchemy.orm import sessionmaker
from pytz import timezone
import pytest
import sqlalchemy
import datetime

ONE_ROW_CONTENTS = [
    588,
    datetime.datetime(2013, 10, 10, 11, 27, 16, tzinfo=timezone('UTC')),
    'W 52 St & 11 Ave',
    40.76727216,
    False,
    datetime.date(2013, 10, 10),
    datetime.datetime(2013, 10, 10, 11, 27, 16),
    datetime.time(11, 27, 16),
    b'\xef'
]


@pytest.fixture(scope='session')
def engine():
    engine = create_engine('bigquery://', echo=True)
    return engine


@pytest.fixture(scope='session')
def table(engine):
    return Table('test_pybigquery.sample', MetaData(bind=engine), autoload=True)


@pytest.fixture(scope='session')
def table_one_row(engine):
    return Table('test_pybigquery.sample_one_row', MetaData(bind=engine), autoload=True)


@pytest.fixture(scope='session')
def session(engine):
    Session = sessionmaker(bind=engine)
    session = Session()
    return session


@pytest.fixture(scope='session')
def query(table):
    col1 = literal_column("TIMESTAMP_TRUNC(timestamp, DAY)").label("timestamp_label")
    col2 = func.sum(table.c.integer)
    query = (
        select([
            col1,
            col2,
        ])
        .where(col1 < '2017-01-01 00:00:00')
        .group_by(col1)
        .order_by(col2)
    )
    return query


def test_reflect_select(engine, table):
    assert len(table.c) == 9
    assert isinstance(table.c.integer, Column)
    assert isinstance(table.c.integer.type, types.Integer)
    assert isinstance(table.c.timestamp.type, types.TIMESTAMP)
    assert isinstance(table.c.string.type, types.String)
    assert isinstance(table.c.float.type, types.Float)
    assert isinstance(table.c.boolean.type, types.Boolean)
    assert isinstance(table.c.date.type, types.DATE)
    assert isinstance(table.c.datetime.type, types.DATETIME)
    assert isinstance(table.c.time.type, types.TIME)
    assert isinstance(table.c.bytes.type, types.BINARY)

    rows = table.select().execute().fetchall()
    assert len(rows) == 1000


def test_content_from_raw_queries(engine):
    rows = engine.execute('SELECT * FROM test_pybigquery.sample_one_row').fetchall()
    assert list(rows[0]) == ONE_ROW_CONTENTS


def test_content_from_reflect(engine, table_one_row):
    rows = table_one_row.select().execute().fetchall()
    assert list(rows[0]) == ONE_ROW_CONTENTS


def test_unicode(engine, table_one_row):
    unicode_str = "白人看不懂"
    returned_str = sqlalchemy.select(
        [expression.bindparam("好", unicode_str)],
        from_obj=table_one_row,
    ).scalar()
    assert returned_str == unicode_str


def test_reflect_select_shared_table(engine):
    one_row = Table('bigquery-public-data.samples.natality', MetaData(bind=engine), autoload=True)
    row = one_row.select().limit(1).execute().first()
    assert len(row) >= 1


def test_reflect_table_does_not_exist(engine):
    with pytest.raises(NoSuchTableError):
        table = Table('test_pybigquery.table_does_not_exist', MetaData(bind=engine), autoload=True)

    assert Table('test_pybigquery.table_does_not_exist', MetaData(bind=engine)).exists() is False


def test_reflect_dataset_does_not_exist(engine):
    with pytest.raises(NoSuchTableError):
        Table('dataset_does_not_exist.table_does_not_exist', MetaData(bind=engine), autoload=True)


def test_tables_list(engine):
    assert 'test_pybigquery.sample' in engine.table_names()
    assert 'test_pybigquery.sample_one_row' in engine.table_names()


def test_group_by(session, table):
    """labels in SELECT clause should be correctly formatted (dots are
    replaced with underscores)"""
    result = session.query(table.c.string, func.count(table.c.integer)).group_by(table.c.string).all()
    assert len(result) > 0


def test_session_query(session, table):
    col_concat = func.concat(table.c.string).label('concat')
    result = (
        session
        .query(
            table.c.string,
            col_concat,
            func.avg(table.c.integer),
            func.sum(case([(table.c.boolean == True, 1)], else_=0))
        )
        .group_by(table.c.string, col_concat)
        .having(func.avg(table.c.integer) > 10)
    ).all()
    assert len(result) > 0


def test_custom_expression(engine, query):
    """GROUP BY clause should use labels instead of expressions"""
    result = engine.execute(query).fetchall()
    assert len(result) > 0


def test_compiled_query_literal_binds(engine, query):
    compiled = query.compile(engine, compile_kwargs={"literal_binds": True})
    result = engine.execute(compiled).fetchall()
    assert len(result) > 0


def test_joins(session, table, table_one_row):
    result = (session.query(table.c.string, func.count(table_one_row.c.integer))
              .join(table_one_row, table_one_row.c.string == table.c.string)
              .group_by(table.c.string).all())
    assert len(result) > 0


def test_querying_wildcard_tables(engine, query):
    table = Table('bigquery-public-data.noaa_gsod.gsod*', MetaData(bind=engine), autoload=True)
    rows = table.select().limit(1).execute().first()
    assert len(rows) > 0
Python
0.000009
@@ -5218,31 +5218,24 @@ ables(engine -, query ):%0A table
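Removing the unused query parameter is more than cosmetic in pytest: naming a fixture in a test signature forces pytest to build it, so the session-scoped query fixture was being constructed for a test that never touched it. A small sketch of the effect:

import pytest

@pytest.fixture(scope='session')
def query():
    print('building query fixture')   # runs whenever a test requests it
    return object()

def test_uses_fixture(query):   # fixture is built
    assert query is not None

def test_no_fixture():          # fixture is never built for this test
    assert True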
477ac5bb540e2d5c511945c9ef579368d0c9b897
Update version of Google Java Format
run-google-java-format.py
run-google-java-format.py
#!/usr/bin/python

# This script reformats each file supplied on the command line according to
# the Google Java style (by calling out to the google-java-format program,
# https://github.com/google/google-java-format), but with improvements to
# the formatting of annotations in comments.

from __future__ import print_function
from distutils import spawn
import filecmp
import os
import stat
import subprocess
import sys
import tempfile
import urllib

debug = False
# debug = True

script_dir = os.path.dirname(os.path.abspath(__file__))
# Rather than calling out to the shell, it would be better to
# call directly in Python.
fixup_py = os.path.join(script_dir, "fixup-google-java-format.py")

# Version 1.1 mangles "@param <P>", so take a risk on 1.2 for now.
# gjf_version = "google-java-format-1.1"
# gjf_snapshot = ""
# Never change the file at a URL; make unique by adding a date.
gjf_version = "google-java-format-1.2"
gjf_snapshot = "-SNAPSHOT-20161121"
gjf_jar_name = gjf_version + gjf_snapshot + "-all-deps.jar"
# gjf_url = "https://github.com/google/google-java-format/releases/download/" + gjf_version + "/" + gjf_jar_name
gjf_url = "http://types.cs.washington.edu/" + gjf_jar_name
# gjf_url = "http://homes.cs.washington.edu/~mernst/tmp2/" + gjf_jar_name

# Set gjf_jar_path, or retrieve it if it doesn't appear locally. Does not update
# from remote path if remote is newer, so never change files on the server.
if os.path.isfile(os.path.join(script_dir, gjf_jar_name)):
    gjf_jar_path = os.path.join(script_dir, gjf_jar_name)
elif os.path.isfile(os.path.join(os.path.dirname(script_dir), "lib", gjf_jar_name)):
    gjf_jar_path = os.path.join(os.path.dirname(script_dir), "lib", gjf_jar_name)
else:
    gjf_jar_path = os.path.join(script_dir, gjf_jar_name)
    # print("retrieving " + gjf_url + " to " + gjf_jar_path)
    urllib.urlretrieve(gjf_url, gjf_jar_path)


# For some reason, the "git ls-files" must be run from the root.
# (I can run "git ls-files" from the command line in any directory.)
def under_git(dir, filename):
    """Return true if filename in dir is under git control."""
    if not spawn.find_executable("git"):
        if debug:
            print("no git executable found")
        return False
    FNULL = open(os.devnull, 'w')
    p = subprocess.Popen(["git", "ls-files", filename, "--error-unmatch"],
                         cwd=dir, stdout=FNULL, stderr=subprocess.STDOUT)
    p.wait()
    if debug:
        print("p.returncode", p.returncode)
    return p.returncode == 0


# Don't replace local with remote if local is under version control.
# It would be better to just test whether the remote is newer than local,
# But raw GitHub URLs don't have the necessary last-modified information.
if not under_git(script_dir, "fixup-google-java-format.py"):
    try:
        urllib.urlretrieve("https://raw.githubusercontent.com/plume-lib/run-google-java-format/master/fixup-google-java-format.py", fixup_py)
    except:
        if os.path.exists(fixup_py):
            print("Couldn't retrieve fixup-google-java-format.py; using cached version")
        else:
            print("Couldn't retrieve fixup-google-java-format.py")
            sys.exit(1)
    os.chmod(fixup_py, os.stat(fixup_py).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)

if debug:
    print("script_dir:", script_dir)
    print("fixup_py: ", fixup_py)
    print("gjf_jar_path: ", gjf_jar_path)

files = sys.argv[1:]
if len(files) == 0:
    print("run-google-java-format.py expects 1 or more filenames as arguments")
    sys.exit(1)

result = subprocess.call(["java", "-jar", gjf_jar_path, "--replace"] + files)

# Don't stop if there was an error, because google-java-format won't munge
# files and we still want to run fixup-google-java-format.py.
# if result != 0:
#     print("Error when running google-java-format")
#     sys.exit(result)

# Remove command-line arguments
files = [f for f in files if not f.startswith("-")]
# Exit if no files were supplied (maybe "--help" was supplied)
if not files:
    sys.exit(0)

if debug:
    print("Running fixup-google-java-format.py")
result = subprocess.call([fixup_py] + files)
if result != 0:
    print("Error when running fixup-google-java-format.py")
    sys.exit(result)
Python
0
@@ -953,17 +953,17 @@ -2016112 -1 +3 %22%0A%0Agjf_j
30e1f6ca2224cba216c2e08f2600ae55ba43cebb
update comment
test/unit/test_disco_aws_util.py
test/unit/test_disco_aws_util.py
""" Tests of disco_aws_util """ from unittest import TestCase from disco_aws_automation import disco_aws_util class DiscoAWSUtilTests(TestCase): '''Test disco_aws_util.py''' def test_size_as_rec_map_with_none(self): """_size_as_recurrence_map works with None""" self.assertEqual(disco_aws_util.size_as_recurrence_map(None), {"": None}) self.assertEqual(disco_aws_util.size_as_recurrence_map(''), {"": None}) def test_size_as_rec_map_with_int(self): """_size_as_recurrence_map works with simple integer""" self.assertEqual(disco_aws_util.size_as_recurrence_map(5, sentinel="0 0 * * *"), {"0 0 * * *": 5}) def test_size_as_rec_map_with_map(self): """_size_as_recurrence_map works with a map""" map_as_string = "2@1 0 * * *:3@6 0 * * *" map_as_dict = {"1 0 * * *": 2, "6 0 * * *": 3} self.assertEqual(disco_aws_util.size_as_recurrence_map(map_as_string), map_as_dict) def test_size_as_rec_map_with_duped_map(self): """_size_as_recurrence_map works with a duped map""" map_as_string = "2@1 0 * * *:3@6 0 * * *:3@6 0 * * *" map_as_dict = {"1 0 * * *": 2, "6 0 * * *": 3} self.assertEqual(disco_aws_util.size_as_recurrence_map(map_as_string), map_as_dict)
Python
0
@@ -224,33 +224,32 @@ lf):%0A %22%22%22 -_ size_as_recurren @@ -270,24 +270,24 @@ ith None%22%22%22%0A + self @@ -485,33 +485,32 @@ lf):%0A %22%22%22 -_ size_as_recurren @@ -726,33 +726,32 @@ lf):%0A %22%22%22 -_ size_as_recurren @@ -1037,17 +1037,16 @@ %22%22%22 -_ size_as_ @@ -1195,32 +1195,32 @@ %226 0 * * *%22: 3%7D%0A - self.ass @@ -1275,28 +1275,29 @@ ap_as_string), map_as_dict)%0A +%0A
82ff239e462338b89e1baa96d78f0f5efddab856
Add @jugmac00's Products.ZopeTree to integration test suite
tests/integration/test_projects_using_isort.py
tests/integration/test_projects_using_isort.py
"""Tests projects that use isort to see if any differences are found between their current imports and what isort suggest on the develop branch. This is an important early warning signal of regressions. NOTE: If you use isort within a public repository, please feel empowered to add your project here! It is important to isort that as few regressions as possible are experienced by our users. Having your project tested here is the most sure way to keep those regressions form ever happening. """ from pathlib import Path from subprocess import check_call from typing import Sequence from isort.main import main def git_clone(repository_url: str, directory: Path): """Clones the given repository into the given directory path""" check_call(["git", "clone", "--depth", "1", repository_url, str(directory)]) def run_isort(arguments: Sequence[str]): """Runs isort in diff and check mode with the given arguments""" main(["--check-only", "--diff", *arguments]) def test_django(tmpdir): git_clone("https://github.com/django/django.git", tmpdir) run_isort( str(target_dir) for target_dir in (tmpdir / "django", tmpdir / "tests", tmpdir / "scripts") ) def test_plone(tmpdir): git_clone("https://github.com/plone/plone.app.multilingualindexes.git", tmpdir) run_isort([str(tmpdir / "src")]) def test_pandas(tmpdir): # Need to limit extensions as isort has just made sorting pxd the default, and pandas # will have not picked it up yet # TODO: Remove below line as soon as these files are sorted on the mainline pandas project git_clone("https://github.com/pandas-dev/pandas.git", tmpdir) limit_extensions = ("--ext", "py", "--ext", "pyi", "--ext", "pyx") run_isort((str(tmpdir / "pandas"), "--skip", "__init__.py", *limit_extensions)) def test_fastapi(tmpdir): git_clone("https://github.com/tiangolo/fastapi.git", tmpdir) run_isort([str(tmpdir / "fastapi")]) def test_zulip(tmpdir): git_clone("https://github.com/zulip/zulip.git", tmpdir) run_isort((str(tmpdir), "--skip", "__init__.pyi")) def test_habitat_lab(tmpdir): git_clone("https://github.com/facebookresearch/habitat-lab.git", tmpdir) run_isort([str(tmpdir)]) def test_tmuxp(tmpdir): git_clone("https://github.com/tmux-python/tmuxp.git", tmpdir) run_isort([str(tmpdir)]) def test_websockets(tmpdir): git_clone("https://github.com/aaugustin/websockets.git", tmpdir) run_isort((str(tmpdir), "--skip", "example", "--skip", "docs", "--skip", "compliance")) def test_airflow(tmpdir): git_clone("https://github.com/apache/airflow.git", tmpdir) run_isort([str(tmpdir)]) def test_typeshed(tmpdir): git_clone("https://github.com/python/typeshed.git", tmpdir) run_isort( ( str(tmpdir), "--skip", "tests", "--skip", "scripts", "--skip", f"{tmpdir}/third_party/2and3/yaml/__init__.pyi", ) ) def test_pylint(tmpdir): git_clone("https://github.com/PyCQA/pylint.git", tmpdir) run_isort([str(tmpdir)]) def test_poetry(tmpdir): git_clone("https://github.com/python-poetry/poetry.git", tmpdir) run_isort((str(tmpdir), "--skip", "tests")) def test_hypothesis(tmpdir): git_clone("https://github.com/HypothesisWorks/hypothesis.git", tmpdir) run_isort((str(tmpdir), "--skip", "tests")) def test_pillow(tmpdir): git_clone("https://github.com/python-pillow/Pillow.git", tmpdir) run_isort((str(tmpdir), "--skip", "tests")) def test_attrs(tmpdir): git_clone("https://github.com/python-attrs/attrs.git", tmpdir) run_isort( ( str(tmpdir), "--skip", "tests", "--ext", "py", "--skip", "_compat.py", ) ) def test_datadog_integrations_core(tmpdir): git_clone("https://github.com/DataDog/integrations-core.git", tmpdir) 
run_isort([str(tmpdir)]) def test_pyramid(tmpdir): git_clone("https://github.com/Pylons/pyramid.git", tmpdir) run_isort( str(target_dir) for target_dir in (tmpdir / "src" / "pyramid", tmpdir / "tests", tmpdir / "setup.py") )
Python
0
@@ -4194,16 +4194,158 @@ etup.py%22)%0A )%0A +%0A%0Adef test_products_zopetree(tmpdir):%0A git_clone(%22https://github.com/jugmac00/Products.ZopeTree.git%22, tmpdir)%0A run_isort(%5Bstr(tmpdir)%5D)%0A
0df416d66ee6c28512295de297f44597b45acf7a
Bump version for release
src/pip/__init__.py
src/pip/__init__.py
__version__ = "19.2.dev0"
Python
0
@@ -16,11 +16,6 @@ 19.2 -.dev0 %22%0A
e2ce9caa84d0932b72894f17dc2c4884cc285bb0
update test case for jaccard
tests/TestReleaseScoringAlice.py
tests/TestReleaseScoringAlice.py
from subfind.release.alice import ReleaseScoringAlice

__author__ = 'hiepsimu'

import logging
import unittest

logging.basicConfig(level=logging.DEBUG)


class ReleaseScoringAliceTestCase(unittest.TestCase):
    def test_01(self):
        """
        Release which matches the movie title should be the higher priority

        :return:
        :rtype:
        """
        scoring = ReleaseScoringAlice()
        input_release_name = 'Survivor.2014.1080p.BluRay.H264.AAC-RARBG'

        found_releases = [
            {'name': 'The.Hobbit.The.Battle.of.the.Five.Armies.2014.1080p.BluRay.H264.AAC-RARBG'},
            {'name': 'Survivor.2015.1080p.BluRay.H264.AAC-RARBG'},
        ]
        scoring.sort(input_release_name, found_releases)

        self.assertEqual('Survivor.2015.1080p.BluRay.H264.AAC-RARBG', found_releases[0]['name'])


if __name__ == '__main__':
    unittest.main()
Python
0
@@ -1,8 +1,35 @@ +from pprint import pprint%0A%0A from sub @@ -856,16 +856,786 @@ ame'%5D)%0A%0A + def test_02(self):%0A %22%22%22%0A Test 100%25 match%0A%0A :return:%0A :rtype:%0A %22%22%22%0A scoring = ReleaseScoringAlice()%0A input_release_name = '400.Days.2015.1080p.BluRay.H264.AAC-RARBG'%0A%0A found_releases = %5B%0A %7B'name': '400.Days.2015.1080p.BluRay.H264.AAC-RARBG'%7D,%0A %7B'name': '400.Days.2015.720p.BluRay.H264.AAC-RARBG'%7D,%0A %7B'name': '400.Days.2015.BRRip.XviD.AC3-RARBG'%7D,%0A %7B'name': '400.Days.2015.1080p.BluRay.H264.AAC-RARBG'%7D,%0A %7B'name': '400.Days.2015.720p.BluRay.x264.%5BYTS.AG%5D'%7D,%0A %5D%0A scoring.sort(input_release_name, found_releases)%0A # pprint(found_releases)%0A%0A self.assertEqual('400.Days.2015.1080p.BluRay.H264.AAC-RARBG', found_releases%5B0%5D%5B'name'%5D)%0A%0A if __nam
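The subject line's mention of Jaccard suggests the scorer ranks candidates by token overlap between release names; the project's actual metric is not shown here, but a plain token-level Jaccard similarity would behave as the new test expects (1.0 for the byte-identical name):

def jaccard(a, b):
    ta, tb = set(a.lower().split('.')), set(b.lower().split('.'))
    return len(ta & tb) / float(len(ta | tb))

print(jaccard('400.Days.2015.1080p.BluRay.H264.AAC-RARBG',
              '400.Days.2015.1080p.BluRay.H264.AAC-RARBG'))  # 1.0
print(jaccard('400.Days.2015.1080p.BluRay.H264.AAC-RARBG',
              '400.Days.2015.720p.BluRay.x264.[YTS.AG]'))    # < 1.0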
04b91b797de680a970d77de76bed31934a38ede0
remove "transition" markup
sphinxprettysearchresults/__init__.py
sphinxprettysearchresults/__init__.py
import pkg_resources, shutil, subprocess

import docutils
from docutils import nodes
from docutils.nodes import *
from sphinx.jinja2glue import SphinxFileSystemLoader


def clean_txts(language, srcdir, outdir, source_suffix, use_old_search_snippets):
    if not isinstance(outdir, str) and isinstance(outdir, unicode):
        outdir = outdir.encode('UTF-8')
    if not isinstance(srcdir, str) and isinstance(srcdir, unicode):
        srcdir = srcdir.encode('UTF-8')

    sources_path = outdir + '/_sources'
    sources_build_path = '_build_txt'

    if os.path.isdir(outdir + '/_raw_sources'):
        shutil.rmtree(outdir + '/_raw_sources')

    if os.path.isdir(sources_path):
        shutil.move(sources_path, outdir + '/_raw_sources')

    if not os.path.isdir(sources_build_path):
        os.makedirs(sources_build_path)

    if not language:
        language = 'en'

    build_txt = subprocess.Popen(['sphinx-build', '-a', '-b', 'text', '-D' 'language=' + language, \
                                  srcdir, sources_build_path])
    build_txt.wait()

    shutil.move(sources_build_path, sources_path)

    if pkg_resources.get_distribution("sphinx").version >= "1.5.0" and not use_old_search_snippets:
        for root, dirs, files in os.walk(sources_path):
            for file in files:
                if source_suffix == '.txt':
                    source_suffix = ''
                os.rename(os.path.join(root, file),
                          os.path.join(root, file.replace('.txt', source_suffix + '.txt')))


def build_search_snippets(app, docname):
    if app.builder.name == 'html':
        source_suffix = app.config.source_suffix[0]
        clean_txts(app.config.language, app.srcdir, app.outdir, source_suffix,
                   app.config.use_old_search_snippets)


def remove_text_markup(app, doctree, docname):
    if app.builder.name == 'text':
        nodes_to_replace = doctree.traverse(table)\
            + doctree.traverse(header)\
            + doctree.traverse(title)\
            + doctree.traverse(emphasis)\
            + doctree.traverse(strong) \
            + doctree.traverse(list_item) \
            + doctree.traverse(reference)

        for node in nodes_to_replace:
            newnode = paragraph()
            newnode.append(line('', node.astext()))
            node.replace_self(newnode)

        nodes_to_remove = doctree.traverse(figure)\
            + doctree.traverse(image)\
            + doctree.traverse(compound)

        for node in nodes_to_remove:
            node.replace_self(docutils.nodes.line('', ''))


def add_custom_source_link(app):
    if app.builder.name == 'html':
        template_index = None
        index = None
        for template_set in app.builder.templates.loaders:
            if index is None:
                index = 0
            else:
                index =+ 1
            if 'sourcelink.html' in template_set.list_templates():
                template_index = index
                break
        if index is not None:
            app.builder.templates.loaders.insert(
                template_index,
                SphinxFileSystemLoader(os.path.dirname(__file__)))


def setup(app):
    app.add_config_value('use_old_search_snippets', False, 'html')
    app.connect('build-finished', build_search_snippets)
    app.connect('doctree-resolved', remove_text_markup)
    app.connect('builder-inited', add_custom_source_link)
Python
0.001275
@@ -2036,17 +2036,16 @@ (strong) - %5C%0A @@ -2079,17 +2079,16 @@ st_item) - %5C%0A @@ -2122,16 +2122,61 @@ ference) + %5C%0A + doctree.traverse(transition) %0A @@ -2217,24 +2217,25 @@ new +_ node = parag @@ -2256,16 +2256,17 @@ new +_ node.app @@ -2327,16 +2327,17 @@ self(new +_ node)%0A%0A
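The diff adds docutils transition nodes (the horizontal rules a text builder renders as a row of dashes) to the set flattened into plain paragraphs. The traverse/replace_self pattern it relies on, shown standalone:

from docutils.core import publish_doctree
from docutils import nodes

doctree = publish_doctree('before\n\n----\n\nafter')
for node in doctree.traverse(nodes.transition):
    node.replace_self(nodes.paragraph('', ''))  # strip the rule markup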
9a7eeb52704eb8d2d4034061edbd63251a8f1b9d
Improve name of test to fix for #623 indicating fresh row.
tests/test_loom_simulate_bivariate_gaussian.py
tests/test_loom_simulate_bivariate_gaussian.py
# -*- coding: utf-8 -*-

#   Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.

import collections
import os
import struct
import tempfile

import numpy as np
import pandas as pd
import pytest

from scipy import stats

import bayeslite

from bayeslite import read_pandas

try:
    from bayeslite.backends.loom_backend import LoomBackend
except ImportError:
    pytest.skip('Failed to import Loom.')

from stochastic import stochastic


def axis_aligned_gaussians(means, size, rng):
    return [rng.multivariate_normal(mean, [[0,1], [1,0]], size=size)
            for mean in means]


def mix(gaussians, p, rng):
    assert len(gaussians) == len(p)
    assert reduce(lambda sum, n: sum + n, p) == 1
    choices = rng.choice([0,1], size=len(gaussians[0]), p=p)
    return [gaussians[choices[i]][i] for i in range(len(choices))]


def temp_file_path(suffix):
    """Returns a random file path within '/tmp'."""
    _, temp_filename = tempfile.mkstemp(suffix=suffix)
    os.remove(temp_filename)
    return temp_filename


def register_loom(bdb):
    loom_store_path = temp_file_path('.bdb')
    loom_backend = LoomBackend(loom_store_path=loom_store_path)
    bayeslite.bayesdb_register_backend(bdb, loom_backend)


def distance(a, b):
    """Computes the Euclidean distance between points `a` and `b`."""
    a = a if isinstance(a, np.ndarray) else np.array(a)
    b = b if isinstance(a, np.ndarray) else np.array(b)
    return abs(np.linalg.norm(a-b))


def prepare_bdb(bdb, samples, table):
    qt = bayeslite.bql_quote_name(table)
    dataframe = pd.DataFrame(data=samples)
    read_pandas.bayesdb_read_pandas_df(bdb, 'data', dataframe, create=True)
    bdb.execute('''
        CREATE POPULATION FOR %s WITH SCHEMA (
            GUESS STATTYPES OF (*)
        )
    ''' % (qt,))
    bdb.execute('CREATE GENERATOR FOR %s USING loom;' % (qt,))
    bdb.execute('INITIALIZE 4 MODELS FOR %s;' % (qt,))
    bdb.execute('ANALYZE %s FOR 100 ITERATIONS;' % (qt,))


def insert_row(bdb, table, x, y):
    qt = bayeslite.bql_quote_name(table)
    query = 'INSERT INTO %s ("0", "1") VALUES (?, ?)' % (qt,)
    bdb.sql_execute(query, bindings=(x, y))
    cursor = bdb.sql_execute('SELECT last_insert_rowid()')
    return cursor.fetchone()[0]


def simulate_from_rowid(bdb, table, column, rowid, limit=1000):
    qt = bayeslite.bql_quote_name(table)
    qc = bayeslite.bql_quote_name(str(column))
    cursor = bdb.execute('''
        SIMULATE %s FROM %s GIVEN rowid=? LIMIT ?
    ''' % (qc, qt), bindings=(rowid, limit))
    return [float(x[0]) for x in cursor]


@stochastic(max_runs=5, min_passes=3)
def test_mix_ratio(seed):
    means = ((0,20), (20,0))
    sample_size = 100
    mix_ratio = [0.7, 0.3]
    table = 'data'
    with bayeslite.bayesdb_open(seed=seed) as bdb:
        sample_gaussians = axis_aligned_gaussians(means, sample_size, bdb._np_prng)
        samples = mix(sample_gaussians, mix_ratio, bdb._np_prng)

        register_loom(bdb)
        prepare_bdb(bdb, samples, table)

        cursor = bdb.execute('''
            SIMULATE "0", "1" FROM data LIMIT ?
        ''', (sample_size,))
        simulated_samples = [sample for sample in cursor]

        counts = collections.Counter(
            (0 if distance((x,y), means[0]) < distance((x,y), means[1]) else 1
             for x, y in simulated_samples))
        simulated_mix_ratio = [counts[key] / float(len(simulated_samples))
                               for key in counts]

        for i in xrange(len(means)):
            difference = abs(mix_ratio[i] - simulated_mix_ratio[i])
            assert difference < 0.1


@pytest.mark.xfail(strict=True, reason='no populate data for new row, #623.')
@stochastic(max_runs=1, min_passes=1)
def test_simulate_y_from_partially_populated_row(seed):
    means = ((0,20), (20,0))
    sample_size = 50
    mix_ratio = [0.7, 0.3]
    table = 'data'
    with bayeslite.bayesdb_open(seed=seed) as bdb:
        sample_gaussians = axis_aligned_gaussians(means, sample_size, bdb._np_prng)
        samples = mix(sample_gaussians, mix_ratio, bdb._np_prng)

        register_loom(bdb)
        prepare_bdb(bdb, samples, table)

        rowid = insert_row(bdb, table, means[0][0], None)
        simulated_samples = simulate_from_rowid(bdb, table, 1, rowid,
                                                limit=sample_size)

        y_samples = [y for _x, y in sample_gaussians[0]]
        _statistic, p_value = stats.ks_2samp(y_samples, simulated_samples)
        assert 0.10 < p_value


def test_simulate_conflict():
    """Cannot override existing value in table using GIVEN in SIMULATE."""
    with bayeslite.bayesdb_open() as bdb:
        bdb.sql_execute('''
            CREATE TABLE data (
                "0" NUMERIC PRIMARY KEY,
                "1" NUMERIC
            );
        ''')
        insert_row(bdb, 'data', 1, 1)
        bdb.execute('''
            CREATE POPULATION FOR data WITH SCHEMA (
                "0" NUMERICAL;
                "1" NUMERICAL;
            );
        ''')
        bdb.execute('CREATE GENERATOR FOR data USING cgpm;')
        bdb.execute('INITIALIZE 1 MODELS FOR data;')
        rowid = insert_row(bdb, 'data', 0, None)
        with pytest.raises(bayeslite.BQLError):
            bdb.execute('''
                SIMULATE "0" FROM data
                GIVEN rowid=?, "0"= 0, "1"=0
                LIMIT 1;
            ''', (rowid,))
Python
0
@@ -4255,16 +4255,22 @@ pulated_ +fresh_ row(seed
c535c22884dbb0df227d4ad142e4d4515415ca29
Switch to wav test files for gstreamer tests
tests/backends/gstreamer_test.py
tests/backends/gstreamer_test.py
import unittest
import os

from mopidy.models import Playlist, Track
from mopidy.backends.gstreamer import GStreamerBackend

from tests.backends.base import (BasePlaybackControllerTest,
    BaseCurrentPlaylistControllerTest)

folder = os.path.dirname(__file__)
folder = os.path.join(folder, '..', 'data')
folder = os.path.abspath(folder)

song = os.path.join(folder, 'song%s.mp3')
song = 'file://' + song

# FIXME can be switched to generic test


class GStreamerCurrentPlaylistHandlerTest(BaseCurrentPlaylistControllerTest, unittest.TestCase):
    tracks = [Track(uri=song % i, id=i, length=4464) for i in range(1, 4)]

    backend_class = GStreamerBackend


class GStreamerPlaybackControllerTest(BasePlaybackControllerTest, unittest.TestCase):
    tracks = [Track(uri=song % i, id=i, length=4464) for i in range(1, 4)]

    backend_class = GStreamerBackend


if __name__ == '__main__':
    unittest.main()
Python
0
@@ -400,11 +400,11 @@ g%25s. -mp3 +wav ')%0As
9e0725483e80a4e98d2635b90a268d00e4eae9f3
Update insertion-sort-1.py
hackerrank/insertion-sort-1.py
hackerrank/insertion-sort-1.py
'''
https://www.hackerrank.com/challenges/insertionsort1

Sorting
One common task for computers is to sort data. For example, people might want
to see all their files on a computer sorted by size. Since sorting is a simple
problem with many different possible solutions, it is often used to introduce
the study of algorithms.

Insertion Sort
These challenges will cover Insertion Sort, a simple and intuitive sorting
algorithm. We will first start with an already sorted list.

Insert element into sorted list
Given a sorted list with an unsorted number in the rightmost cell, can you
write some simple code to insert into the array so that it remains sorted?

Print the array every time a value is shifted in the array until the array is
fully sorted. The goal of this challenge is to follow the correct order of
insertion sort.

Guideline: You can copy the value of to a variable and consider its cell
"empty". Since this leaves an extra cell empty on the right, you can shift
everything over until can be inserted. This will create a duplicate of each
value, but when you reach the right spot, you can replace it with .

Input Format
There will be two lines of input:

SIZE - the size of the array
ARR - the unsorted array of integers

Output Format
On each line, output the entire array every time an item is shifted in it.
'''

import sys


def insertion_sort(ar):
    if len(ar) == 1:
        print(' '.join(map(str, ar)))
        return(ar)
    else:
        x = ar[-1]
        for i in reversed(range(len(ar) - 1)):
            if x < ar[i]:
                ar[i + 1] = ar[i]
                print(' '.join(map(str, ar)))
                if i == 0:
                    ar[0] = x
                    print_list(ar)
                    break
            else:
                ar[i + 1] = x
                print_list(ar)
                break
        return(ar)


if __name__ == '__main__':
    s = int(sys.stdin.readline())
    ar = list(map(int, sys.stdin.readline().split()))
    inserti
0.000001
@@ -1710,32 +1710,47 @@ print -_list(ar +(' '.join(map(str, ar)) )%0A @@ -1838,16 +1838,31 @@ rint -_list(ar +(' '.join(map(str, ar)) )%0A @@ -2048,9 +2048,20 @@ inserti +on_sort(ar) %0A
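The challenge above shifts elements right until the held value fits, printing after each shift. A compact equivalent with a worked trace (the helper name is illustrative):

def insert_rightmost(ar):
    x = ar[-1]                       # hold the unsorted value
    i = len(ar) - 2
    while i >= 0 and ar[i] > x:
        ar[i + 1] = ar[i]            # shift right, duplicating ar[i]
        print(' '.join(map(str, ar)))
        i -= 1
    ar[i + 1] = x                    # drop the held value into place
    print(' '.join(map(str, ar)))

insert_rightmost([2, 4, 6, 8, 3])
# 2 4 6 8 8
# 2 4 6 6 8
# 2 4 4 6 8
# 2 3 4 6 8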
5c5d85983ae32618d8e2e51ce38006389b054de3
fix missing slots in forms test domain
tests/core/actions/test_forms.py
tests/core/actions/test_forms.py
from aioresponses import aioresponses

from rasa.core.actions.action import ACTION_LISTEN_NAME
from rasa.core.actions.forms import FormAction, REQUESTED_SLOT
from rasa.core.channels import CollectingOutputChannel
from rasa.core.domain import Domain
from rasa.core.events import (
    Form,
    SlotSet,
    UserUttered,
    ActionExecuted,
    BotUttered,
    Restarted,
)
from rasa.core.nlg import TemplatedNaturalLanguageGenerator
from rasa.core.trackers import DialogueStateTracker
from rasa.utils.endpoints import EndpointConfig


async def test_activate():
    tracker = DialogueStateTracker.from_events(sender_id="bla", evts=[])
    form_name = "my form"
    action = FormAction(form_name, None)
    slot_name = "num_people"
    domain = f"""
forms:
- {form_name}:
    {slot_name}:
    - type: from_entity
      entity: number
responses:
    utter_ask_num_people:
    - text: "How many people?"
"""
    domain = Domain.from_yaml(domain)

    events = await action.run(
        CollectingOutputChannel(),
        TemplatedNaturalLanguageGenerator(domain.templates),
        tracker,
        domain,
    )
    assert events[:-1] == [Form(form_name), SlotSet(REQUESTED_SLOT, slot_name)]
    assert isinstance(events[-1], BotUttered)


async def test_activate_and_immediate_deactivate():
    slot_name = "num_people"
    slot_value = 5

    tracker = DialogueStateTracker.from_events(
        sender_id="bla",
        evts=[
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered(
                "haha",
                {"name": "greet"},
                entities=[{"entity": slot_name, "value": slot_value}],
            ),
        ],
    )
    form_name = "my form"
    action = FormAction(form_name, None)
    domain = f"""
forms:
- {form_name}:
    {slot_name}:
    - type: from_entity
      entity: {slot_name}
"""
    domain = Domain.from_yaml(domain)

    events = await action.run(None, None, tracker, domain)
    assert events == [
        Form(form_name),
        SlotSet(slot_name, slot_value),
        Form(None),
        SlotSet(REQUESTED_SLOT, None),
    ]


async def test_set_slot_and_deactivate():
    form_name = "my form"
    slot_name = "num_people"
    slot_value = "dasdasdfasdf"
    events = [
        Form(form_name),
        SlotSet(REQUESTED_SLOT, slot_name),
        ActionExecuted(ACTION_LISTEN_NAME),
        UserUttered(slot_value),
    ]
    tracker = DialogueStateTracker.from_events(sender_id="bla", evts=events)

    domain = f"""
forms:
- {form_name}:
    {slot_name}:
    - type: from_text
"""
    domain = Domain.from_yaml(domain)

    action = FormAction(form_name, None)
    events = await action.run(None, None, tracker, domain)
    assert events == [
        SlotSet(slot_name, slot_value),
        Form(None),
        SlotSet(REQUESTED_SLOT, None),
    ]


async def test_validate_slots():
    form_name = "my form"
    slot_name = "num_people"
    slot_value = "dasdasdfasdf"
    validated_slot_value = "so clean"
    events = [
        Form(form_name),
        SlotSet(REQUESTED_SLOT, slot_name),
        ActionExecuted(ACTION_LISTEN_NAME),
        UserUttered(slot_value),
    ]
    tracker = DialogueStateTracker.from_events(sender_id="bla", evts=events)

    domain = f"""
forms:
- {form_name}:
    {slot_name}:
    - type: from_text
actions:
- action_validate_{form_name}
"""
    domain = Domain.from_yaml(domain)
    action_server_url = "http:/my-action-server:5055/webhook"

    with aioresponses() as mocked:
        mocked.post(
            action_server_url,
            payload={
                "events": [
                    {"event": "slot", "name": slot_name, "value": validated_slot_value}
                ]
            },
        )

        action_server = EndpointConfig(action_server_url)
        action = FormAction(form_name, action_server)

        events = await action.run(None, None, tracker, domain)
        assert events == [
            SlotSet(slot_name, validated_slot_value),
            Form(None),
            SlotSet(REQUESTED_SLOT, None),
        ]


def test_temporary_tracker():
    extra_slot = "some_slot"
    sender_id = "test"
    domain = Domain.from_yaml(
        f"""
        slots:
          {extra_slot}:
            type: unfeaturized
        """
    )

    old_tracker = DialogueStateTracker.from_events(
        sender_id, [ActionExecuted(ACTION_LISTEN_NAME)], slots=domain.slots
    )
    new_events = [Restarted()]
    temp_tracker = FormAction._temporary_tracker(old_tracker, new_events, domain)

    assert extra_slot in temp_tracker.slots.keys()
    assert len(temp_tracker.events) == 2
Python
0.000002
@@ -1845,24 +1845,81 @@ %7Bslot_name%7D%0A + slots:%0A %7Bslot_name%7D:%0A type: unfeaturized%0A %22%22%22%0A @@ -1978,35 +1978,128 @@ action.run( -None, None, +%0A CollectingOutputChannel(),%0A TemplatedNaturalLanguageGenerator(domain.templates),%0A tracker, do @@ -2087,39 +2087,53 @@ tracker, +%0A domain +,%0A )%0A assert eve @@ -2738,24 +2738,81 @@ : from_text%0A + slots:%0A %7Bslot_name%7D:%0A type: unfeaturized%0A %22%22%22%0A @@ -2913,35 +2913,128 @@ action.run( -None, None, +%0A CollectingOutputChannel(),%0A TemplatedNaturalLanguageGenerator(domain.templates),%0A tracker, do @@ -3026,31 +3026,45 @@ tracker, +%0A domain +,%0A )%0A assert @@ -3596,32 +3596,89 @@ domain = f%22%22%22%0A + slots:%0A %7Bslot_name%7D:%0A type: unfeaturized%0A forms:%0A - @@ -4314,35 +4314,162 @@ run( -None, None, tracker, domain +%0A CollectingOutputChannel(),%0A TemplatedNaturalLanguageGenerator(domain.templates),%0A tracker,%0A domain,%0A )%0A
eac24cf987ce6eb88d943a6a26f9c135592e2f86
add columns for year, month, day
samples-train-analysis.py
samples-train-analysis.py
'''analyze WORKING/samples-train.csv

INVOCATION: python samples-train-analysis.py ARGS

INPUT FILES:
 WORKING/samples-train.csv

OUTPUT FILES:
 WORKING/ME/0log.txt         log file containing what is printed
 WORKING/ME/transactions.csv with columns apn | date | sequence | actual_price
'''

import argparse
import collections
import numpy as np
import pandas as pd
import pdb
import random
import sys

import Bunch
import dirutility
import Logger
import Month
import Path
import Timer


def make_control(argv):
    print 'argv', argv
    parser = argparse.ArgumentParser()
    parser.add_argument('--test', action='store_true', help='if present, truncated input and enable test code')
    parser.add_argument('--trace', action='store_true', help='if present, call pdb.set_trace() early in run')
    arg = parser.parse_args(argv[1:])  # ignore invocation name
    arg.me = 'samples-train-analysis'

    if arg.trace:
        pdb.set_trace()

    random_seed = 123
    random.seed(random_seed)

    dir_working = Path.Path().dir_working()
    path_out_dir = dirutility.assure_exists(dir_working + arg.me + ('-test/' if arg.test else '') + '/')

    return Bunch.Bunch(
        arg=arg,
        path_in_samples=dir_working + 'samples-train.csv',
        path_out_log=path_out_dir + '0log.txt',
        path_out_csv=path_out_dir + 'transactions.csv',
        random_seed=random_seed,
        validation_month=Month.Month(2005, 12),
        test=arg.test,
        timer=Timer.Timer(),
    )


def make_index(apn, date, sequence_number):
    return '%d-%d-%d' % (apn, date, sequence_number)


def do_work(control):
    df = pd.read_csv(
        control.path_in_samples,
        low_memory=False,
        nrows=1000 if control.test else None,
    )
    print 'column names in input file', control.path_in_samples
    for i, column_name in enumerate(df.columns):
        print i, column_name
    column_apn = 'APN UNFORMATTED_deed'
    column_date = 'SALE DATE_deed'
    column_actual_price = 'SALE AMOUNT_deed'
    result = None
    n_duplicates = 0
    for apn in set(df[column_apn]):
        df_apn = df[df[column_apn] == apn]
        for date in set(df_apn[column_date]):
            df_apn_date = df_apn[df_apn[column_date] == date]
            sequence_number = 0
            for i, row in df_apn_date.iterrows():
                if sequence_number > 0:
                    print 'duplicate apn|date', apn, date
                    n_duplicates += 1
                new_df = pd.DataFrame(
                    data={
                        'apn': int(apn),
                        'date': int(date),
                        'sequence_number': sequence_number,
                        'actual_price': row[column_actual_price],
                    },
                    index=[make_index(apn, date, sequence_number)],
                )
                result = new_df if result is None else result.append(new_df, verify_integrity=True)
                sequence_number += 1
    print 'number of duplicate apn|date values', n_duplicates
    print 'number of training samples', len(df)
    print 'number of unique apn-date-sequence_numbers', len(result)
    result.to_csv(control.path_out_csv)


def main(argv):
    control = make_control(argv)
    sys.stdout = Logger.Logger(logfile_path=control.path_out_log)  # now print statements also write to the log file
    print control
    lap = control.timer.lap

    do_work(control)

    lap('work completed')
    if control.test:
        print 'DISCARD OUTPUT: test'
    print control
    print 'done'
    return


if __name__ == '__main__':
    main(sys.argv)

    if False:
        np
        pdb
        pd
Python
0.999863
@@ -1375,56 +1375,8 @@ ed,%0A - validation_month=Month.Month(2005, 12),%0A @@ -2419,140 +2419,571 @@ -new_df = pd.DataFrame(%0A data=%7B%0A 'apn': int(apn),%0A 'date': int(date) +date = int(date)%0A date_year = int(date / 10000)%0A date_month = int((date - date_year * 10000) / 100)%0A date_day = int(date - date_year * 10000 - date_month * 100)%0A assert date == date_year * 10000 + date_month * 100 + date_day, date%0A new_df = pd.DataFrame(%0A data=%7B%0A 'apn': int(apn),%0A 'date': date,%0A 'year': date_year,%0A 'month': date_month,%0A 'day': date_day ,%0A
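The added columns come from integer arithmetic on the YYYYMMDD sale date, exactly as the diff shows; for example:

date = 20051231
date_year = int(date / 10000)                                # 2005
date_month = int((date - date_year * 10000) / 100)           # 12
date_day = int(date - date_year * 10000 - date_month * 100)  # 31
assert date == date_year * 10000 + date_month * 100 + date_day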
4d1fa4bee77eba19cb0a4c80032f30dcc89e6b98
Fix date check
dcache-billing/python/download_billing_logs.py
dcache-billing/python/download_billing_logs.py
#!/usr/bin/env python

import sys
import urllib2
import argparse

FAXBOX_PROCESSED_CSV_URL = "http://login.usatlas.org/logs/mwt2/dcache-billing/processed/"
FAXBOX_RAW_CSV_URL = "http://login.usatlas.org/logs/mwt2/dcache-billing/raw/"


def download_log(date_string):
    """
    Download job log files from Amazon EC2 machines

    parameters:
    date_string - date to start download
    """
    file_urls = []
    url_file = "billing-{0}".format(date_string)
    file_url = "{0}/{1}/{2}".format(FAXBOX_PROCESSED_CSV_URL,
                                    date_string[0:4],
                                    url_file)
    file_urls.append((url_file, file_url))
    url_file = "billing-error-{0}".format(date_string)
    file_url = "{0}/{1}/{2}".format(FAXBOX_PROCESSED_CSV_URL,
                                    date_string[0:4],
                                    url_file)
    file_urls.append((url_file, file_url))
    for file_info in file_urls:
        try:
            url = file_info[1]
            request = urllib2.urlopen(url)
            if request.getcode() != 200:
                sys.stderr.write("Can't download {0}".format(url))
                return None
        except urllib2.HTTPError:
            sys.stderr.write("Can't download {0}".format(url))
            return False
        output_file = open(file_info[0], 'w')
        for line in request:
            output_file.write(line)
        output_file.close()
    return True


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Download DCache billing records')
    parser.add_argument('--date', dest='date', default=None, required=True,
                        help='Date to download')
    args = parser.parse_args(sys.argv[1:])
    if len(args.date) != 8:
        sys.stderr.write("Invalid date argument: {0}\n".format(args.date))
    try:
        int(args.date)
    except ValueError:
        sys.stderr.write("Invalid date argument: {0}\n".format(args.date))
    download_log(args.date)
Python
0.00106
@@ -1762,9 +1762,10 @@ != -8 +10 :%0A
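The length check moves from 8 to 10, which implies the script now expects a dashed YYYY-MM-DD argument ('20160101' is 8 characters, '2016-01-01' is 10); note, though, that the int(args.date) probe a few lines later would still flag the dashed form, so it presumably needs a matching update. A stricter validation for the 10-character format:

from datetime import datetime

def valid_date(date_string):
    try:
        datetime.strptime(date_string, '%Y-%m-%d')
        return True
    except ValueError:
        return False

print(valid_date('2016-01-01'))  # True
print(valid_date('20160101'))    # False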
6ed3d0d8f554e578b65db89e5c5f88cd14bfaea4
Update tools/hcluster_sg_parser/hcluster_sg_parser.py
tools/hcluster_sg_parser/hcluster_sg_parser.py
tools/hcluster_sg_parser/hcluster_sg_parser.py
""" A simple parser to convert the hcluster_sg output into lists of IDs, one list for each cluster. When a minimum and/or maximum number of cluster elements are specified, the IDs contained in the filtered-out clusters are collected in the "discarded IDS" output dataset. Usage: python hcluster_sg_parser.py [-m <N>] [-M <N>] <file> <discarded_out> """ import optparse import os import sys def main(): parser = optparse.OptionParser() parser.add_option('-m', '--min', type='int', default=0, help='Minimum number of cluster elements') parser.add_option('-M', '--max', type='int', default=sys.maxsize, help='Maximum number of cluster elements') parser.add_option('-d', '--dir', type='string', help="Absolute or relative path to output directory. If the directory does not exist, it will be created") options, args = parser.parse_args() if options.dir and not os.path.exists(options.dir): os.mkdir(options.dir) with open(args[2], 'w') as discarded_max_out: with open(args[1], 'w') as discarded_min_out: with open(args[0]) as fh: for line in fh: line = line.rstrip() line_cols = line.split('\t') cluster_id = line_cols[0] n_ids = int(line_cols[-2]) id_list = line_cols[-1].replace(',', '\n') if n_ids < options.min: discarded_min_out.write(id_list) elif n_ids > options.max: discarded_max_out.write(id_list) else: outfile = cluster_id + '_output.txt' if options.dir != "": outfile = options.dir + "/" + cluster_id + '_output.txt' with open(outfile, 'w') as f: f.write(id_list) if __name__ == "__main__": main()
Python
0
@@ -1692,14 +1692,8 @@ .dir - != %22%22 :%0A @@ -1733,53 +1733,41 @@ = o -ptions.dir + %22/%22 + cluster_id + '_output.txt' +s.path.join(options.dir, outfile) %0A
0f7cb25ea5a3fbb3c88f4fd7207144f29140f69c
Change happy_numbers to check for number 4.
happy_numbers/happy_numbers.py
happy_numbers/happy_numbers.py
""" Happy numbers solution, code eval. https://www.codeeval.com/open_challenges/39/ A happy number is defined by the following process. Starting with any positive integer, replace the number by the sum of the squares of its digits, and repeat the process until the number equals 1 (where it will stay), or it loops endlessly in a cycle which does not include 1. Those numbers for which this process ends in 1 are happy numbers, while those that do not end in 1 are unhappy numbers. INPUT SAMPLE: The first argument is the pathname to a file which contains test data, one test case per line. Each line contains a positive integer. E.g. 1 7 22 OUTPUT SAMPLE: If the number is a happy number, print out 1. If not, print out 0. E.g 1 1 0 For the curious, here's why 7 is a happy number: 7->49->97->130->10->1. Here's why 22 is NOT a happy number: 22->8->64->52->29->85->89->145->42->20->4->16->37->58->89 ... """ import sys def happy_number(num, past=[]): if num == 1: return 1 elif num in past: return 0 else: past.append(num) num = sum([int(x)**2 for x in str(num)]) return happy_number(num, past) def main(input_file): with open(input_file, 'r') as f: for line in f: print happy_number(int(line.strip())) if __name__ == '__main__': input_file = sys.argv[1] main(input_file)
Python
0
@@ -944,20 +944,11 @@ (num -, past=%5B%5D ):%0A + @@ -994,16 +994,68 @@ num -in past: +== 4: # all unhappy numbers end up in cycle including 4 %0A @@ -1082,33 +1082,8 @@ se:%0A - past.append(num)%0A @@ -1127,16 +1127,16 @@ (num)%5D)%0A + @@ -1162,14 +1162,8 @@ (num -, past )%0A%0A%0A
5af16432976f72de1d86f1d725205c4ec6a6caa2
Add warning when entity not found in reproduce_state
homeassistant/helpers/state.py
homeassistant/helpers/state.py
""" homeassistant.helpers.state ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Helpers that help with state related things. """ import logging from homeassistant.core import State import homeassistant.util.dt as dt_util from homeassistant.const import ( STATE_ON, STATE_OFF, SERVICE_TURN_ON, SERVICE_TURN_OFF, ATTR_ENTITY_ID) _LOGGER = logging.getLogger(__name__) # pylint: disable=too-few-public-methods, attribute-defined-outside-init class TrackStates(object): """ Records the time when the with-block is entered. Will add all states that have changed since the start time to the return list when with-block is exited. """ def __init__(self, hass): self.hass = hass self.states = [] def __enter__(self): self.now = dt_util.utcnow() return self.states def __exit__(self, exc_type, exc_value, traceback): self.states.extend(get_changed_since(self.hass.states.all(), self.now)) def get_changed_since(states, utc_point_in_time): """ Returns all states that have been changed since utc_point_in_time. """ point_in_time = dt_util.strip_microseconds(utc_point_in_time) return [state for state in states if state.last_updated >= point_in_time] def reproduce_state(hass, states, blocking=False): """ Takes in a state and will try to have the entity reproduce it. """ if isinstance(states, State): states = [states] for state in states: current_state = hass.states.get(state.entity_id) if current_state is None: continue if state.state == STATE_ON: service = SERVICE_TURN_ON elif state.state == STATE_OFF: service = SERVICE_TURN_OFF else: _LOGGER.warning("Unable to reproduce state for %s", state) continue service_data = dict(state.attributes) service_data[ATTR_ENTITY_ID] = state.entity_id hass.services.call(state.domain, service, service_data, blocking)
Python
0.000003
@@ -1525,16 +1525,134 @@ s None:%0A + _LOGGER.warning('reproduce_state: Unable to find entity %25s',%0A state.entity_id)%0A @@ -1860,16 +1860,33 @@ arning(%22 +reproduce_state: Unable t @@ -1907,16 +1907,40 @@ ate -for %25s%22, +%0A sta
6f3f171707d456c49167af381bd65dbbb0b3ce06
Add checksum for test_consistency (#9570)
tests/python/gpu/test_forward.py
tests/python/gpu/test_forward.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import os
import numpy as np
import mxnet as mx
from mxnet.test_utils import *

def _get_model():
    if not os.path.exists('model/Inception-7-symbol.json'):
        download('http://data.mxnet.io/models/imagenet/inception-v3.tar.gz', dirname='model')
        os.system("cd model; tar -xf inception-v3.tar.gz --strip-components 1")

def _dump_images(shape):
    import skimage.io
    import skimage.transform
    img_list = []
    for img in sorted(os.listdir('data/test_images/')):
        img = skimage.io.imread('data/test_images/'+img)
        short_egde = min(img.shape[:2])
        yy = int((img.shape[0] - short_egde) / 2)
        xx = int((img.shape[1] - short_egde) / 2)
        img = img[yy : yy + short_egde, xx : xx + short_egde]
        img = skimage.transform.resize(img, shape)
        img_list.append(img)
    imgs = np.asarray(img_list, dtype=np.float32).transpose((0, 3, 1, 2)) - 128
    np.save('data/test_images_%d_%d.npy'%shape, imgs)

def _get_data(shape):
    download("http://data.mxnet.io/data/test_images_%d_%d.npy" % (shape), dirname='data')
    download("http://data.mxnet.io/data/inception-v3-dump.npz", dirname="data")

def test_consistency(dump=False):
    shape = (299, 299)
    _get_model()
    _get_data(shape)
    if dump:
        _dump_images(shape)
        gt = None
    else:
        gt = {n: mx.nd.array(a) for n, a in np.load('data/inception-v3-dump.npz').items()}
    data = np.load('data/test_images_%d_%d.npy'%shape)
    sym, arg_params, aux_params = mx.model.load_checkpoint('model/Inception-7', 1)
    arg_params['data'] = data
    arg_params['softmax_label'] = np.random.randint(low=1, high=1000, size=(data.shape[0],))
    ctx_list = [{'ctx': mx.gpu(0), 'data': data.shape, 'type_dict': {'data': data.dtype}},
                {'ctx': mx.cpu(0), 'data': data.shape, 'type_dict': {'data': data.dtype}}]
    gt = check_consistency(sym, ctx_list, arg_params=arg_params, aux_params=aux_params,
                           tol=1e-3, grad_req='null', raise_on_err=False, ground_truth=gt)
    if dump:
        np.savez('data/inception-v3-dump.npz', **{n: a.asnumpy() for n, a in gt.items()})

if __name__ == '__main__':
    test_consistency(False)
Python
0.000001
@@ -857,16 +857,46 @@ import * +%0Afrom mxnet.gluon import utils %0A%0Adef _g @@ -1796,98 +1796,516 @@ -download(%22http://data.mxnet.io/data/test_images_%25d_%25d.npy%22 %25 (shape), dirname='data' +hash_test_img = %22355e15800642286e7fe607d87c38aeeab085b0cc%22%0A hash_inception_v3 = %2291807dfdbd336eb3b265dd62c2408882462752b9%22%0A fname = utils.download(%22http://data.mxnet.io/data/test_images_%25d_%25d.npy%22 %25 (shape),%0A path=%22data/test_images_%25d_%25d.npy%22 %25 (shape),%0A sha1_hash=hash_test_img)%0A if not utils.check_sha1(fname, hash_test_img):%0A raise RuntimeError(%22File %25s not downloaded completely%22 %25 (%22test_images_%25d_%25d.npy%22%25(shape)) )%0A +%0A +fname = utils. down @@ -2363,23 +2363,272 @@ pz%22, - dirname=%22data%22 +%0A path='data/inception-v3-dump.npz',%0A sha1_hash=hash_inception_v3)%0A if not utils.check_sha1(fname, hash_inception_v3):%0A raise RuntimeError(%22File %25s not downloaded completely%22 %25 (%22inception-v3-dump.npz%22) )%0A%0Ad
e2553fd92216c57faddb7f1a161a840b04e67e42
Add tests for market orders
tests/shipane_sdk/test_client.py
tests/shipane_sdk/test_client.py
# -*- coding: utf-8 -*-

import logging
import os
import unittest

import six
from hamcrest import *
from requests import HTTPError
from six.moves import configparser

from shipane_sdk import Client
from shipane_sdk.client import MediaType
from tests.shipane_sdk.matchers.dataframe_matchers import *

if six.PY2:
    ConfigParser = configparser.RawConfigParser
else:
    ConfigParser = configparser.ConfigParser


class ClientTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        logging.basicConfig(level=logging.DEBUG)

        config = ConfigParser()
        dir_path = os.path.dirname(os.path.realpath(__file__))
        config.read('{}/../config/config.ini'.format(dir_path))

        cls.client = Client(logging.getLogger(), **dict(config.items('ShiPanE')))
        cls.client_param = config.get('ShiPanE', 'client')

    def test_get_account(self):
        try:
            self.client.get_account(self.client_param)
        except HTTPError as e:
            self.fail()

    def test_get_positions(self):
        try:
            data = self.client.get_positions(self.client_param)
            assert_that(data['sub_accounts'], has_row(u'人民币'))
            assert_that(data['positions'], has_column(u'证券代码'))
        except HTTPError as e:
            self.fail()

    def test_get_positions_in_jq_format(self):
        try:
            data = self.client.get_positions(self.client_param, media_type=MediaType.JOIN_QUANT)
            self.assertIsNotNone(data['availableCash'])
        except HTTPError as e:
            self.fail()

    def test_get_orders(self):
        try:
            df = self.client.get_orders(self.client_param)
            assert_that(df, has_column_matches(u"(委托|合同)编号"))
        except HTTPError as e:
            self.fail()

    def test_get_open_orders(self):
        try:
            df = self.client.get_orders(self.client_param, 'open')
            assert_that(df, has_column_matches(u"(委托|合同)编号"))
        except HTTPError as e:
            self.fail()

    def test_get_filled_orders(self):
        try:
            df = self.client.get_orders(self.client_param, 'filled')
            assert_that(df, has_column_matches(u"(委托|合同)编号"))
        except HTTPError as e:
            self.fail()

    def test_buy_stock(self):
        try:
            order = self.client.buy(self.client_param, symbol='000001', price=11.11, amount=100)
            self.assertIsNotNone(order['id'])
        except HTTPError as e:
            result = e.response.json()
            self.assertNotEqual(result['source'], "实盘易")

    def test_sell_stock(self):
        try:
            order = self.client.sell(self.client_param, symbol='000001', price=12.11, amount=100)
            self.assertIsNotNone(order['id'])
        except HTTPError as e:
            result = e.response.json()
            self.assertNotEqual(result['source'], "实盘易")

    def test_cancel_all(self):
        try:
            self.client.cancel_all(self.client_param)
        except HTTPError as e:
            self.fail()

    def test_query(self):
        try:
            df = self.client.query(self.client_param, '查询>资金股份')
            assert_that(df, has_column(u'证券代码'))
        except HTTPError as e:
            self.fail()

    def test_query_new_stocks(self):
        df = self.client.query_new_stocks()
        self.assertTrue((df.columns == ['code', 'xcode', 'name', 'ipo_date', 'price']).all())
Python
0
@@ -2870,32 +2870,724 @@ urce'%5D, %22%E5%AE%9E%E7%9B%98%E6%98%93%22)%0A%0A + def test_buy_stock_at_market_price(self):%0A try:%0A order = self.client.buy(self.client_param, symbol='000001', type='MARKET', priceType=4, amount=100)%0A self.assertIsNotNone(order%5B'id'%5D)%0A except HTTPError as e:%0A result = e.response.json()%0A self.assertNotEqual(result%5B'source'%5D, %22%E5%AE%9E%E7%9B%98%E6%98%93%22)%0A%0A def test_sell_stock_at_market_price(self):%0A try:%0A order = self.client.sell(self.client_param, symbol='000001', type='MARKET', priceType=4, amount=100)%0A self.assertIsNotNone(order%5B'id'%5D)%0A except HTTPError as e:%0A result = e.response.json()%0A self.assertNotEqual(result%5B'source'%5D, %22%E5%AE%9E%E7%9B%98%E6%98%93%22)%0A%0A def test_can
a01efdffeb12d56c1e24932396ffd51b659cc8fd
Write a test for `metaclasses`.
tests/test_generic_decorators.py
tests/test_generic_decorators.py
__author__ = "John Kirkham <kirkhamj@janelia.hhmi.org>" __date__ = "$Mar 25, 2015 13:30:52 EDT$" import functools import nanshe.nanshe.generic_decorators class TestGenericDecorators(object): def test_update_wrapper(self): def wrapper(a_callable): def wrapped(*args, **kwargs): return(a_callable(*args, **kwargs)) return(wrapped) def func(a, b=2): return(a + b) func_wrapped_1 = functools.update_wrapper(wrapper, func) if not hasattr(func_wrapped_1, "__wrapped__"): setattr(func_wrapped_1, "__wrapped__", func) func_wrapped_2 = nanshe.nanshe.generic_decorators.update_wrapper( wrapper, func ) assert func_wrapped_1 == func_wrapped_2 def test_wraps(self): def wrapper(a_callable): def wrapped(*args, **kwargs): return(a_callable(*args, **kwargs)) return(wrapped) def func(a, b=2): return(a + b) func_wrapped_1 = functools.wraps(wrapper)(func) if not hasattr(func_wrapped_1, "__wrapped__"): setattr(func_wrapped_1, "__wrapped__", func) func_wrapped_2 = nanshe.nanshe.generic_decorators.wraps(wrapper)( func ) assert func_wrapped_1 == func_wrapped_2 def test_identity_wrapper(self): def func(a, b=2): return(a + b) func_wrapped = nanshe.nanshe.generic_decorators.identity_wrapper( func ) assert func_wrapped != func assert not hasattr(func, "__wrapped__") assert hasattr(func_wrapped, "__wrapped__") assert func_wrapped.__wrapped__ == func def test_static_variables(self): def func(a, b=2): return(a + b) func_wrapped = nanshe.nanshe.generic_decorators.static_variables( c = 7 )( func ) assert func_wrapped.__wrapped__ == func assert not hasattr(func, "c") assert hasattr(func_wrapped, "c") assert func_wrapped.c == 7 def test_metaclass_0(self): class Meta(type): pass class Class(object): pass ClassWrapped = nanshe.nanshe.generic_decorators.metaclass(Meta)(Class) assert ClassWrapped != Class assert not hasattr(Class, "__wrapped__") assert hasattr(ClassWrapped, "__wrapped__") assert ClassWrapped.__wrapped__ == Class def test_metaclass_1(self): class Meta(type): pass class Class(object): __slots__ = ("__special_object__",) def __init__(self): self.__special_object__ = object ClassWrapped = nanshe.nanshe.generic_decorators.metaclass(Meta)(Class) assert ClassWrapped != Class assert not hasattr(Class, "__wrapped__") assert hasattr(ClassWrapped, "__wrapped__") assert ClassWrapped.__wrapped__ == Class a = Class() b = ClassWrapped() assert hasattr(a, "__special_object__") assert hasattr(b, "__special_object__") assert b.__special_object__ == a.__special_object__
Python
0
@@ -3195,8 +3195,554 @@ bject__%0A +%0A%0A def test_metaclasses(self):%0A class Meta1(type):%0A pass%0A%0A class Meta2(type):%0A pass%0A%0A class Class(object):%0A pass%0A%0A ClassWrapped = nanshe.nanshe.generic_decorators.metaclasses(%0A Meta1, Meta2%0A )(Class)%0A%0A assert ClassWrapped != Class%0A assert not hasattr(Class, %22__wrapped__%22)%0A assert hasattr(ClassWrapped, %22__wrapped__%22)%0A assert hasattr(ClassWrapped.__wrapped__, %22__wrapped__%22)%0A assert ClassWrapped.__wrapped__.__wrapped__ == Class%0A