Dataset schema (column name, type, observed value range):

    commit        string   length 40 (fixed; the commit hash)
    subject       string   length 1 to 3.25k (the commit message)
    old_file      string   length 4 to 311
    new_file      string   length 4 to 311
    old_contents  string   length 0 to 26.3k
    lang          string   3 distinct classes
    proba         float64  0 to 1
    diff          string   length 0 to 7.82k
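Each record below lists these eight fields in order, one per line; file contents and diffs are stored flattened on single lines. A minimal sketch of reading such a dump with the Hugging Face datasets library follows; the dataset identifier is a placeholder, since the dump does not name where it is hosted:

    # Loading sketch. "user/commit-diffs" is a hypothetical placeholder ID,
    # not the actual location of this dataset.
    from datasets import load_dataset

    ds = load_dataset("user/commit-diffs", split="train")
    row = ds[0]
    print(row["commit"], row["subject"])  # 40-char commit hash and its message
    print(row["lang"], row["proba"])      # language label and a float in [0, 1]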
dc333069f4536fdc978d76924b098d10a1a8a50a
Fix error on status for last line in file.
ruby_coverage_status.py
ruby_coverage_status.py
import os
import sublime
import sublime_plugin
import json
import re

from .common.json_coverage_reader import JsonCoverageReader

STATUS_KEY = 'ruby-coverage-status'

class RubyCoverageStatusListener(sublime_plugin.EventListener):
    """Show coverage statistics in status bar."""

    def on_load(self, view):
        self.on_selection_modified(view)

    def on_selection_modified(self, view):
        if 'source.ruby' not in view.scope_name(0):
            return

        self.view = view
        if sublime.load_settings('SublimeRubyCoverage.sublime-settings').get('coverage_status_in_status_bar'):
            sublime.set_timeout_async(self.update_status, 0)
        else:
            self.erase_status()

    def update_status(self):
        view = self.view
        view.set_status(STATUS_KEY, self.get_view_coverage_status())

    def erase_status(self):
        view = self.view
        view.erase_status(STATUS_KEY)

    def get_view_coverage_status(self):
        view = self.view
        filename = view.file_name()
        if not filename:
            self.erase_status()

        r = JsonCoverageReader(filename)
        coverage = r.get_file_coverage(filename) if r else None
        if coverage is None:
            self.erase_status()
            return ''

        line_number = self.get_line_number()
        if line_number is None:
            self.erase_status()

        file_coverage = "File covered {:.1f}% ({}/{})".format(
            coverage['covered_percent'],
            coverage['covered_lines'],
            coverage['lines_of_code'])

        line_coverage = coverage['coverage'][line_number]
        if line_coverage is None:
            line_coverage = 'Line not executable'
        elif line_coverage > 0:
            line_coverage = 'Line covered × {}'.format(line_coverage)
        else:
            line_coverage = 'Line not covered'

        return file_coverage + ', ' + line_coverage

    def get_line_number(self):
        view = self.view
        regions = view.sel()
        if len(regions) > 1:
            return
        return view.rowcol(regions[0].a)[0]
Python
0
@@ -1630,16 +1630,69 @@ _number%5D + if len(coverage%5B'coverage'%5D) %3E line_number else None %0A
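The diff fields throughout this dump are percent-encoded ("%0A" for newline, "%5B"/"%5D" for square brackets, "%3E" for ">", and so on). A small helper using only the standard library makes them readable; the sample input is this record's diff:

    from urllib.parse import unquote

    def decode_diff(encoded):
        """Decode a percent-encoded diff field into readable text."""
        return unquote(encoded)

    sample = ("@@ -1630,16 +1630,69 @@ _number%5D + if len(coverage%5B'coverage'%5D)"
              " %3E line_number else None %0A")
    print(decode_diff(sample))
    # @@ -1630,16 +1630,69 @@ _number] + if len(coverage['coverage']) > line_number else None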
d00a12acee4f0709a81287d925577928f8d8dcda
Fix SymbiFlow tool prefix
edalize/symbiflow.py
edalize/symbiflow.py
import logging
import os.path
import platform
import re
import subprocess

from edalize.edatool import Edatool
from edalize.yosys import Yosys
from importlib import import_module

logger = logging.getLogger(__name__)

""" Symbiflow backtend

A core (usually the system core) can add the following files:

- Standard design sources (Verilog only)
- Constraints: unmanaged constraints with file_type SDC, pin_constraints
  with file_type PCF and placement constraints with file_type xdc
"""

class Symbiflow(Edatool):
    argtypes = ["vlogdefine", "vlogparam", "generic"]

    @classmethod
    def get_doc(cls, api_ver):
        if api_ver == 0:
            symbiflow_help = {
                "members": [
                    {
                        "name": "package",
                        "type": "String",
                        "desc": "FPGA chip package (e.g. clg400-1)",
                    },
                    {
                        "name": "part",
                        "type": "String",
                        "desc": "FPGA part type (e.g. xc7a50t)",
                    },
                    {
                        "name": "vendor",
                        "type": "String",
                        "desc": 'Target architecture. Currently only "xilinx" is supported',
                    },
                    {
                        "name": "pnr",
                        "type": "String",
                        "desc": 'Place and Route tool. Currently only "vpr" is supported',
                    },
                    {
                        "name": "vpr_options",
                        "type": "String",
                        "desc": "Additional vpr tool options. If not used, default options for the tool will be used",
                    },
                    {
                        "name" : "environment_script",
                        "type" : "String",
                        "desc" : "Optional bash script that will be sourced before each build step."
                    },
                ]
            }

            symbiflow_members = symbiflow_help["members"]
            return {
                "description": "The Symbiflow backend executes Yosys sythesis tool and VPR place and route. It can target multiple different FPGA vendors",
                "members": symbiflow_members,
            }

    def get_version(self):
        return "1.0"

    def configure_vpr(self):
        (src_files, incdirs) = self._get_fileset_files(force_slash=True)

        has_vhdl = "vhdlSource" in [x.file_type for x in src_files]
        has_vhdl2008 = "vhdlSource-2008" in [x.file_type for x in src_files]

        if has_vhdl or has_vhdl2008:
            logger.error("VHDL files are not supported in Yosys")

        file_list = []
        timing_constraints = []
        pins_constraints = []
        placement_constraints = []
        user_files = []

        for f in src_files:
            if f.file_type in ["verilogSource"]:
                file_list.append(f.name)
            if f.file_type in ["SDC"]:
                timing_constraints.append(f.name)
            if f.file_type in ["PCF"]:
                pins_constraints.append(f.name)
            if f.file_type in ["xdc"]:
                placement_constraints.append(f.name)
            if f.file_type in ["user"]:
                user_files.append(f.name)

        part = self.tool_options.get('part', None)
        package = self.tool_options.get('package', None)
        vendor = self.tool_options.get('vendor', None)

        if not part:
            logger.error('Missing required "part" parameter')
        if not package:
            logger.error('Missing required "package" parameter')

        if vendor == 'xilinx':
            if 'xc7a' in part:
                bitstream_device = 'artix7'
            if 'xc7z' in part:
                bitstream_device = 'zynq7'
            if 'xc7k' in part:
                bitstream_device = 'kintex7'

            partname = part + package

            # a35t are in fact a50t
            # leave partname with 35 so we access correct DB
            if part == 'xc7a35t':
                part = 'xc7a50t'
            device_suffix = 'test'
            toolchain_prefix = 'symbiflow_'
        elif vendor == 'quicklogic':
            partname = package
            device_suffix = 'wlcsp'
            bitstream_device = part + "_" + device_suffix
            # Newest Quicklogic toolchain release do not have any toolchain_prefix
            # if if will change in the future this variable should be adjusted.
            toolchain_prefix = ''

        vpr_options = self.tool_options.get("vpr_options", None)

        # Optional script that will be sourced right before executing each build step in Makefile
        # This script can for example setup enviroment variables or conda enviroment.
        # This file needs to be a bash file
        environment_script = self.tool_options.get('environment_script', None)

        makefile_params = {
            "top": self.toplevel,
            "sources": " ".join(file_list),
            "partname": partname,
            "part": part,
            "bitstream_device": bitstream_device,
            "sdc": " ".join(timing_constraints),
            "pcf": " ".join(pins_constraints),
            "xdc": " ".join(placement_constraints),
            "vpr_options": vpr_options,
            "device_suffix": device_suffix,
            "toolchain_prefix": toolchain_prefix,
            "environment_script": environment_script,
            "vendor": vendor,
        }
        self.render_template("symbiflow-vpr-makefile.j2", "Makefile", makefile_params)

    def configure_main(self):
        if self.tool_options.get("pnr") == "vtr":
            self.configure_vpr()
        else:
            logger.error("VPR is the only P&R tool currently supported in SymbiFlow")

    def run_main(self):
        logger.info("Programming")
Python
0
@@ -4207,52 +4207,8 @@ st'%0A - toolchain_prefix = 'symbiflow_'%0A @@ -4368,205 +4368,8 @@ ffix -%0A # Newest Quicklogic toolchain release do not have any toolchain_prefix%0A # if if will change in the future this variable should be adjusted.%0A toolchain_prefix = '' %0A%0A @@ -5221,32 +5221,28 @@ refix%22: -toolchain_prefix +'symbiflow_' ,%0A
921bdcc5d6f6ac4be7dfd0015e5b5fd6d06e6486
Raise exception when --debug is specified to main script
runcommands/__main__.py
runcommands/__main__.py
import sys

from .config import RawConfig, RunConfig
from .exc import RunCommandsError
from .run import run, partition_argv, read_run_args
from .util import printer

def main(argv=None):
    try:
        all_argv, run_argv, command_argv = partition_argv(argv)
        cli_args = run.parse_args(RawConfig(run=RunConfig()), run_argv)
        run_args = read_run_args(run)
        run_args.update(cli_args)
        run.implementation(
            None, all_argv=all_argv, run_argv=run_argv, command_argv=command_argv,
            cli_args=cli_args, **run_args)
    except RunCommandsError as exc:
        printer.error(exc, file=sys.stderr)
        return 1
    return 0

if __name__ == '__main__':
    sys.exit(main())
Python
0
@@ -181,16 +181,33 @@ =None):%0A + debug = None%0A try: @@ -207,16 +207,16 @@ try:%0A - @@ -411,24 +411,95 @@ e(cli_args)%0A + debug = run_args.get('debug', run.parameters%5B'debug'%5D.default)%0A run. @@ -636,24 +636,24 @@ **run_args)%0A - except R @@ -676,16 +676,201 @@ as exc:%0A + if debug or debug is None:%0A # User specified --debug OR processing didn't get far enough%0A # to determine whether user specified --debug.%0A raise%0A
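Decoded, this diff tracks whether --debug was requested and re-raises the exception instead of printing it. A sketch of main after the change, reconstructed from the diff above, so treat it as approximate:

    def main(argv=None):
        debug = None
        try:
            all_argv, run_argv, command_argv = partition_argv(argv)
            cli_args = run.parse_args(RawConfig(run=RunConfig()), run_argv)
            run_args = read_run_args(run)
            run_args.update(cli_args)
            debug = run_args.get('debug', run.parameters['debug'].default)
            run.implementation(
                None, all_argv=all_argv, run_argv=run_argv, command_argv=command_argv,
                cli_args=cli_args, **run_args)
        except RunCommandsError as exc:
            if debug or debug is None:
                # User specified --debug OR processing didn't get far enough
                # to determine whether user specified --debug.
                raise
            printer.error(exc, file=sys.stderr)
            return 1
        return 0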
5ca0e0683a663271c40d728e5f88ee19a26eca61
Add ProfileSummary to defaults
devserver/settings.py
devserver/settings.py
DEVSERVER_MODULES = (
    'devserver.modules.sql.SQLRealTimeModule',
    'devserver.modules.cache.CacheSummaryModule',
)

# This variable gets set to True when we're running the devserver
DEVSERVER_ACTIVE = False
Python
0
@@ -61,24 +61,80 @@ Module',%0A + 'devserver.modules.profile.ProfileSummaryModule',%0A # 'devserver.
0bbe7b5d349ba0d3e4113933d6582246874f3bb6
Rearrange admin headings
dexter/admin/admin.py
dexter/admin/admin.py
from dexter.models import db, Document, Entity, Medium, DocumentType, Topic, Fairness, Individual, User

from flask.ext.admin import Admin, expose, AdminIndexView
from flask.ext.admin.contrib.sqla import ModelView
from flask.ext.admin.model.template import macro
from wtforms.fields import SelectField, TextAreaField, TextField, HiddenField
import flask_wtf
from flask.ext.login import current_user

from ..forms import Form

class MyModelView(ModelView):
    form_base_class = Form
    can_create = True
    can_edit = True
    can_delete = False
    page_size = 50

    def is_accessible(self):
        return current_user.is_authenticated() and current_user.admin

class MyIndexView(AdminIndexView):
    @expose('/')
    def index(self):
        document_count = Document.query.count()
        self._template_args['document_count'] = document_count

        earliest = Document.query.order_by(Document.published_at).first()
        if earliest:
            self._template_args['date_from'] = earliest.published_at

        latest = Document.query.order_by(Document.published_at.desc()).first()
        if latest:
            self._template_args['date_to'] = latest.published_at

        group_counts = {}
        tmp = db.session.query(db.func.count(Entity.id), Entity.group).group_by(Entity.group).all()
        if tmp:
            for row in tmp:
                group_counts[str(row[1])] = int(row[0])
        self._template_args['group_counts'] = group_counts

        source_count = []
        tmp = db.session.query(db.func.count(Document.id), Medium.name) \
            .join(Medium) \
            .group_by(Document.medium_id) \
            .order_by(db.func.count(Document.id)) \
            .limit(5)
        for row in tmp:
            source_count.append([str(row[1]), int(row[0])])
        self._template_args['source_count'] = source_count

        return super(MyIndexView, self).index()

class DocumentView(MyModelView):
    can_create = False
    can_edit = False
    can_delete = False
    list_template = 'admin/custom_list_template.html'
    column_list = ('published_at', 'medium', 'title', 'summary', 'updated_at')
    column_labels = dict(
        published_at='Date Published',
        medium='Source',
        updated_at='Last Updated',
    )
    column_sortable_list = ('published_at', ('medium', Medium.name), 'title', 'summary', 'updated_at')
    column_formatters = dict(
        medium=macro('render_medium'),
        published_at=macro('render_date'),
        title=macro('render_document_title'),
        updated_at=macro('render_date')
    )
    form_overrides = dict(
        summary=TextAreaField,
        text=TextAreaField,
    )
    column_searchable_list = ('title', 'summary',)
    page_size = 50

class EntityView(MyModelView):
    can_create = False
    can_edit = False
    can_delete = False
    list_template = 'admin/custom_list_template.html'
    column_list = ('name', 'group', 'created_at', 'updated_at')
    column_labels = dict(
        created_at='Date Created',
        group='Type',
        updated_at='Last Updated',
    )
    column_formatters = dict(
        name=macro('render_entity_name'),
    )
    column_searchable_list = ('name', 'group')
    page_size = 50

class MediumView(MyModelView):
    list_template = 'admin/custom_list_template.html'
    column_labels = dict(
        medium_type='Publication Type',
    )
    column_formatters = dict(
        medium_type=macro('render_medium_type'),
    )

    choices = []
    for choice in ["PRINT", "ONLINE", "TELEVISION", "RADIO", "OTHER"]:
        choices.append((choice, choice.title()))

    form_overrides = dict(medium_type=SelectField)
    form_args = dict(
        # Pass the choices to the `SelectField`
        medium_type=dict(
            choices=choices
        ))

class IndividualView(MyModelView):
    can_create = True
    can_edit = True
    can_delete = False
    list_template = 'admin/custom_list_template.html'
    column_list = ('code', 'name',)
    column_searchable_list = ('code', 'name')
    page_size = 100

class UserView(MyModelView):
    can_create = True
    can_edit = True
    can_delete = False
    list_template = 'admin/custom_list_template.html'
    column_list = ('email', 'disabled', 'admin',)
    column_searchable_list = ('email',)
    page_size = 50

    def scaffold_form(self):
        form_class = super(UserView, self).scaffold_form()
        form_class.password = TextField('Change password')
        del form_class.encrypted_password
        del form_class.created_at
        del form_class.updated_at
        return form_class

admin_instance = Admin(url='/admin', base_template='admin/custom_master.html', name="Dexter Admin", index_view=MyIndexView())
admin_instance.add_view(UserView(User, db.session, name="Users", endpoint='user'))
admin_instance.add_view(DocumentView(Document, db.session, name="Articles", endpoint='document'))
admin_instance.add_view(EntityView(Entity, db.session, name="Entities", endpoint='entity'))
admin_instance.add_view(MyModelView(Topic, db.session, name="Article Topics", endpoint="topic"))
admin_instance.add_view(MyModelView(DocumentType, db.session, name="Article Types", endpoint="type"))
admin_instance.add_view(MediumView(Medium, db.session, name="Mediums", endpoint="medium"))
admin_instance.add_view(MyModelView(Fairness, db.session, name="Fairness", endpoint="fairness"))
admin_instance.add_view(IndividualView(Individual, db.session, name="Individuals", endpoint="individuals"))
Python
0.00001
@@ -5093,198 +5093,8 @@ '))%0A -admin_instance.add_view(DocumentView(Document, db.session, name=%22Articles%22, endpoint='document'))%0Aadmin_instance.add_view(EntityView(Entity, db.session, name=%22Entities%22, endpoint='entity'))%0A admi @@ -5443,23 +5443,19 @@ , name=%22 -Fairnes +Bia s%22, endp @@ -5464,15 +5464,11 @@ nt=%22 -fairnes +bia s%22)) @@ -5541,43 +5541,235 @@ me=%22 -Individuals%22, endpoint=%22individuals%22 +Affiliations%22, endpoint=%22affiliations%22))%0Aadmin_instance.add_view(EntityView(Entity, db.session, name=%22Entities%22, endpoint='entity'))%0Aadmin_instance.add_view(DocumentView(Document, db.session, name=%22Articles%22, endpoint='document' ))%0A
75ac453e873727675ba18e1f45b5bc0cfda26fd7
Increment the version number
angel/__init__.py
angel/__init__.py
__title__ = 'angel'
__version__ = '0.0.1'
__author__ = 'Bugra Akyildiz'
__license__ = 'MIT'
__copyright__ = 'Copyright 2014 Bugra Akyildiz'
Python
0.999999
@@ -32,17 +32,17 @@ = '0.0. -1 +2 '%0A__auth
c39d6494d1bc27dedb2141970cdd7a51382f0af4
Update version 0.5.0.dev1 -> 0.5.0
dimod/package_info.py
dimod/package_info.py
__version__ = '0.5.0.dev1'
__author__ = 'D-Wave Systems Inc.'
__authoremail__ = 'acondello@dwavesys.com'
__description__ = 'A shared API for binary quadratic model samplers.'
Python
0
@@ -17,13 +17,8 @@ .5.0 -.dev1 '%0A__
ff504057223cd71f8ebbb7e7a53dc7982a9422a8
Add stream logger config convenience function
boto3/__init__.py
boto3/__init__.py
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.

import logging

from boto3.session import Session

__author__ = 'Amazon Web Services'
__version__ = '0.0.1'

# The default Boto3 session; autoloaded when needed.
DEFAULT_SESSION = None

def setup_default_session(**kwargs):
    """
    Set up a default session, passing through any parameters to the session
    constructor. There is no need to call this unless you wish to pass custom
    parameters, because a default session will be created for you.
    """
    global DEFAULT_SESSION
    DEFAULT_SESSION = Session(**kwargs)

def _get_default_session():
    """
    Get the default session, creating one if needed.

    :rtype: boto3.session.Sesssion
    :return: The default session
    """
    if DEFAULT_SESSION is None:
        setup_default_session()

    return DEFAULT_SESSION

def client(service):
    """
    Create a low-level service client by name using the default session.

    :type service: string
    :param service: The name of a service, e.g. 's3' or 'ec2'

    :return: Service client instance
    """
    return _get_default_session().client(service)

def resource(service):
    """
    Create a resource service client by name using the default session.

    :type service: string
    :param service: The name of a service, e.g. 's3' or 'ec2'

    :return: Resource client instance
    """
    return _get_default_session().resource(service)

# Set up logging to ``/dev/null`` like a library is supposed to.
# http://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library
class NullHandler(logging.Handler):
    def emit(self, record):
        pass

logging.getLogger('boto3').addHandler(NullHandler())
Python
0
@@ -1085,16 +1085,895 @@ wargs)%0A%0A +def set_stream_logger(name='boto3', level=logging.DEBUG, format_string=None):%0A %22%22%22%0A Add a stream handler for the given name and level to the logging module.%0A By default, this logs all boto3 messages to %60%60stdout%60%60.%0A%0A %3E%3E%3E import boto3%0A %3E%3E%3E boto3.set_stream_logger('boto3.resources', logging.INFO)%0A%0A :type name: string%0A :param name: Log name%0A :type level: int%0A :param level: Logging level, e.g. %60%60logging.INFO%60%60%0A :type format_string: str%0A :param format_string: Log message format%0A %22%22%22%0A if format_string is None:%0A format_string = %22%25(asctime)s %25(name)s %5B%25(levelname)s%5D %25(message)s%22%0A%0A logger = logging.getLogger(name)%0A logger.setLevel(level)%0A handler = logging.StreamHandler()%0A handler.setLevel(level)%0A formatter = logging.Formatter(format_string)%0A handler.setFormatter(formatter)%0A logger.addHandler(handler)%0A%0A def _get
c88d5b0935f9cba8b3d76face5cf40098ef5a87b
Move difference file to build directory
preprint/latexdiff.py
preprint/latexdiff.py
#!/usr/bin/env python
# encoding: utf-8
"""
Command for running latexdiff.
"""

import logging
import os
import subprocess
import codecs

import git

from preprint.textools import inline, inline_blob, remove_comments
from preprint.gittools import read_git_blob

from cliff.command import Command

class Diff(Command):
    """Run latexdiff between HEAD and a git ref."""

    log = logging.getLogger(__name__)

    def get_parser(self, prog_name):
        parser = super(Diff, self).get_parser(prog_name)
        parser.add_argument('prev_commit',
                            help="Commit SHA to compare HEAD against.")
        parser.add_argument('-n', '--name', default="diff",
                            help="Name of the difference file.")
        return parser

    def take_action(self, parsed_args):
        # Inline current and previous versions of the document
        current_path = self._inline_current(self.app.options.master)
        prev_path = self._inline_prev(parsed_args.prev_commit,
                                      self.app.options.master)

        # Run latexmk
        diff_path = os.path.splitext(parsed_args.name)[0]
        ldiff_cmd = "latexdiff {prev} {current} > {diff}.tex".format(
            prev=prev_path, current=current_path, diff=diff_path)
        subprocess.call(ldiff_cmd, shell=True)

        # Compile the diff document with latexmk
        ltmk_cmd = "latexmk -f -pdf -bibtex-cond -c -gg {0}.tex".format(
            diff_path)
        subprocess.call(ltmk_cmd, shell=True)

    def _inline_current(self, root_tex):
        """Inline the current manuscript."""
        with codecs.open(root_tex, 'r', encoding='utf-8') as f:
            root_text = f.read()
        root_text = remove_comments(root_text)
        root_text = inline(root_text)
        output_path = "_current.tex"
        if os.path.exists(output_path):
            os.remove(output_path)
        with codecs.open(output_path, 'w', encoding='utf-8') as f:
            f.write(root_text)
        return output_path

    def _inline_prev(self, commit_ref, root_tex):
        """Inline the previous manuscript in the git tree."""
        root_text = read_git_blob(commit_ref, root_tex)
        root_text = remove_comments(root_text)
        root_text = inline_blob(commit_ref, root_text)
        output_path = "_prev.tex"
        if os.path.exists(output_path):
            os.remove(output_path)
        with codecs.open(output_path, 'w', encoding='utf-8') as f:
            f.write(root_text)
        return output_path

    def _get_n_commits(self):
        """docstring for _get_n_commits"""
        repo = git.Repo(".")
        print "HEAD", repo.head.commit.hexsha
        commits = list(repo.iter_commits())
        n = len(commits)
        return n

    def _get_commits(self):
        """docstring for _get_commits"""
        repo = git.Repo(".")
        commits = list(repo.iter_commits())
        # for cm in commits:
        #     print cm.committed_date, cm.hexsha
        return commits

    def _match_commit(self, sha):
        """Match the sha fragment to a commit."""
        commits = self._get_commits()
        for cm in commits:
            if cm.hexsha.startswith(sha):
                print sha, "match", cm.hexsha
                return cm
        return None
Python
0
@@ -130,16 +130,30 @@ codecs%0A +import shutil%0A import g @@ -678,14 +678,12 @@ ult= -%22diff%22 +None ,%0A @@ -1029,16 +1029,166 @@ aster)%0A%0A + if parsed_args.name is None:%0A name = %22HEAD_%7B0%7D%22.format(parsed_args.prev_commit)%0A else:%0A name = parsed_args.name%0A%0A @@ -1238,28 +1238,16 @@ plitext( -parsed_args. name)%5B0%5D @@ -1659,24 +1659,278 @@ hell=True)%0A%0A + # Copy to build directory%0A if not os.path.exists(%22build%22):%0A os.makedirs(%22build%22)%0A pdf_path = %22%7B0%7D.pdf%22.format(name)%0A if os.path.exists(pdf_path):%0A shutil.move(pdf_path, os.path.join(%22build%22, pdf_path))%0A%0A def _inl
608e9a878b56115c914be517980d1fbaae8bb7eb
edit preprocess
preprocess_markers.py
preprocess_markers.py
#!/usr/lib/python
from subprocess import call
import random

'''
compress those makers that are very close into a single marker which is present!
'''

def bin_makers(mapfile, min_len, output_file, heteros_gt):
    print '1 cycle...'
    f1 = open(mapfile)
    all_lines = f1.readlines()
    f2 = open(output_file, 'w')
    fir_line = all_lines[0]
    f2.write(fir_line)
    min_interval = int(min_len)
    first_flag = all_lines[1:-1]
    second_flag = all_lines[2:]
    gtMatrix = []   # all lines except first line
    binnedPos = []  # chr-pos which has been bined in second flag
    for fir, sec in zip(first_flag, second_flag):
        first = fir.split()
        second = sec.split()
        fir_gt = first[1:]
        sec_gt = second[1:]
        fir_chr = first[0].split('-')[0]
        sec_chr = second[0].split('-')[0]
        fir_pos = int(first[0].split('-')[-1])
        sec_pos = int(second[0].split('-')[1])
        if fir_chr == sec_chr:
            if first[0] not in binnedPos:
                if sec_pos-fir_pos<=min_interval:
                    new_locus_name = fir.split()[0]+'-'+str(sec_pos)
                    new_gt = combine_gt(fir_gt, sec_gt, heteros_gt)
                    bin_line = new_locus_name + '\t' + new_gt + '\n'
                    gtMatrix.append(bin_line)
                    binnedPos.append(second[0])  # add marker bined to the pos set
                else:
                    gtMatrix.append(fir)
            else:
                pass
        else:
            if first[0] not in binnedPos:
                gtMatrix.append(fir)
            else:
                pass
    last_line = second_flag[-1]
    if last_line.split()[0] not in binnedPos:
        gtMatrix.append(last_line)
    remainN = len(gtMatrix)
    print '%s markers binned!'%len(binnedPos)
    cycle_n = 2
    while True:
        print '%s cycles'%cycle_n
        N, gtMatrix = cycle_bin(gtMatrix,min_interval,heteros_gt)
        print '%s markers binned in this cycle.'%(N)
        if N == 0:
            break
        cycle_n += 1
    new_Matrix = gen_binned_names(gtMatrix)
    f2.writelines(new_Matrix)

def cycle_bin(gtMatrix,min_interval, heteros_gt):
    N1 = len(gtMatrix)
    first_flag = gtMatrix[0:-1]
    second_flag = gtMatrix[1:]
    Matrix_gt = []   # all lines except first line
    binned_Pos = []  # chr-pos which has been bined in second flag
    for fir, sec in zip(first_flag, second_flag):
        first = fir.split()
        second = sec.split()
        fir_gt = first[1:]
        sec_gt = second[1:]
        fir_chr = first[0].split('-')[0]
        sec_chr = second[0].split('-')[0]
        fir_pos = int(first[0].split('-')[-1])
        sec_pos = int(second[0].split('-')[1])
        if fir_chr == sec_chr:
            if first[0] not in binned_Pos:
                if sec_pos-fir_pos<=min_interval:
                    new_locus_name = first[0]+'-'+'-'.join(second[0].split('-')[1:])
                    new_gt = combine_gt(fir_gt, sec_gt, heteros_gt)
                    bin_line = new_locus_name + '\t' + new_gt + '\n'
                    Matrix_gt.append(bin_line)
                    binned_Pos.append(second[0])  # add marker bined to the pos list
                else:
                    Matrix_gt.append(fir)
            else:
                pass
        else:
            if first[0] not in binned_Pos:
                Matrix_gt.append(fir)
            else:
                pass
    last_line = second_flag[-1]
    if last_line.split()[0] not in binned_Pos:
        Matrix_gt.append(last_line)
    N2 = len(Matrix_gt)
    return N1-N2, Matrix_gt

def combine_gt(gt_list1,gt_list2, heteros_gt):
    new_gt_ls = []
    for i,j in zip(gt_list1,gt_list2):
        if i == j:new_gt_ls.append(i)
        elif i == '-':new_gt_ls.append(j)
        elif j == '-':new_gt_ls.append(i)
        elif i == heteros_gt: new_gt_ls.append(i)
        elif j == heteros_gt: new_gt_ls.append(j)
        else: new_gt_ls.append(random.choice([i,j]))
    return '\t'.join(new_gt_ls)

def gen_binned_names(gtMatrix):
    f = open('binned_markers.txt', 'w')
    new_Matrix = []
    for i in gtMatrix:
        j = i.split()[0].split('-')
        if len(j) > 2:
            GTs = '\t'.join(i.split()[1:])
            newline = '%s-%s\t%s\n'%(j[0],j[1],GTs)
            new_Matrix.append(newline)
            f.write('%s-%s: %s\n'%(j[0],j[1],i.split()[0]))
        else:new_Matrix.append(i)
    return new_Matrix

if __name__ == "__main__":
    import sys
    if len(sys.argv) == 5:
        bin_makers(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])
    else:
        print 'usage:\npython bin_markers.py map_file min_interval output_file hetegt_letter'
Python
0.000001
@@ -3911,26 +3911,27 @@ en(' -binned_markers.txt +preprocess_bin.info ', '
3e6508e4036def376f1a41943499a30f034e665c
Fix __sub__
rwrtrack/core/record.py
rwrtrack/core/record.py
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.orm import aliased
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.ext.hybrid import hybrid_property

from .db import DeclarativeBase, sesh
from .derivedstats import DerivedStats
from .exceptions import NoRecordError

class Record(DeclarativeBase, DerivedStats):
    __tablename__ = "records"
    date = Column(Integer, primary_key=True)
    account_id = Column(Integer, ForeignKey("accounts._id"), primary_key=True)
    username = Column(String, nullable=False)
    xp = Column(Integer, nullable=False)
    time_played = Column(Integer, nullable=False)
    kills = Column(Integer, nullable=False)
    deaths = Column(Integer, nullable=False)
    kill_streak = Column(Integer, nullable=False)
    targets_destroyed = Column(Integer, nullable=False)
    vehicles_destroyed = Column(Integer, nullable=False)
    soldiers_healed = Column(Integer, nullable=False)
    team_kills = Column(Integer, nullable=False)
    distance_moved = Column(Integer, nullable=False)
    shots_fired = Column(Integer, nullable=False)
    throwables_thrown = Column(Integer, nullable=False)

    def __repr__(self):
        return f"Record(date={self.date}, account_id={self.account_id}, username='{self.username}', " \
               f"xp={self.xp}, time_played={self.time_played}, " \
               f"kills={self.kills}, deaths={self.deaths}, score={self.score}, kdr={self.kdr}, " \
               f"kill_streak={self.kill_streak}, " \
               f"targets_destroyed={self.targets_destroyed}, vehicles_destroyed={self.vehicles_destroyed}, " \
               f"soldiers_healed={self.soldiers_healed}, team_kills={self.team_kills}, " \
               f"distance_moved={self.distance_moved}, " \
               f"shots_fired={self.shots_fired}, throwables_thrown={self.throwables_thrown})"

    def __sub__(self, other):
        date = f"'diff:{other.date}-{self.date}'"
        account_id = self.account_id
        username = self.username
        xp = self.xp - other.xp
        time_played = self.time_played - other.time_played
        kills = self.kills - other.kills
        deaths = self.deaths - other.deaths
        kill_streak = self.kill_streak - other.kill_streak
        targets_destroyed = self.targets_destroyed - other.targets_destroyed
        vehicles_destroyed = self.vehicles_destroyed - other.vehicles_destroyed
        soldiers_healed = self.soldiers_healed - other.soldiers_healed
        team_kills = self.team_kills - other.team_kills
        distance_moved = self.distance_moved - other.distance_moved
        shots_fired = self.shots_fired - other.shots_fired
        throwables_thrown = self.throwables_thrown - other.throwables_thrown
        r = Record(date, account_id, username, xp, time_played,
                   kills, deaths, kill_streak,
                   targets_destroyed, vehicles_destroyed, soldiers_healed,
                   team_kills, distance_moved, shots_fired, throwables_thrown)
        return r

# Set aliases for Record to use in self-join scenarios
RA, RB = aliased(Record, name="ra"), aliased(Record, name="rb")

def get_records_on_date(date):
    try:
        return sesh.query(Record).filter_by(date=date).all()
    except NoResultFound as e:
        raise NoRecordError(f"No records on {date}") from e
Python
0.00023
@@ -2750,16 +2750,21 @@ ord(date +=date , accoun @@ -2771,52 +2771,119 @@ t_id -, username, xp, time_played, +=account_id, username=username, xp=xp, time_played=time_played,%0A kills= kills, +deaths= deaths, kill @@ -2878,16 +2878,28 @@ deaths, +kill_streak= kill_str @@ -2914,32 +2914,50 @@ +targets_destroyed= targets_destroye @@ -2981,39 +2981,119 @@ oyed -, soldiers_healed, team_kills, +=vehicles_destroyed,%0A soldiers_healed=soldiers_healed, team_kills=team_kills, distance_moved= dist @@ -3134,18 +3134,48 @@ ts_fired -, +=shots_fired, throwables_thrown= throwabl
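Decoded, this diff fixes __sub__ by passing the computed values as keyword arguments, so they can no longer be bound to the wrong columns by position. The constructor call after the change reads roughly as follows (reconstructed from the diff, so treat it as approximate):

    r = Record(date=date, account_id=account_id, username=username, xp=xp,
               time_played=time_played, kills=kills, deaths=deaths,
               kill_streak=kill_streak, targets_destroyed=targets_destroyed,
               vehicles_destroyed=vehicles_destroyed, soldiers_healed=soldiers_healed,
               team_kills=team_kills, distance_moved=distance_moved,
               shots_fired=shots_fired, throwables_thrown=throwables_thrown)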
7b071f3ccacd87f6dcb0e9a570d8ce386dbf7a4f
change FULLNAME to AUTHOR_FULLNAME
pelicanconf.py
pelicanconf.py
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals

AUTHOR = u'jchen'
FULLNAME = u'Jon Chen'
SITENAME = u'BURRITO 4 LYFE'
SITEURL = ''

TIMEZONE = 'ETC/UTC'
DEFAULT_LANG = u'en'

CSS_FILE = 'style.css'

# theme stuff
THEME = './theme'

# plugins
PLUGIN_PATH = './plugins'
PLUGINS = ['gravatar']

DISQUS_SITENAME = "voltaireblog"

# gravatar email
AUTHOR_EMAIL = 'dabestmayne@burrito.sh'

# social
TWITTER_USERNAME = 's_jchen'

# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None

DEFAULT_PAGINATION = 10

DISPLAY_CATEGORIES_ON_MENU = False
DISPLAY_MENUITEMS_ON_MENU = False
DISPLAY_NAVBAR = False
DISPLAY_PAGES_ON_MENU = False

DEFAULT_DATE_FORMAT = ('%Y-%m-%d')

# Uncomment following line if you want document-relative URLs when developing
RELATIVE_URLS = True
Python
0.999663
@@ -100,16 +100,23 @@ 'jchen'%0A +AUTHOR_ FULLNAME
15a889dd91d0530e2ded5ed343b33296a9950c5a
Add link to license of tree_pp.
penchy/util.py
penchy/util.py
""" This module provides miscellaneous utilities. .. moduleauthor:: Fabian Hirschmann <fabian@hirschm.net> .. moduleauthor:: Michael Markert <markert.michael@googlemail.com> :copyright: PenchY Developers 2011-2012, see AUTHORS :license: MIT License, see LICENSE """ from __future__ import print_function import hashlib import imp import logging import os import shutil import sys import tempfile import inspect from contextlib import contextmanager from functools import wraps from xml.etree import ElementTree from xml.etree.ElementTree import SubElement from tempfile import NamedTemporaryFile from penchy.compat import write from penchy import bootstrap log = logging.getLogger(__name__) def memoized(f): """ Decorator that provides memoization, i.e. a cache that saves the result of a function call and returns them if called with the same arguments. The function will not be evaluated if the arguments are present in the cache. """ cache = {} @wraps(f) def _memoized(*args, **kwargs): key = tuple(args) + tuple(kwargs.items()) try: if key in cache: return cache[key] except TypeError: # if passed an unhashable type evaluate directly return f(*args, **kwargs) ret = f(*args, **kwargs) cache[key] = ret return ret return _memoized # Copyright (c) 1995-2010 by Frederik Lundh # http://effbot.org/zone/element-lib.htm#prettyprint # Licensed under the terms of the Historical Permission Notice # and Disclaimer. def tree_pp(elem, level=0): """ Pretty-prints an ElementTree. :param elem: root node :type elem: :class:`~xml.etree.ElementTree.Element` :param level: current level in tree :type level: int """ i = '\n' + level * ' ' if len(elem): if not elem.text or not elem.text.strip(): elem.text = i + ' ' if not elem.tail or not elem.tail.strip(): elem.tail = i for elem in elem: tree_pp(elem, level + 1) if not elem.tail or not elem.tail.strip(): elem.tail = i else: if level and (not elem.tail or not elem.tail.strip()): elem.tail = i def dict2tree(elem, dict_): """ Transform the given dictionary to a ElementTree and add it to the given element. :param elem: parent element :type elem: :class:`xml.etree.ElementTree.Element` :param dict_: dict to add to ``elem`` :type dict_: dict """ for key in dict_: if dict_[key]: e = SubElement(elem, key) if type(dict_[key]) == dict: dict2tree(e, dict_[key]) else: e.text = dict_[key] def sha1sum(filename, blocksize=65536): """ Returns the sha1 hexdigest of a file. """ hasher = hashlib.sha1() with open(filename, 'rb') as afile: buf = afile.read(blocksize) while len(buf) > 0: hasher.update(buf) buf = afile.read(blocksize) return hasher.hexdigest() @contextmanager def tempdir(prefix='penchy-invocation', delete=False): """ Contextmanager to execute in new created temporary directory. :param prefix: prefix of the temporary directory :type prefix: str :param delete: delete the temporary directory afterwards :type delete: bool """ fwd = os.getcwd() cwd = tempfile.mkdtemp(prefix=prefix) os.chdir(cwd) yield os.chdir(fwd) if delete: shutil.rmtree(cwd) def make_bootstrap_client(): """ Returns the temporary filename of a file containing the bootstrap client. """ tf = NamedTemporaryFile() source = inspect.getsource(bootstrap) write(tf, source) tf.flush() return tf def load_job(filename): """ Loads a job. 
:param filename: filename of the job :type filename: str """ assert 'config' in sys.modules, 'You have to load the penchyrc before a job' with disable_write_bytecode(): job = imp.load_source('job', filename) log.info('Loaded job from %s' % filename) return job def load_config(filename): """ Loads the config module from filename. :param filename: filename of the config file :type filename: str """ with disable_write_bytecode(): config = imp.load_source('config', filename) log.info('Loaded configuration from %s' % filename) return config def get_config_attribute(config, name, default_value): """ Returns an attribute of a config module or the default value. :param config: config module to use :param name: attribute name :type name: str :param default: default value """ if hasattr(config, name): return getattr(config, name) else: return default_value @contextmanager def disable_write_bytecode(): """ Contextmanager to temporarily disable writing bytecode while executing. """ old_state = sys.dont_write_bytecode sys.dont_write_bytecode = True yield sys.dont_write_bytecode = old_state def default(value, replacement): """ Check if ``value`` is ``None`` and then return ``replacement`` or else ``value``. :param value: value to check :param replacement: default replacement for value :returns: return the value or replacement if value is None """ return value if value is not None else replacement def die(msg): """ Print msg to stderr and exit with exit code 1. :param msg: msg to print :type msg: str """ print(msg, file=sys.stderr) sys.exit(1) def extract_maven_credentials(id_, path=os.path.expanduser('~/.m2/settings.xml')): """ Extracts the username and password for a given ``id_`` from a maven settings.xml. :param id_: id of the remote machine as defined in the settings file :type id_: str :param filename: path to settings.xml :type filename: str """ xmlns = '{http://maven.apache.org/SETTINGS/1.0.0}' tree = ElementTree.parse(path) xpath = './/{0}server[{0}id="{1}"]'.format(xmlns, id_) servers = tree.findall(xpath) username = servers[0].findtext(".//{0}username".format(xmlns)) password = servers[0].findtext(".//{0}password".format(xmlns)) return username, password
Python
0
@@ -1554,16 +1554,58 @@ sclaimer +, see http://effbot.org/zone/copyright.htm .%0Adef tr
a3c582df681aae77034e2db08999c89866cd6470
Refactor earth mover's distance implementation
utilities.py
utilities.py
import collections

def each(function, iterable):
    for item in iterable:
        function(item)

def each_unpack(function, iterable):
    for item in iterable:
        function(*item)

def minmax(*args):
    min = None
    max = None
    for x in args:
        if max < x:
            max = x
        if x > min:
            min = x
    return min, max

def map_inplace(function, list, depth=0):
    if depth <= 0:
        list[:] = map(function, list)
    else:
        for item in list:
            map_inplace(function, item, depth - 1)

def count_if(function, iterable):
    count = 0
    for item in iterable:
        if function(item):
            count += 1
    return count

def teemap(iterable, *functions):
    map(lambda item: (f(item) for f in functions), iterable)

class ProbabilityDistribution(collections.defaultdict):
    """"Holds a probability distribution and can compute the distance to other dists"""

    def __init__(self):
        collections.defaultdict.__init__(self, int)

    def get(self, k, d = 0):
        return dict.get(self, k, d)

    def distance_to(self, compare_to):
        key_set = self.viewkeys() | compare_to.viewkeys()

        currentEMD = 0
        lastEMD = 0
        totaldistance = 0

        for key in key_set:
            lastEMD = currentEMD
            currentEMD = (self.get(key, 0) + lastEMD) - compare_to.get(key, 0)
            totaldistance += math.fabs(currentEMD)

        return totaldistance
Python
0
@@ -1027,35 +1027,46 @@ -key_set = self.viewkeys() %7C +return sum(%0A (abs(self.get(bin) - com @@ -1077,178 +1077,54 @@ _to. -viewkeys()%0A%0A currentEMD = 0%0A lastEMD = 0%0A totaldistance = 0%0A%0A for key in key_set:%0A lastEMD = currentEMD%0A currentEMD = (self.get(key, 0) + lastEMD) - +get(bin))%0A for bin in self.viewkeys() %7C com @@ -1135,86 +1135,26 @@ _to. -get(key, 0)%0A totaldistance += math.fabs(currentEMD)%0A%0A return totaldistance +viewkeys()),%0A 0)%0A
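Decoded, this diff replaces the cumulative loop with a single expression: the sum of absolute per-bin differences over the union of both key sets. The refactored method reads roughly as follows (reconstructed from the diff; Python 2 viewkeys, as in the original file):

    def distance_to(self, compare_to):
        # Sum of absolute per-bin differences over all bins seen in either distribution.
        return sum(
            (abs(self.get(bin) - compare_to.get(bin))
             for bin in self.viewkeys() | compare_to.viewkeys()),
            0)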
49151e5c490f1111e4de82aa2d6215e927e03cca
Make get_search_state dynamic for SearchHighlighter.
prompt_toolkit/layout/highlighters.py
prompt_toolkit/layout/highlighters.py
""" Highlighters for usage in a BufferControl. Highlighters are very similar to processors, but they are applied after the BufferControl created a screen instance. (Instead of right before creating the screen.) Highlighters can't change the content of the screen, but they can mark regions (start_pos, end_pos) as highlighted, using a certain Token. When possible, it's adviced to use a Highlighter instead of a Processor, because most of the highlighting code is applied only to the visible region of the screen. (The Window class will apply the highlighting to the visible region.) """ from __future__ import unicode_literals from pygments.token import Token from abc import ABCMeta, abstractmethod from six import with_metaclass from prompt_toolkit.document import Document from prompt_toolkit.enums import SEARCH_BUFFER from prompt_toolkit.filters import to_cli_filter __all__ = ( 'Fragment', 'SelectionHighlighter', 'SearchHighlighter', 'MatchingBracketHighlighter', 'ConditionalHighlighter', ) class Fragment(object): """ Highlight fragment. :param start: (int) Cursor start position. :param end: (int) Cursor end position. :param token: Pygments Token. """ def __init__(self, start, end, token): self.start = start self.end = end self.token = token def __repr__(self): return 'Fragment(%r, %r, %r)' % (self.start, self.end, self.token) class Highlighter(with_metaclass(ABCMeta, object)): @abstractmethod def get_fragments(self, cli, document): """ Return a list of :class:`.Fragment` instances. (This can be a generator as well.) """ return [] class SelectionHighlighter(Highlighter): """ Highlight the selection. """ def get_fragments(self, cli, document): selection_range = document.selection_range() if selection_range: from_, to = selection_range yield Fragment(from_, to, Token.SelectedText) def invalidation_hash(self, cli, document): # When the selection range changes, highlighting will be different. return ( document.selection_range(), ) class SearchHighlighter(Highlighter): """ Highlight search matches in the document. :param preview_search: A Filter; when active it indicates that we take the search text in real time while the user is typing, instead of the last active search state. """ def __init__(self, preview_search=False, search_buffer_name=SEARCH_BUFFER): self.preview_search = to_cli_filter(preview_search) self.search_buffer_name = search_buffer_name def _get_search_text(self, cli): """ The text we are searching for. """ # When the search buffer has focus, take that text. if self.preview_search(cli) and cli.buffers[self.search_buffer_name].text: return cli.buffers[self.search_buffer_name].text # Otherwise, take the text of the last active search. else: return cli.search_state.text def get_fragments(self, cli, document): search_text = self._get_search_text(cli) ignore_case = cli.is_ignoring_case if search_text and not cli.is_returning: for index in document.find_all(search_text, ignore_case=ignore_case): if index == document.cursor_position: token = Token.SearchMatch.Current else: token = Token.SearchMatch yield Fragment(index, index + len(search_text), token) def invalidation_hash(self, cli, document): search_text = self._get_search_text(cli) # When the search state changes, highlighting will be different. return ( search_text, cli.is_returning, # When we search for text, and the cursor position changes. The # processor has to be applied every time again, because the current # match is highlighted in another color. 
(search_text and document.cursor_position), ) class ConditionalHighlighter(Highlighter): """ Highlighter that applies another highlighter, according to a certain condition. :param highlighter: :class:`.Highlighter` instance. :param filter: :class:`~prompt_toolkit.filters.CLIFilter` instance. """ def __init__(self, highlighter, filter): assert isinstance(highlighter, Highlighter) self.highlighter = highlighter self.filter = to_cli_filter(filter) def get_fragments(self, cli, document): if self.filter(cli): return self.highlighter.get_fragments(cli, document) else: return [] def invalidation_hash(self, cli, document): # When enabled, use the hash of the highlighter. Otherwise, just use # False. if self.filter(cli): return (True, self.highlighter.invalidation_hash(cli, document)) else: return False class MatchingBracketHighlighter(Highlighter): """ When the cursor is on or right after a bracket, it highlights the matching bracket. """ _closing_braces = '])}>' def __init__(self, chars='[](){}<>'): self.chars = chars def get_fragments(self, cli, document): result = [] def replace_token(pos): """ Replace token in list of tokens. """ result.append(Fragment(pos, pos + 1, Token.MatchingBracket)) def apply_for_document(document): """ Find and replace matching tokens. """ if document.current_char in self.chars: pos = document.matching_bracket_position if pos: replace_token(document.cursor_position) replace_token(document.cursor_position + pos) return True # Apply for character below cursor. applied = apply_for_document(document) # Otherwise, apply for character before cursor. if (not applied and document.cursor_position > 0 and document.char_before_cursor in self._closing_braces): apply_for_document(Document(document.text, document.cursor_position - 1)) return result def invalidation_hash(self, cli, document): on_brace = document.current_char in self.chars after_brace = document.char_before_cursor in self.chars if on_brace: return (True, document.cursor_position) elif after_brace and document.char_before_cursor in self._closing_braces: return (True, document.cursor_position - 1) else: # Don't include the cursor position in the hash if we are not *on* # a brace. We don't have to rerender the output, because it will be # the same anyway. return False
Python
0
@@ -2472,16 +2472,168 @@ state.%0A + :param get_search_state: (Optional) Callable that takes a%0A CommandLineInterface and returns the SearchState to be used for the highlighting.%0A %22%22%22%0A @@ -2709,16 +2709,56 @@ H_BUFFER +,%0A get_search_state=None ):%0A @@ -2864,16 +2864,65 @@ fer_name +%0A self.get_search_state = get_search_state %0A%0A de @@ -3219,16 +3219,16 @@ e%5D.text%0A - @@ -3281,16 +3281,103 @@ search.%0A + elif self.get_search_state:%0A return self.get_search_state(cli).text%0A
f7b964b2ce42d8c5cb6707cd571cca5eeadb2ff7
Implement feedback on final state of firmware
confluent_server/confluent/firmwaremanager.py
confluent_server/confluent/firmwaremanager.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2017 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# provide managing firmware update process and firmware repository if/when
# the time comes

import confluent.exceptions as exc
import confluent.messages as msg
import eventlet

updatesbytarget = {}

def execupdate(handler, filename, updateobj):
    try:
        handler(filename, progress=updateobj.handle_progress)
        updateobj.handle_progress({'phase': 'complete', 'progress': 100.0})
    except Exception as e:
        updateobj.handle_progress({'phase': 'error', 'progress': 0.0,
                                   'detail': str(e)})

class Updater(object):
    def __init__(self, node, handler, filename, tenant=None, name=None):
        self.node = node
        self.phase = 'initializing'
        self.detail = ''
        self.percent = 0.0
        self.updateproc = eventlet.spawn(execupdate, handler, filename, self)
        if (node, tenant) not in updatesbytarget:
            updatesbytarget[(node, tenant)] = {}
        if name is None:
            name = 1
            while '{0}'.format(name) in updatesbytarget[(node, tenant)]:
                name += 1
        self.name = '{0}'.format(name)
        updatesbytarget[(node, tenant)][self.name] = self

    def handle_progress(self, progress):
        self.phase = progress['phase']
        self.percent = float(progress['progress'])
        self.detail = progress.get('detail', '')

    def cancel(self):
        self.updateproc.kill()

    @property
    def progress(self):
        return {'phase': self.phase, 'progress': self.percent,
                'detail': self.detail}

def remove_updates(nodes, tenant, element):
    if len(element) < 5:
        raise exc.InvalidArgumentException()
    upid = element[-1]
    for node in nodes:
        try:
            upd = updatesbytarget[(node, tenant)][upid]
        except KeyError:
            raise exc.NotFoundException('No active update matches request')
        upd.cancel()
        del updatesbytarget[(node, tenant)][upid]
        yield msg.DeletedResource(
            'nodes/{0}/inventory/firmware/updates/active/{1}'.format(
                node, upid))

def list_updates(nodes, tenant, element):
    showmode = False
    if len(element) > 4:
        showmode = True
        upid = element[-1]
    for node in nodes:
        if showmode:
            try:
                updater = updatesbytarget[(node, tenant)][upid]
            except KeyError:
                raise exc.NotFoundException('No matching update process found')
            yield msg.KeyValueData(updater.progress, name=node)
        else:
            for updateid in updatesbytarget.get((node, tenant), {}):
                yield msg.ChildCollection(updateid)
Python
0
@@ -872,16 +872,29 @@ %0A + completion = handler @@ -940,16 +940,83 @@ ogress)%0A + if completion is None:%0A completion = 'complete'%0A @@ -1051,26 +1051,26 @@ phase': -' complet -e' +ion , 'progr
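Decoded, this diff lets a handler report its own final state: the handler's return value becomes the final phase, defaulting to 'complete' when it returns None. execupdate after the change reads roughly as follows (reconstructed from the diff, so treat it as approximate):

    def execupdate(handler, filename, updateobj):
        try:
            completion = handler(filename, progress=updateobj.handle_progress)
            if completion is None:
                completion = 'complete'
            updateobj.handle_progress({'phase': completion, 'progress': 100.0})
        except Exception as e:
            updateobj.handle_progress({'phase': 'error', 'progress': 0.0,
                                       'detail': str(e)})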
d4f2e9edf2b07cc224dbccc12e583a4bef2429dc
Load config directly in save_last_tweet
press_sec_bot_plus.py
press_sec_bot_plus.py
#!/usr/bin/env python
import sys
from datetime import date
from ConfigParser import SafeConfigParser, NoOptionError, NoSectionError
from subprocess import Popen, PIPE
from distutils.spawn import find_executable
from tempfile import NamedTemporaryFile
from io import BytesIO

import twitter
import jinja2
from PIL import Image

config_file = 'press_sec_bot_plus.conf'

def load_config():
    global config_file
    config = SafeConfigParser()
    if not config.read(config_file):
        print("Couldn't load configuration.")
        sys.exit(1)
    return config

def save_config(config):
    with open(config_file, 'w') as f:
        config.write(f)

def save_last_tweet(config, tweet_id):
    if not config.has_section('saved_state'):
        config.add_section('saved_state')
    config.set('saved_state', 'last_tweet_id', str(tweet_id))
    save_config(config)

def api_from_config(config):
    api = twitter.Api(
        consumer_key=config.get('twitter', 'consumer_key'),
        consumer_secret=config.get('twitter', 'consumer_secret'),
        access_token_key=config.get('twitter', 'access_token_key'),
        access_token_secret=config.get('twitter', 'access_token_secret'),
        tweet_mode='extended')
    return api

def render_tweet_html(tweet):
    date_format = '%B %-d, %Y'
    context = {
        'body': process_tweet_text(tweet),
        'date': date.fromtimestamp(tweet.created_at_in_seconds).strftime(date_format)
    }
    return jinja2.Environment(
        loader=jinja2.FileSystemLoader('./')
    ).get_template('release_template.html').render(context)

def process_tweet_text(tweet):
    text = tweet.full_text
    for url in tweet.urls:
        text = text.replace(url.url, url.expanded_url)
    for media in tweet.media or []:
        text = text.replace(media.url, '')
    return jinja2.Markup(text.replace('\n', '<br>').strip())

def html_to_png(html):
    command = ['wkhtmltoimage']
    if not find_executable(command[0]):
        raise ImportError('%s not found' % command[0])
    command += ['-f', 'png']       # format output as PNG
    command += ['--zoom', '2']     # retina image
    command += ['--width', '750']  # viewport 750px wide
    command += ['-']               # read from stdin
    command += ['-']               # write to stdout
    wkhtml_process = Popen(command, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    (output, err) = wkhtml_process.communicate(input=html.encode('utf-8'))
    image = Image.open(BytesIO(output))
    image = set_transparent_pixel(image)
    return image

def set_transparent_pixel(image):
    pixel_location = (0, 0)
    pixel_colour = (255, 255, 255, 254)  # nearly opaque white pixel
    image.putpixel(pixel_location, pixel_colour)
    return image

def release_tweet(tweet, api):
    """Formats and publishes a Tweet to the account"""
    tweet_html = render_tweet_html(tweet)
    image = html_to_png(tweet_html)

    status = ''
    media = []  # Max 4 photos, or 1 video or 1 GIF
    for media_item in tweet.media or []:
        extra_media_url = 'https://twitter.com/%s/status/%d' % (tweet.user.screen_name, tweet.id)
        if media_item.type == 'video':
            status = '[Video: %s]' % extra_media_url
        if media_item.type == 'animated_gif':
            status = '[GIF: %s]' % extra_media_url
        if media_item.type == 'photo':
            if len(media) < 3:
                media.append(media_item.media_url_https)
                # Use large photo size if available
                if media_item.sizes.has_key('large'):
                    media[-1] += ':large'
            else:
                if status != '':
                    status += '\n'
                status += '[Photo: %s]' % extra_media_url

    print(status)
    print(media)
    with NamedTemporaryFile(suffix='.png') as png_file:
        image.save(png_file, format='PNG', dpi=(144, 144))
        media.insert(0, png_file)
        api.PostUpdate(status=status, media=media)

def poll_for_updates(api, account_to_follow, starting_id=None, interval=300):
    """Gets new tweets and releases them every interval (seconds).

    If starting_id is provided, the initial run will start with all tweets
    since that id, up to a maximum of 200."""
    from time import sleep

    latest_tweet_id = starting_id or api.GetUserTimeline(screen_name=account_to_follow, count=1)[0].id
    while True:
        new_tweets = api.GetUserTimeline(screen_name=account_to_follow, since_id=latest_tweet_id,
                                         count=200, include_rts=False)
        # process the list in reverse order, to preserve time-order
        for tweet in new_tweets[::-1]:
            release_tweet(tweet, api)
        if len(new_tweets) > 0:
            latest_tweet_id = new_tweets[0].id
            save_last_tweet(load_config(), latest_tweet_id)
        sleep(interval)

def main():
    config = load_config()
    api = api_from_config(config)
    account_to_follow = config.get('settings', 'account_to_follow')

    try:
        last_tweet_id = int(config.get('saved_state', 'last_tweet_id'))
    except (NoOptionError, NoSectionError) as e:
        last_tweet_id = None

    poll_for_updates(api, account_to_follow, last_tweet_id)

if __name__ == "__main__":
    main()
Python
0.000001
@@ -669,16 +669,8 @@ eet( -config, twee @@ -675,16 +675,43 @@ eet_id): +%0A config = load_config() %0A if @@ -4734,23 +4734,8 @@ eet( -load_config(), late
26cb100e7e4782cdc4d7a55f6a096a9da2db2b5c
fix bug 1495003: add search to GraphicsDeviceAdmin
webapp-django/crashstats/crashstats/admin.py
webapp-django/crashstats/crashstats/admin.py
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.admin.models import LogEntry, ADDITION, CHANGE, DELETION

from crashstats.crashstats.models import (
    GraphicsDevice,
    Signature,
)

# Fix the Django Admin User list display so it shows the columns we care about
UserAdmin.list_display = [
    'email',
    'first_name',
    'last_name',
    'is_superuser',
    'is_staff',
    'is_active',
    'date_joined',
    'last_login'
]

ACTION_TO_NAME = {
    ADDITION: 'add',
    CHANGE: 'change',
    DELETION: 'delete'
}

@admin.register(LogEntry)
class LogEntryAdmin(admin.ModelAdmin):
    date_hierarchy = 'action_time'
    list_display = [
        'action_time',
        'user_email',
        'content_type',
        'object_repr',
        'action_name',
        'get_change_message'
    ]

    def user_email(self, obj):
        return obj.user.email

    def action_name(self, obj):
        return ACTION_TO_NAME[obj.action_flag]

    def has_add_permission(self, request):
        return False

    def has_change_permission(self, request, obj=None):
        # FIXME(willkg): If this always returned False, then this modeladmin
        # doesn't show up in the index. However, this means you get a change
        # page that suggests you can change it, but errors out when saving.
        #
        # We can nix this and use has_view_permission when we upgrade to
        # Django 2.1.
        return request.method != 'POST'

    def has_delete_permission(self, request, obj=None):
        return False

    def has_module_permission(self, request):
        return True

@admin.register(GraphicsDevice)
class GraphicsDeviceAdmin(admin.ModelAdmin):
    list_display = [
        'id',
        'vendor_hex',
        'adapter_hex',
        'vendor_name',
        'adapter_name'
    ]

@admin.register(Signature)
class Signature(admin.ModelAdmin):
    list_display = [
        'signature',
        'first_build',
        'first_date'
    ]
Python
0
@@ -1837,16 +1837,135 @@ '%0A %5D%0A + search_fields = %5B%0A 'vendor_hex',%0A 'adapter_hex',%0A 'vendor_name',%0A 'adapter_name'%0A %5D%0A %0A%0A@admin @@ -2104,16 +2104,64 @@ rst_date'%0A %5D%0A + search_fields = %5B%0A 'signature'%0A %5D%0A
5ac72b84f943b30523b915bc1afd4432760513b1
Add a better __repr__ for syscalls
simuvex/procedures/syscalls/handler.py
simuvex/procedures/syscalls/handler.py
import simuvex
import simuvex.s_cc

import logging
l = logging.getLogger('simuvex.procedures.syscalls')

# TODO: per-OS and per-arch
syscall_map = { }
syscall_map['AMD64'] = { }
syscall_map['AMD64'][60] = 'exit'
syscall_map['AMD64'][0] = 'read'
syscall_map['AMD64'][1] = 'write'
syscall_map['AMD64'][2] = 'open'
syscall_map['AMD64'][3] = 'close'
#syscall_map['AMD64'][4] = 'stat'
syscall_map['AMD64'][5] = 'fstat'
syscall_map['AMD64'][6] = 'lstat'
syscall_map['AMD64'][9] = 'mmap'
syscall_map['AMD64'][231] = 'exit'  # really exit_group, but close enough

syscall_map['CGC'] = { }
syscall_map['CGC'][1] = '_terminate'
syscall_map['CGC'][2] = 'transmit'
syscall_map['CGC'][3] = 'receive'
syscall_map['CGC'][4] = 'fdwait'
syscall_map['CGC'][5] = 'allocate'
syscall_map['CGC'][6] = 'deallocate'
syscall_map['CGC'][7] = 'random'

class handler(simuvex.SimProcedure):
    def run(self):  # pylint:disable=attribute-defined-outside-init
        syscall_num = self.syscall_num()
        maximum = self.state.posix.maximum_symbolic_syscalls
        possible = self.state.se.any_n_int(syscall_num, maximum+1)

        if len(possible) == 0:
            raise SimUnsatError("unsatisifiable state attempting to do a syscall")

        if len(possible) > maximum:
            l.warning("Too many possible syscalls. Concretizing to 1.")
            possible = possible[:1]

        l.debug("Possible syscall values: %s", possible)
        self.state.add_constraints(self.state.se.Or(*[syscall_num == n for n in possible]))

        for n in possible:
            if self.state.has_plugin('cgc'):
                map_name = 'CGC'
                syscall_lib = 'cgc'
            elif self.state.arch.name == 'X86':
                # FIXME: THIS IS A GIANT QUICK HACK FOR CGC SCORED EVENT 1!
                map_name = 'CGC'
                syscall_lib = 'cgc'
            else:
                map_name = self.state.arch.name
                syscall_lib = 'syscalls'

            if n not in syscall_map[map_name]:
                l.error("no syscall %d for arch %s", n, map_name)
                if simuvex.o.BYPASS_UNSUPPORTED_SYSCALL in self.state.options:
                    self.state.log.add_event('resilience', resilience_type='syscall',
                                             syscall=n, message='unsupported syscall')
                    return self.state.se.Unconstrained('syscall_%d' % n, self.state.arch.bits)
                else:
                    raise simuvex.UnsupportedSyscallError("no syscall %d for arch %s", n, map_name)

            callname = syscall_map[map_name][n]
            l.debug("Routing to syscall %s", callname)
            cc = simuvex.s_cc.SyscallCC[self.state.arch.name](self.state.arch)
            self._syscall = simuvex.SimProcedures[syscall_lib][callname](self.state,
                                                                         ret_to=self.state.regs.ip,
                                                                         convention=cc)
            self.successors.extend(self._syscall.successors)
            self.flat_successors.extend(self._syscall.flat_successors)
            self.unsat_successors.extend(self._syscall.unsat_successors)

    @property
    def syscall(self):
        return self._syscall

    def syscall_num(self):
        if self.state.arch.name == 'AMD64':
            return self.state.regs.rax
        if self.state.arch.name == 'X86':
            return self.state.regs.eax
        if self.state.arch.name == 'MIPS32':
            return self.state.regs.v0
        raise UnsupportedSyscallError("syscall_num is not implemented for architecture %s",
                                      self.state.arch.name)

from ...s_errors import UnsupportedSyscallError
from ...s_errors import SimUnsatError
Python
0.999329
@@ -929,16 +929,45 @@ de-init%0A + self.callname = None%0A @@ -1683,201 +1683,8 @@ gc'%0A - elif self.state.arch.name == 'X86':%0A # FIXME: THIS IS A GIANT QUICK HACK FOR CGC SCORED EVENT 1!%0A map_name = 'CGC'%0A syscall_lib = 'cgc'%0A @@ -2337,16 +2337,21 @@ +self. callname @@ -2423,16 +2423,21 @@ ll %25s%22, +self. callname @@ -2581,16 +2581,21 @@ ll_lib%5D%5B +self. callname @@ -2856,16 +2856,135 @@ ssors)%0A%0A + def __repr__(self):%0A return '%3CSyscall (%25s)%3E' %25 ('Unsupported' if self.callname is None else self.callname)%0A%0A @pro
912729a30f619ac4aa3ac8edbfc751fffb146256
Make port explicit in configuration so that it is more clear how to change to MySQL
appengine/flexible/django_cloudsql/mysite/settings.py
appengine/flexible/django_cloudsql/mysite/settings.py
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os

BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'pf-@jxtojga)z+4s*uwbgjrq$aep62-thd0q7f&o77xtpka!_m'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# SECURITY WARNING: App Engine's security features ensure that it is safe to
# have ALLOWED_HOSTS = ['*'] when the app is deployed. If you deploy a Django
# app not on App Engine, make sure to set an appropriate host here.
# See https://docs.djangoproject.com/en/1.10/ref/settings/
ALLOWED_HOSTS = ['*']

# Application definition

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'polls'
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)

ROOT_URLCONF = 'mysite.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'mysite.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases

# [START dbconfig]
DATABASES = {
    'default': {
        # If you are using Cloud SQL for MySQL rather than PostgreSQL, set
        # 'ENGINE': 'django.db.backends.mysql' instead of the following.
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'polls',
        'USER': '<your-database-user>',
        'PASSWORD': '<your-database-password>',
    }
}
# In the flexible environment, you connect to CloudSQL using a unix socket.
# Locally, you can use the CloudSQL proxy to proxy a localhost connection
# to the instance
DATABASES['default']['HOST'] = '/cloudsql/<your-cloudsql-connection-string>'
if os.getenv('GAE_INSTANCE'):
    pass
else:
    DATABASES['default']['HOST'] = '127.0.0.1'
# [END dbconfig]

# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/

# [START staticurl]
# Fill in your cloud bucket and switch which one of the following 2 lines
# is commented to serve static content from GCS
# STATIC_URL = 'https://storage.googleapis.com/<your-gcs-bucket>/static/'
STATIC_URL = '/static/'
# [END staticurl]

STATIC_ROOT = 'static/'
Python
0.000003
@@ -3112,24 +3112,200 @@ password%3E',%0A + # For MySQL, set 'PORT': '3306' instead of the following. Any Cloud%0A # SQL Proxy instances running locally must also be set to tcp:3306.%0A 'PORT': '5432',%0A %7D%0A%7D%0A# In
da15aa46a6906dda35e80111a873b6dbbc12dcef
Define properties
turbustat/statistics/scf/scf.py
turbustat/statistics/scf/scf.py
# Licensed under an MIT open source license - see LICENSE

import numpy as np

from ..psds import pspec


class SCF(object):
    '''
    Computes the Spectral Correlation Function of a data cube
    (Rosolowsky et al, 1999).

    Parameters
    ----------
    cube : numpy.ndarray
        Data cube.
    size : int, optional
        Maximum size roll over which SCF will be calculated.
    '''

    def __init__(self, cube, size=11):
        super(SCF, self).__init__()
        self.cube = cube
        if size % 2 == 0:
            print "Size must be odd. Reducing size to next lowest odd number."
            self.size = size - 1
        else:
            self.size = size

        self.scf_surface = None

    def compute_surface(self):
        '''
        Compute the SCF up to the given size.
        '''

        self.scf_surface = np.zeros((self.size, self.size))

        dx = np.arange(self.size) - self.size / 2
        dy = np.arange(self.size) - self.size / 2

        for i in dx:
            for j in dy:
                tmp = np.roll(self.cube, i, axis=1)
                tmp = np.roll(tmp, j, axis=2)

                values = np.nansum(((self.cube - tmp) ** 2), axis=0) / \
                    (np.nansum(self.cube ** 2, axis=0) +
                     np.nansum(tmp ** 2, axis=0))

                scf_value = 1. - \
                    np.sqrt(np.nansum(values) / np.sum(np.isfinite(values)))

                self.scf_surface[
                    i + self.size / 2, j + self.size / 2] = scf_value

        return self

    def compute_spectrum(self, logspacing=False, **kwargs):
        '''
        Compute the 1D spectrum as a function of lag. Can optionally
        use log-spaced bins. kwargs are passed into the pspec function,
        which provides many options. The default settings are applicable
        in nearly all use cases.

        Parameters
        ----------
        logspacing : bool, optional
            Return logarithmically spaced bins for the lags.
        '''

        # If scf_surface hasn't been computed, do it
        if self.scf_surface is None:
            self.compute_surface()

        self.lags, self.scf_spectrum = \
            pspec(self.scf_surface, logspacing=logspacing, **kwargs)

    def run(self, logspacing=False, verbose=False):
        '''
        Computes the SCF. Necessary to maintain package standards.

        Parameters
        ----------
        logspacing : bool, optional
            Return logarithmically spaced bins for the lags.
        verbose : bool, optional
            Enables plotting.
        '''

        self.compute_surface()
        self.compute_spectrum()

        if verbose:
            import matplotlib.pyplot as p

            p.subplot(2, 1, 1)
            p.imshow(self.scf_surface, origin="lower", interpolation="nearest")
            p.colorbar()
            p.subplot(2, 1, 2)
            p.hist(self.scf_surface.ravel())
            p.show()


class SCF_Distance(object):
    '''
    Calculates the distance between two data cubes based on their SCF
    surfaces. The distance is the L2 norm between the surfaces. We weight
    the surface by 1/r^2 where r is the distance from the centre.

    Parameters
    ----------
    cube1 : numpy.ndarray
        Data cube.
    cube2 : numpy.ndarray
        Data cube.
    size : int, optional
        Maximum size roll over which SCF will be calculated.
    fiducial_model : SCF
        Computed SCF object. Use to avoid recomputing.
    weighted : bool, optional
        Sets whether to apply the 1/r^2 weighting to the distance.
    '''

    def __init__(self, cube1, cube2, size=11, fiducial_model=None,
                 weighted=True):
        super(SCF_Distance, self).__init__()
        self.cube1 = cube1
        self.cube2 = cube2
        self.size = size
        self.weighted = weighted

        if fiducial_model is not None:
            self.scf1 = fiducial_model
        else:
            self.scf1 = SCF(self.cube1, self.size)
            self.scf1.run()

        self.scf2 = SCF(self.cube2, self.size)
        self.scf2.run()

        self.distance = None

    def distance_metric(self, verbose=False):
        '''
        Compute the distance between the surfaces.

        Parameters
        ----------
        verbose : bool, optional
            Enables plotting.
        '''

        dx = np.arange(self.size) - self.size / 2
        dy = np.arange(self.size) - self.size / 2

        a, b = np.meshgrid(dx, dy)

        if self.weighted:
            # Centre pixel set to 1
            a[np.where(a == 0)] = 1.
            b[np.where(b == 0)] = 1.
            dist_weight = 1 / np.sqrt(a ** 2 + b ** 2)
        else:
            dist_weight = np.ones((self.size, self.size))

        difference = (
            (self.scf1.scf_surface - self.scf2.scf_surface) * dist_weight) ** 2.
        self.distance = np.sqrt(
            np.nansum(difference) / np.sum(np.isfinite(difference)))

        if verbose:
            import matplotlib.pyplot as p

            # print "Distance: %s" % (self.distance)

            p.subplot(1, 3, 1)
            p.imshow(
                self.scf1.scf_surface, origin="lower", interpolation="nearest")
            p.title("SCF1")
            p.colorbar()
            p.subplot(1, 3, 2)
            p.imshow(
                self.scf2.scf_surface, origin="lower", interpolation="nearest")
            p.title("SCF2")
            p.colorbar()
            p.subplot(1, 3, 3)
            p.imshow(difference, origin="lower", interpolation="nearest")
            p.title("Difference")
            p.colorbar()

            p.show()

        return self
Python
0.000214
@@ -676,32 +676,33 @@ e%0A%0A self. +_ scf_surface = No @@ -697,32 +697,245 @@ surface = None%0A%0A + @property%0A def scf_surface(self):%0A return self._scf_surface%0A%0A @property%0A def scf_spectrum(self):%0A return self._scf_spectrum%0A%0A @property%0A def lags(self):%0A return self._lags%0A%0A def compute_ @@ -1025,32 +1025,33 @@ '%0A%0A self. +_ scf_surface = np @@ -1637,24 +1637,25 @@ self. +_ scf_surface%5B @@ -1658,33 +1658,10 @@ ace%5B -%0A i + +i+ self @@ -1669,18 +1669,14 @@ size - / +/ 2, j - + ++ self @@ -1680,19 +1680,17 @@ elf.size - / +/ 2%5D = scf @@ -2325,16 +2325,17 @@ self. +_ lags, se @@ -2333,24 +2333,25 @@ _lags, self. +_ scf_spectrum
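The diff for this record converts plain attributes (scf_surface, scf_spectrum, lags) into read-only properties backed by underscore-prefixed members. A minimal standalone sketch of that pattern, with an invented class name for illustration:

import numpy as np

class Surface(object):
    def __init__(self):
        # Internal storage, written only by compute().
        self._values = None

    @property
    def values(self):
        # Read-only public view of the computed surface.
        return self._values

    def compute(self):
        self._values = np.zeros((11, 11))
        return self

surf = Surface().compute()
print(surf.values.shape)    # (11, 11)
# surf.values = 1 would raise AttributeError: can't set attribute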
0d2079b1dcb97708dc55c32d9e2c1a0f12595875
Replace string substitution with string formatting
salt/runners/launchd.py
salt/runners/launchd.py
# -*- coding: utf-8 -*-
'''
Manage launchd plist files
'''

# Import python libs
import os
import sys


def write_launchd_plist(program):
    '''
    Write a launchd plist for managing salt-master or salt-minion

    CLI Example:

    .. code-block:: bash

        salt-run launchd.write_launchd_plist salt-master
    '''
    plist_sample_text = """
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
    <key>Label</key>
    <string>org.saltstack.{program}</string>
    <key>ProgramArguments</key>
    <array>
        <string>{python}</string>
        <string>{script}</string>
    </array>
    <key>RunAtLoad</key>
    <true/>
</dict>
</plist>
    """.strip()

    supported_programs = ['salt-master', 'salt-minion']
    if program not in supported_programs:
        sys.stderr.write("Supported programs: %r\n" % supported_programs)
        sys.exit(-1)

    sys.stdout.write(
        plist_sample_text.format(
            program=program,
            python=sys.executable,
            script=os.path.join(os.path.dirname(sys.executable), program)
        )
    )
Python
0.001018
@@ -339,19 +339,19 @@ _text = -%22%22%22 +''' %0A%3C?xml v @@ -769,11 +769,11 @@ -%22%22%22 +''' .str @@ -902,17 +902,17 @@ r.write( -%22 +' Supporte @@ -927,16 +927,24 @@ ms: -%25r%5Cn%22 %25 +%7B0!r%7D%5Cn'.format( supp @@ -958,16 +958,17 @@ rograms) +) %0A
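This record's diff swaps %-style substitution for str.format(); the {0!r} placeholder reproduces the repr() conversion that %r performed. A small sketch of the two equivalent forms:

programs = ['salt-master', 'salt-minion']

# Old style: %-substitution with repr() conversion
print("Supported programs: %r\n" % programs)

# New style: str.format() with an explicit positional index and !r conversion
print("Supported programs: {0!r}\n".format(programs))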
b89a469a051f3844b90028182742d3dad36271ae
Format args
processors/slurper.py
processors/slurper.py
import os
import json
import datetime
import time
import requests
import sqlalchemy as sa
import logging

import boto
from boto.s3.key import Key
from io import BytesIO

from processors.config import DB_CONN, AWS_KEY, AWS_SECRET, S3_BUCKET, \
    CARTODB_SETTINGS
from processors.poll import poll


class Slurper(object):
    def __init__(self):
        self.engine = sa.create_engine(DB_CONN)
        self.time_format = "%a %b %d %H:%M:%S %Z %Y"
        self.gps_data_url = "https://gisapps.cityofchicago.org/PlowTrackerWeb/services/plowtrackerservice/getTrackingData"
        self.route_points_table = sa.Table('route_points', sa.MetaData(),
                                           sa.Column('id', sa.Integer, primary_key=True),
                                           sa.Column('object_id', sa.Integer),
                                           sa.Column('posting_time', sa.DateTime),
                                           sa.Column('direction', sa.Integer),
                                           sa.Column('x', sa.Float),
                                           sa.Column('y', sa.Float),
                                           sa.Column('lat', sa.Float),
                                           sa.Column('lon', sa.Float),
                                           sa.Column('inserted', sa.Boolean, server_default=sa.text('FALSE')),
                                           sa.UniqueConstraint('object_id', 'posting_time'))
        self.assets_table = sa.Table('assets', sa.MetaData(),
                                     sa.Column('object_id', sa.Integer, primary_key=True),
                                     sa.Column('asset_name', sa.String),
                                     sa.Column('asset_type', sa.String))

    def initializeDB(self, recreate=False):
        if recreate:
            self.backup()
            self.deleteFromCartoDB()
            self.route_points_table.drop(bind=self.engine, checkfirst=True)
            self.assets_table.drop(bind=self.engine, checkfirst=True)

        self.route_points_table.create(bind=self.engine, checkfirst=True)
        self.assets_table.create(bind=self.engine, checkfirst=True)

    def fetchData(self):
        def data() :
            payload = {"TrackingDataInput":{"envelope":{"minX":0, "minY":0, "maxX":0, "maxY":0}}}
            while True:
                try:
                    response = requests.post(self.gps_data_url, data=json.dumps(payload))
                except Exception as e :
                    logging.warn(e)
                    time.sleep(10)
                    continue
                yield response.json()

        for locations in poll(data()) :
            try:
                yield locations['TrackingDataResponse']['locationList']
            except KeyError :
                logging.warn("Expected 'TrackingResponse' and 'locationList' not in response")

    def insertPoints(self, route_points):
        # This is inside the loop as an act of perhaps irrational
        # defensive programming, as the script stopped updating the db for
        # no apparent reasons and without throwing an error.
        for route_point in route_points:
            point = {}
            (point['object_id'], asset_name, asset_type,
             point['posting_time'], point['direction'],
             point['x'], point['y'], point['lat'],
             point['lon']) = (int(route_point['assetName'].replace("S","")), # cast the assetName to an integer
                              route_point['assetName'],
                              route_point['assetType'],
                              route_point['postingTimeFormatted'],
                              route_point['directionDegrees'],
                              route_point['XCoord'],
                              route_point['YCoord'],
                              route_point['latitude'],
                              route_point['longitude'])

            point['posting_time'] = self.formatTime(point['posting_time'])

            conn = self.engine.connect()
            trans = conn.begin()
            try:
                conn.execute(self.route_points_table.insert(), **point)
                trans.commit()
            except sa.exc.IntegrityError:
                trans.rollback()
                trans = conn.begin()
                update_stmt = self.route_points_table.update()\
                    .where(self.route_points_table.c.object_id == point['object_id'])\
                    .where(self.route_points_table.c.posting_time == point['posting_time'])
                conn.execute(update_stmt.values(**point))
                trans.commit()

            trans = conn.begin()
            try:
                asset_info = {
                    'object_id': point['object_id'],
                    'asset_name': asset_name,
                    'asset_type': asset_type
                }
                conn.execute(self.assets_table.insert(), **asset_info)
                trans.commit()
            except sa.exc.IntegrityError:
                trans.rollback()

            conn.close()
            self.engine.dispose()

    def formatTime(self, s) :
        return datetime.datetime(*time.strptime(s, self.time_format)[:6])

    def run(self, recreate=False):
        self.initializeDB(recreate=recreate)
        for route_point in self.fetchData():
            self.insertPoints(route_point)

    def backup(self):
        conn = self.engine.raw_connection()

        s3conn = boto.connect_s3(AWS_KEY, AWS_SECRET)
        bucket = s3conn.get_bucket(S3_BUCKET)

        now = datetime.datetime.now().strftime('%m-%d-%Y_%H:%M')

        for table in ['route_points', 'assets']:
            copy = '''
                COPY (SELECT * FROM {table})
                TO STDOUT
                WITH CSV HEADER DELIMITER ','
            '''.format(table=table)
            fname = 'backups/{now}_{table}.csv'.format(now=now, table=table)
            with open(fname, 'w') as f:
                curs = conn.cursor()
                curs.copy_expert(copy, f)

            key = Key(bucket)
            key.key = fname
            key.set_contents_from_filename(fname)
            key.set_acl('public-read')

        conn.close()

        params = {
            'q': 'SELECT * FROM {table}'.format(CARTODB_SETTINGS['table']),
            'format': 'geojson',
        }
        url = 'https://{user}.cartodb.com/api/v2/sql'.format(user=CARTODB_SETTINGS['user'])
        geojson_dump = requests.get(url, params=params)
        key.key = 'backups/cartodb_{now}.geojson'.format(now=now)
        key.set_contents_from_file(BytesIO(geojson_dump.content))
        key.set_acl('public-read')

        s3conn.close()

    def deleteFromCartoDB(self):
        params = {
            'q': 'DELETE * FROM {table}'.format(CARTODB_SETTINGS['table']),
            'api_key': CARTODB_SETTINGS['api_key'],
        }
        url = 'https://{user}.cartodb.com/api/v2/sql'.format(user=CARTODB_SETTINGS['user'])
        delete = requests.get(url, params=params)


class TestSlurper(Slurper) :
    def fetchData(self):
        from os.path import abspath, join, dirname
        test_feed_dir = abspath(join(dirname(__file__), '..', 'test_data'))
        for test_file in sorted(os.listdir(test_feed_dir)):
            test_file_path = abspath(join(test_feed_dir, test_file))
            test_feed = json.load(open(test_file_path))
            yield test_feed['TrackingDataResponse']['locationList']

    def writeRawResponse(self):
        now = int(datetime.datetime.now().timestamp())
        response = next(self.fetchData())
        with open('%s.json' % now, 'w') as f:
            f.write(json.dumps(response))
Python
0.000045
@@ -6976,32 +6976,38 @@ %7Btable%7D'.format( +table= CARTODB_SETTINGS
d967505ea9db8af0286abe1959a8fcba556b2d7a
add setting the slave of a specified master on a sentinel
salt/states/redismod.py
salt/states/redismod.py
# -*- coding: utf-8 -*-
'''
Management of Redis server
==========================

.. versionadded:: 2014.7.0

:depends:   - redis Python module
:configuration: See :py:mod:`salt.modules.redis` for setup instructions.

.. code-block:: yaml

    key_in_redis:
      redis.string:
        - value: string data

The redis server information specified in the minion config file can be
overridden in states using the following arguments: ``host``, ``post``,
``db``, ``password``.

.. code-block:: yaml

    key_in_redis:
      redis.string:
        - value: string data
        - host: localhost
        - port: 6379
        - db: 0
        - password: somuchkittycat
'''

__virtualname__ = 'redis'


def __virtual__():
    '''
    Only load if the redis module is in __salt__
    '''
    if 'redis.set_key' in __salt__:
        return __virtualname__
    return False


def string(name, value, expire=None, expireat=None, **connection_args):
    '''
    Ensure that the key exists in redis with the value specified

    name
        Redis key to manage

    value
        Data to persist in key

    expire
        Sets time to live for key in seconds

    expireat
        Sets expiration time for key via UNIX timestamp, overrides `expire`
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': 'Key already set to defined value'}

    old_key = __salt__['redis.get_key'](name, **connection_args)

    if old_key != value:
        __salt__['redis.set_key'](name, value, **connection_args)
        ret['changes'][name] = 'Value updated'
        ret['comment'] = 'Key updated to new value'

    if expireat:
        __salt__['redis.expireat'](name, expireat, **connection_args)
        ret['changes']['expireat'] = 'Key expires at {0}'.format(expireat)
    elif expire:
        __salt__['redis.expire'](name, expire, **connection_args)
        ret['changes']['expire'] = 'TTL set to {0} seconds'.format(expire)

    return ret


def absent(name, keys=None, **connection_args):
    '''
    Ensure key absent from redis

    name
        Key to ensure absent from redis

    keys
        list of keys to ensure absent, name will be ignored if this is used
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': 'Key(s) specified already absent'}

    if keys:
        if not isinstance(keys, list):
            ret['result'] = False
            ret['comment'] = '`keys` not formed as a list type'
            return ret
        delete_list = [key for key in keys
                       if __salt__['redis.exists'](key, **connection_args)]
        if not len(delete_list):
            return ret
        __salt__['redis.delete'](*delete_list, **connection_args)
        ret['changes']['deleted'] = delete_list
        ret['comment'] = 'Keys deleted'
        return ret

    if __salt__['redis.exists'](name, **connection_args):
        __salt__['redis.delete'](name, **connection_args)
        ret['comment'] = 'Key deleted'
        ret['changes']['deleted'] = [name]
    return ret
Python
0.000001
@@ -659,16 +659,28 @@ ycat%0A''' +%0Aimport copy %0A%0A__virt @@ -3071,24 +3071,1696 @@ = %5Bname%5D%0A return ret%0A +%0A%0Adef slaveof(name, sentinel_host=None, sentinel_port=None, sentinel_password=None, **connection_args):%0A '''%0A Set this redis instance as a slave.%0A %0A name%0A Master to make this a slave of%0A%0A sentinel_host%0A Ip of the sentinel to check for the master%0A%0A sentinel_port%0A Port of the sentinel to check for the master%0A%0A '''%0A ret = %7B'name': name,%0A 'changes': %7B%7D,%0A 'result': False,%0A 'comment': 'Failed to setup slave'%7D%0A%0A kwargs = copy.copy(connection_args)%0A sentinel_master = __salt__%5B'redis.sentinel_get_master_ip'%5D(name, sentinel_host, sentinel_port, sentinel_password)%0A if sentinel_master%5B'master_host'%5D in __salt__%5B'network.ip_addrs'%5D():%0A ret%5B'result'%5D = True%0A ret%5B'comment'%5D = 'Minion is the master: '.format(name)%0A return ret%0A%0A first_master = __salt__%5B'redis.get_master_ip'%5D(**connection_args)%0A if first_master == sentinel_master:%0A ret%5B'result'%5D = True%0A ret%5B'comment'%5D = 'Minion already slave of master: %7B0%7D'.format(name)%0A return ret%0A%0A%0A if __opts__%5B'test'%5D == True:%0A ret%5B'comment'%5D = 'Minion will be made a slave of %7B0%7D: %7B1%7D'.format(name, sentinel_master%5B'host'%5D)%0A ret%5B'result'%5D = None%0A return ret%0A%0A kwargs.update(**sentinel_master)%0A __salt__%5B'redis.slaveof'%5D(**kwargs)%0A%0A current_master = __salt__%5B'redis.get_master_ip'%5D(**connection_args)%0A if current_master != sentinel_master:%0A return ret%0A%0A ret%5B'result'%5D = True%0A ret%5B'changes'%5D = %7B%0A 'old': first_master,%0A 'new': current_master,%0A %7D%0A ret%5B'comment'%5D = 'Minion successfully connected to master: %7B0%7D'.format(name)%0A%0A return ret%0A
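Outside of Salt, the two primitives this state builds on, asking a sentinel for the current master and repointing a local instance at it, look roughly like this with the redis-py client (the 'mymaster' service name and the addresses are placeholders, and live servers are assumed):

import redis
from redis.sentinel import Sentinel

# Ask a sentinel which host currently serves the named master.
sentinel = Sentinel([('localhost', 26379)], socket_timeout=0.5)
master_host, master_port = sentinel.discover_master('mymaster')

# Make the local redis instance replicate from that master.
local = redis.StrictRedis(host='localhost', port=6379)
local.slaveof(master_host, master_port)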
73f9703abc355f91fb4c4530f73d9310cb501b99
bring in line with changes to OneSampleIterator
lib/neuroimaging/algorithms/onesample.py
lib/neuroimaging/algorithms/onesample.py
import os, types

import numpy as N

from neuroimaging import traits

from neuroimaging.core.image.image import Image, ImageSequenceIterator
from neuroimaging.algorithms.statistics import onesample
from neuroimaging.algorithms.statistics.regression import RegressionOutput

class ImageOneSample(onesample.OneSampleIterator):
    """
    Fit a one sample t to a sequence of images. Input should be either
    a sequence of images (in which case variances are treated as equal)
    or a sequence of pairs of images and weights (in which case the
    variance of each image is a function of the \'weight\' image). The
    \'weight\' image can be a 'std', 'var', or 'weight' -- the appropriate
    transform will be applied.
    """

    all = traits.false
    haveW = traits.false
    t = traits.true
    sd = traits.true
    mean = traits.true
    clobber = traits.false
    path = traits.Str('onesample')
    basename = traits.Str()
    ext = traits.Str('.hdr')
    varatioimg = traits.Any()
    est_varatio = traits.true
    varfiximg = traits.Any()
    est_varfix = traits.true
    which = traits.Trait('mean', 'varatio')

    def weights(self):
        ## TO DO: rename this methods, something like "getinput"
        if self.haveW:
            w = self.witerator.next(value=self.iterator.grid.itervalue())
        else:
            return 1.

        if self.varatioimg is not None:
            value = self.iterator.grid.itervalue()
            self.varatio = self.varatioimg.next(value=value)
        else:
            self.varatio = 1.

        if self.varfiximg is not None:
            value = self.iterator.grid.itervalue()
            self.varfix = self.varfiximg.next(value=value)
        else:
            self.varfix = 0.

        return w

    def __init__(self, input, outputs=[], **keywords):
        traits.HasTraits.__init__(self, **keywords)

        if type(input[0]) in [types.ListType, types.TupleType]:
            self.haveW = True
            imgs = [val[0] for val in input]
            wimgs = [val[1] for val in input]

            self.iterator = ImageSequenceIterator(imgs)

            ## don't know if this should go here....
            #if self.all:
            #    self.iterator.grid.itertype = 'all'
            #    self.iterator.grid = iter(self.iterator.grid)

            self.witerator = ImageSequenceIterator(wimgs, grid=self.iterator.grid)
        else:
            self.iterator = ImageSequenceIterator(input)

        onesample.OneSampleIterator.__init__(self, self.iterator, outputs=outputs, **keywords)

        self.outputs = outputs
        if self.which == 'mean':
            if self.t:
                self.outputs.append(TOutput(self.iterator.grid, path=self.path, clobber=self.clobber, ext=self.ext))
            if self.sd:
                self.outputs.append(SdOutput(self.iterator.grid, path=self.path, clobber=self.clobber, ext=self.ext))
            if self.mean:
                self.outputs.append(MeanOutput(self.iterator.grid, path=self.path, clobber=self.clobber, ext=self.ext))
        else:
            if self.est_varatio:
                self.outputs.append(VaratioOutput(self.iterator.grid, path=self.path, clobber=self.clobber, ext=self.ext))
            if self.est_varfix:
                self.outputs.append(VarfixOutput(self.iterator.grid, path=self.path, clobber=self.clobber, ext=self.ext))

    def fit(self):
        onesample.OneSampleIterator.fit(self, which=self.which)

class ImageOneSampleOutput(RegressionOutput):
    """
    A class to output things a one sample T passes through data. It
    uses the image\'s iterator values to output to an image.
    """

    nout = traits.Int(1)
    clobber = traits.false
    path = traits.Str('onesample')
    ext = traits.Str('.img')

    def __init__(self, grid, basename="", **keywords):
        RegressionOutput.__init__(self, **keywords)
        self.basename = basename
        self.grid = grid

        if not os.path.exists(self.path):
            os.makedirs(self.path)
        self.img = iter(Image('%s/%s%s' % (self.path, self.basename, self.ext), mode='w', clobber=self.clobber, grid=grid))

    def sync_grid(self, img=None):
        """
        Synchronize an image's grid iterator to self.grid's iterator.
        """
        if img is None:
            img = self.img
        img.grid._iterguy = self.grid._iterguy
        iter(img)

    def __iter__(self):
        return self

    def next(self, data=None):
        value = self.grid.itervalue()
        self.img.next(data=data, value=value)

    def extract(self, results):
        raise NotImplementedError

class TOutput(ImageOneSampleOutput):

    Tmax = 100.
    Tmin = -100.

    def __init__(self, grid, **keywords):
        ImageOneSampleOutput(self, grid, 't', **keywords)

    def extract(self, results):
        return N.clip(results.t, self.Tmin, self.Tmax)

class SdOutput(ImageOneSampleOutput):

    def __init__(self, grid, **keywords):
        ImageOneSampleOutput(self, grid, 'sd', **keywords)

    def extract(self, results):
        return results.sd

class MeanOutput(ImageOneSampleOutput):

    def __init__(self, grid, **keywords):
        ImageOneSampleOutput(self, grid, 'effect', **keywords)

    def extract(self, results):
        return results.mu

class VaratioOutput(ImageOneSampleOutput):

    def __init__(self, grid, **keywords):
        ImageOneSampleOutput(self, grid, 'varatio', **keywords)

    def extract(self, results):
        return results.varatio

class VarfixOutput(ImageOneSampleOutput):

    def __init__(self, grid, **keywords):
        ImageOneSampleOutput(self, grid, 'varfix', **keywords)

    def extract(self, results):
        return results.varfix
Python
0
@@ -314,16 +314,34 @@ Iterator +, traits.HasTraits ):%0A %0A @@ -3652,32 +3652,39 @@ (self):%0A +return onesample.OneSam @@ -4276,16 +4276,17 @@ f.path)%0A +%0A
75d9f6339ad7c7cd92bd268277979aa444cd4b59
Normalized the unicode lemma
dnnwsd/corpus/base.py
dnnwsd/corpus/base.py
# -*- coding: utf-8 -*-

import os
import unicodedata

_tokens_with_symbols = {
    u"ee.uu.": u"EEUU",
    u"estados_unidos": u"EEUU",
    u"u.s.a.": u"USA",
    u"sr.": u"sr",
    u"sra.": u"sra",
    u"srta.": u"srta",
    u"a.m.": u"am",
    u"p.m.": u"pm"
}


class Word(object):
    def __init__(self, token, tag=None, lemma=None, is_main_verb=False):
        assert isinstance(token, unicode) and (lemma is None or isinstance(lemma, unicode))

        self.token = token if token.lower() not in _tokens_with_symbols else _tokens_with_symbols[token.lower()]
        self.tag = tag
        self.lemma = lemma
        self.is_main_verb = is_main_verb

    def __unicode__(self):
        tag = self.tag if self.tag else u""
        lemma = self.lemma if self.lemma else u""
        verb = u"verb" if self.is_main_verb else u""

        return u"{} {} {} {}".format(self.token, lemma, tag, verb).strip()

    def __str__(self):
        return unicode(self).encode("utf-8")

    def __repr__(self):
        return str(self)

    def tokens_and_lemmas(self):
        """
        Method to get different combinations of tokens and lemmas, with different use
        of capital letters in order to look for it in a embedding model.
        :return: A list of all possible combinations of a token or a lemma, ordered by importance
        """
        return [
            self.token,
            self.token.lower(),
            self.token.capitalize(),
            self.token.upper(),
            self.lemma,
            self.lemma.capitalize(),
            self.lemma.upper()
        ]


class Sentence(object):
    def __init__(self, words, predicate_index, sense=u'?'):
        assert isinstance(sense, unicode)

        self._words = words
        """:type : list of dnnwsd.corpus.base.Word"""
        self.predicate_index = predicate_index
        """:type : int"""
        self.sense = sense
        """:type : unicode"""

    def __iter__(self):
        for word in self._words:
            yield word

    def __getitem__(self, item):
        return self._words[item]

    def __unicode__(self):
        return "\n".join(map(lambda (i, w): u"{:03d} {}".format(i, unicode(w)), enumerate(self)))

    def __str__(self):
        return unicode(self).encode("utf-8")

    def __repr__(self):
        return str(self)

    def __len__(self):
        return len(self._words)

    def predicate_window(self, window_size):
        """
        Gives the window around the predicate. If the window size is zero or less returns all the words.
        :param window_size: Size of the window
        :return: Words in the window.
        """
        window_size = len(self) if window_size <= 0 else window_size

        start = max(0, self.predicate_index - window_size)
        end = min(len(self), self.predicate_index + window_size + 1)

        return self._words[start:end]

    def predicate(self):
        return self._words[self.predicate_index]


class CorpusDirectoryIterator(object):
    def __init__(self, corpus_dir):
        self._corpus_dir = corpus_dir
        self.verbs = []
        self.__get_verbs__()

    def __get_verbs__(self):
        with open(os.path.join(self._corpus_dir, "verbs"), "r") as f:
            self.verbs = unicodedata.normalize("NFC", f.read().decode("utf-8")).strip().split("\n")

    def __iter__(self):
        raise NotImplementedError


class Corpus(object):
    def __init__(self, lemma):
        assert isinstance(lemma, unicode)

        self.lemma = lemma
        self._sentences = []
        """:type : list of dnnwsd.corpus.base.Sentence"""

    def __iter__(self):
        for sentence in self._sentences:
            yield sentence

    def __getitem__(self, item):
        return self._sentences[item]

    def __unicode__(self):
        return "\n\n".join(map(lambda s: unicode(s), self._sentences))

    def __str__(self):
        return unicode(self).encode("utf-8")

    def __repr__(self):
        return str(self)

    def __len__(self):
        return len(self._sentences)

    def tokens(self, window_size=0):
        """
        Method to return all the tokens for every predicate window of every sentence.
        Useful to get collocations.
        :param window_size: Size of the window. If 0, return all tokens in the sentence.
        :return: A list of the tokens.
        """
        for sentence in self:
            for word in sentence.predicate_window(window_size):
                yield word.token
Python
0.999994
@@ -3478,38 +3478,68 @@ self.lemma = + unicodedata.normalize(%22NFC%22, lemma +) %0A self._s
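The diff above wraps the incoming lemma in unicodedata.normalize("NFC", ...). A short sketch of why that matters: visually identical strings can differ at the code-point level, and NFC collapses them to one canonical form before comparison.

# -*- coding: utf-8 -*-
import unicodedata

# The same visible word in two encodings: precomposed U+00F1 ('ñ')
# versus plain 'n' followed by a combining tilde (U+0303).
composed = u'ni\u00f1o'
decomposed = u'nin\u0303o'

print(composed == decomposed)   # False: different code point sequences

# After NFC normalization both collapse to the precomposed form.
print(unicodedata.normalize('NFC', composed) ==
      unicodedata.normalize('NFC', decomposed))  # True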
a2fff1b9613d6c4349fea5001313bf9e0d1cc052
remove unnecessary import
pergenie/lib/utils/demo.py
pergenie/lib/utils/demo.py
from uuid import uuid4
from datetime import timedelta

from django.core.management.base import BaseCommand, CommandError
from django.db import models, transaction
from django.utils import timezone
from django.conf import settings

from apps.authentication.models import User
from apps.genome.models import Genome
from lib.utils import clogging
log = clogging.getColorLogger(__name__)


# TODO: @transaction.atomic
def create_demo_user():
    '''Create demo user records.

    - demo Genome is defined as:
      - owner = one of the admin users
      - file_name = settings.DEMO_GENOME_FILE_NAME
    - demo User is defined as:
      - is_demo = True
    '''
    admin_user = User.objects.filter(is_admin=True).last()
    if not admin_user:
        raise Exception, '[FATAL] Before create demo user, you need to create admin user: $ python manage.py createsuperuser'

    # Init demo genome (once)
    genome, is_created = Genome.objects.get_or_create(owner=admin_user,
                                                      file_name=settings.DEMO_GENOME_FILE_NAME,
                                                      display_name='Demo VCF',
                                                      file_format=Genome.FILE_FORMAT_VCF,
                                                      population=Genome.POPULATION_UNKNOWN,
                                                      sex=Genome.SEX_UNKNOWN)

    # TODO: Init demo genotype (once)

    # Init demo user
    email = '{}@{}'.format(uuid4(), settings.DOMAIN)
    demo_user = User.objects.create_user(username=email,
                                         email=email,
                                         password='',
                                         is_demo=True)
    genome.readers.add(demo_user)

    return demo_user


def prune_demo_user():
    '''Prune old (not logged in 30 days) demo user records.
    '''
    date_30_days_ago = timezone.now() - timedelta(30)
    not_logged_in_30_days_demo_users = User.objects.filter(is_demo=True, last_login__lt=date_30_days_ago)

    admin_users = User.objects.filter(is_admin=True)
    demo_genomes = Genome.objects.filter(owner__in=admin_users, file_name=settings.DEMO_GENOME_FILE_NAME)

    for genome in demo_genomes:
        for user in not_logged_in_30_days_demo_users:
            if user in genome.readers.all():
                genome.readers.remove(user)

    not_logged_in_30_days_demo_users.delete()
Python
0.000037
@@ -52,116 +52,8 @@ ta%0A%0A -from django.core.management.base import BaseCommand, CommandError%0Afrom django.db import models, transaction%0A from
be7e89bf4a6c784963b43052264ec0a6346c7958
correct tests
pgmpy/tests/test_Factor.py
pgmpy/tests/test_Factor.py
import unittest

from pgmpy.Factor import Factor
from pgmpy.tests import help_functions as hf
from collections import OrderedDict
import numpy.testing as np_test
import numpy as np
from pgmpy import Exceptions


class TestFactorInit(unittest.TestCase):
    def test_class_init(self):
        phi = Factor(['x1', 'x2', 'x3'], [2, 2, 2], np.ones(8))
        dic = {'x1': ['x1_0', 'x1_1'], 'x2': ['x2_0', 'x2_1'], 'x3': ['x3_0', 'x3_1']}
        hf.assertOrderedDictEqual(phi.variables, OrderedDict(sorted(dic.items(), key=lambda t: t[1])))
        np_test.assert_array_equal(phi.cardinality, np.array([2, 2, 2]))
        np_test.assert_array_equal(phi.values, np.ones(8))

    def test_class_init_sizeerror(self):
        self.assertRaises(Exceptions.SizeError, Factor, ['x1', 'x2', 'x3'], [2, 2, 2], np.ones(9))


class TestFactorMethods(unittest.TestCase):
    def setUp(self):
        self.phi = Factor(['x1', 'x2', 'x3'], [2, 2, 2], np.random.uniform(5, 10, size=8))
        self.phi1 = Factor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))

    def test_assignment(self):
        self.assertListEqual(self.phi.assignment([0]), [['x1_0', 'x2_0', 'x3_0']])
        self.assertListEqual(self.phi.assignment([4, 5, 6]),
                             [['x1_0', 'x2_0', 'x3_1'],
                              ['x1_1', 'x2_0', 'x3_1'],
                              ['x1_0', 'x2_1', 'x3_1']])
        self.assertListEqual(self.phi.assignment(np.array([4, 5, 6])),
                             [['x1_0', 'x2_0', 'x3_1'],
                              ['x1_1', 'x2_0', 'x3_1'],
                              ['x1_0', 'x2_1', 'x3_1']])

    def test_assignment_indexerror(self):
        self.assertRaises(IndexError, self.phi.assignment, [10])
        self.assertRaises(IndexError, self.phi.assignment, [1, 3, 10, 5])
        self.assertRaises(IndexError, self.phi.assignment, np.array([1, 3, 10, 5]))

    def test_get_cardinality(self):
        self.assertEqual(self.phi.get_cardinality('x1'), 2)
        self.assertEqual(self.phi.get_cardinality('x2'), 2)
        self.assertEqual(self.phi.get_cardinality('x3'), 2)

    def test_get_cardinality_scopeerror(self):
        self.assertRaises(Exceptions.ScopeError, self.phi.get_cardinality, 'x4')

    def test_marginalize(self):
        self.phi1.marginalize('x1')
        np_test.assert_array_equal(self.phi1.values, np.array([1, 5, 9, 13, 17, 21]))
        self.phi1.marginalize(['x2'])
        np_test.assert_array_equal(self.phi1.values, np.array([15, 51]))
        self.phi1.marginalize('x3')
        np_test.assert_array_equal(self.phi1.values, np.array([66]))

    def test_marginalize_scopeerror(self):
        self.assertRaises(Exceptions.ScopeError, self.phi.marginalize, 'x4')
        self.assertRaises(Exceptions.ScopeError, self.phi.marginalize, ['x4'])
        self.phi.marginalize('x1')
        self.assertRaises(Exceptions.ScopeError, self.phi.marginalize, 'x1')

    def test_normalize(self):
        self.phi1.normalize()
        np_test.assert_almost_equal(self.phi1.values, np.array(
            [0, 0.01515152, 0.03030303, 0.04545455,
             0.06060606, 0.07575758, 0.09090909, 0.10606061,
             0.12121212, 0.13636364, 0.15151515, 0.16666667]))

    def test_reduce(self):
        self.phi1.reduce(['x1_0', 'x2_0'])
        np_test.assert_array_equal(self.phi1.values, np.array([0, 6]))

    def test_reduce_typeerror(self):
        self.assertRaises(TypeError, self.phi1.reduce, 'x10')
        self.assertRaises(TypeError, self.phi1.reduce, ['x10'])

    def test_reduce_scopeerror(self):
        self.assertRaises(Exceptions.ScopeError, self.phi1.reduce, 'x4_1')

    def test_reduce_sizeerror(self):
        self.assertRaises(Exceptions.SizeError, self.phi1.reduce, 'x3_5')

    def factor_product(self):
        from pgmpy import Factor
        phi = Factor.Factor(['x1', 'x2'], [2, 2], range(4))
        phi1 = Factor.Factor(['x3', 'x4'], [2, 2], range(4))
        factor_product = Factor.factor_product(phi, phi1)
        np_test.assert_array_equal(factor_product.values,
                                   np.array([0, 0, 0, 0, 0, 1, 2, 3, 0, 2, 4, 6, 0, 3, 6, 9]))
        self.assertEqual(factor_product.variables, OrderedDict([
            ('x1', ['x1_0', 'x1_1']),
            ('x2', ['x2_0', 'x2_1']),
            ('x3', ['x3_0', 'x3_1']),
            ('x4', ['x4_0', 'x4_1'])]
        ))
        phi = Factor.Factor(['x1', 'x2'], [3, 2], range(6))
        phi1 = Factor.Factor(['x2', 'x3'], [2, 2], range(4))
        factor_product = Factor.factor_product(phi, phi1)
        np_test.assert_array_equal(factor_product.values,
                                   np.array([0, 1, 0, 3, 0, 5, 0, 3, 4, 9, 8, 15]))
        self.assertEqual(factor_product.variables, OrderedDict(
            [('x1', ['x1_0', 'x1_1', 'x1_2']),
             ('x2', ['x2_0', 'x2_1']),
             ('x3', ['x3_0', 'x3_1'])]))

    def tearDown(self):
        del self.phi
        del self.phi1
Python
0.00001
@@ -45,25 +45,8 @@ tor%0A -from pgmpy.tests impo
a2a6b336295e65d29881e83ba45e1758c4582bbb
add available filters
corehq/apps/reports/standard/users/reports.py
corehq/apps/reports/standard/users/reports.py
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy

from memoized import memoized

from corehq.apps.reports.datatables import DataTablesColumn, DataTablesHeader
from corehq.apps.reports.dispatcher import UserManagementReportDispatcher
from corehq.apps.reports.generic import GenericTabularReport
from corehq.apps.reports.standard import ProjectReport
from corehq.apps.users.models import UserHistory


class UserHistoryReport(GenericTabularReport, ProjectReport):
    slug = 'user_history'
    name = ugettext_lazy("User History")
    section_name = ugettext_lazy("User Management")

    dispatcher = UserManagementReportDispatcher

    # ToDo: Add filters
    fields = []

    description = ugettext_lazy("History of user updates")
    ajax_pagination = True

    sortable = False

    @property
    def headers(self):
        # ToDo: Add headers
        h = [
            DataTablesColumn(_("User")),
        ]

        return DataTablesHeader(*h)

    @property
    def total_records(self):
        return self._get_queryset().count()

    @memoized
    def _get_queryset(self):
        # ToDo: add query based on params
        return UserHistory.objects.none()

    @property
    def rows(self):
        records = self._get_queryset().order_by('-changed_at')[
            self.pagination.start:self.pagination.start + self.pagination.count
        ]
        for record in records:
            yield _user_history_row(record)


def _user_history_row(record):
    # ToDo: add render for each row
    return []
Python
0
@@ -380,16 +380,31 @@ d import + DatespanMixin, Project @@ -406,24 +406,85 @@ ojectReport%0A +from corehq.apps.reports.util import datespan_from_beginning%0A from corehq. @@ -546,16 +546,31 @@ yReport( +DatespanMixin, GenericT @@ -784,16 +784,24 @@ Do: Add +pending filters%0A @@ -818,118 +818,376 @@ = %5B -%5D%0A%0A description = ugettext_lazy(%22History of user updates%22)%0A ajax_pagination = True%0A%0A sortable = False +%0A 'corehq.apps.reports.filters.users.ExpandedMobileWorkerFilter',%0A 'corehq.apps.reports.filters.dates.DatespanFilter',%0A %5D%0A%0A description = ugettext_lazy(%22History of user updates%22)%0A ajax_pagination = True%0A%0A sortable = False%0A%0A @property%0A def default_datespan(self):%0A return datespan_from_beginning(self.domain_object, self.timezone) %0A%0A
e1f6e98d7e3a1840567b1b5e379f87ec1e0aa9dc
add two more views
connector8/__openerp__.py
connector8/__openerp__.py
# -*- coding: utf-8 -*-

{'name': 'Connector8',
 'version': '0.1',
 'author': 'Openerp Connector Core Editors and Amdeb',
 'license': 'AGPL-3',
 'category': 'Generic Modules',
 'description': """
This is a port of OCA connector to Odoo 8.0
""",
 'depends': ['mail'
             ],
 'data': ['security/connector_security.xml',
          'security/ir.model.access.csv',
          'queue/model_view.xml',
          'queue/queue_data.xml',
          'checkpoint/checkpoint_view.xml',
          'res_partner_view.xml',
          ],
 'installable': True,
 'application': True,
 }
Python
0
@@ -465,32 +465,94 @@ oint_view.xml',%0A + 'connector_menu.xml',%0A 'setting_view.xml',%0A 'res_p
7369244fbfcda67e1b14ebedd9fb9467fe5d8870
Update module list wizard should not miss search view of Modules
bin/addons/base/module/wizard/wizard_update_module.py
bin/addons/base/module/wizard/wizard_update_module.py
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

import wizard
import netsvc
import pooler

class wizard_update_module(wizard.interface):
    arch = '''<?xml version="1.0"?>
    <form string="Scan for new modules">
        <label string="This function will check for new modules in the 'addons' path" colspan="4" align="0.0"/>
    </form>'''
    fields = {
    }

    arch_module = '''<?xml version="1.0"?>
    <form string="New modules">
        <field name="update" colspan="4"/>
        <field name="add" colspan="4"/>
    </form>'''

    fields_module = {
        'update': {'type': 'integer', 'string': 'Number of modules updated', 'readonly': True},
        'add': {'type': 'integer', 'string': 'Number of modules added', 'readonly': True},
    }

    def _update_module(self, cr, uid, data, context):
        update, add = pooler.get_pool(cr.dbname).get('ir.module.module').update_list(cr, uid)
        return {'update': update, 'add': add}

    def _action_module_open(self, cr, uid, data, context):
        return {
            'domain': str([]),
            'name': 'Module List',
            'view_type': 'form',
            'view_mode': 'tree,form',
            'res_model': 'ir.module.module',
            'view_id': False,
            'type': 'ir.actions.act_window'
        }

    states = {
        'init': {
            'actions': [],
            'result': {'type': 'form', 'arch': arch, 'fields': fields,
                       'state': [
                           ('end', 'Cancel', 'gtk-cancel'),
                           ('update', 'Check new modules', 'gtk-ok', True)
                       ]
                       }
        },
        'update': {
            'actions': [_update_module],
            'result': {'type': 'form', 'arch': arch_module, 'fields': fields_module,
                       'state': [
                           ('open_window', 'Ok', 'gtk-ok', True)
                       ]
                       }
        },
        'open_window': {
            'actions': [],
            'result': {'type': 'action', 'action': _action_module_open, 'state': 'end'}
        }
    }
wizard_update_module('module.module.update')

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
Python
0
@@ -1958,22 +1958,30 @@ -return +%0A res = %7B%0A @@ -2241,17 +2241,283 @@ %7D%0A -%0A + %0A search_view_id = pooler.get_pool(cr.dbname).get('ir.ui.view').search(cr, uid, %5B('name','=','ir.module.module.list.select')%5D, context=context)%0A if search_view_id:%0A res.update(%7B'search_view_id' : search_view_id%5B0%5D%7D)%0A return res %0A%0A st
199f0f3e8e02f53f570e5463cc4350568771395f
Refactor tasks.py
edx_shopify/tasks.py
edx_shopify/tasks.py
from celery import Task
from celery.utils.log import get_task_logger

from .models import Order, OrderItem
from .utils import auto_enroll_email

logger = get_task_logger(__name__)


class ProcessOrder(Task):
    """
    Process order creation event.

    """

    def run(self, data):
        logger.debug('Processing order data: %s' % data)
        order = Order.objects.get(id=data['id'])

        # If the order is anything but UNPROCESSED, abandon the attempt.
        if order.status != Order.UNPROCESSED:
            logger.warning('Order %s has already '
                           'been processed, ignoring' % order.id)
            return

        # Mark the order as being processed.
        order.status = Order.PROCESSING
        order.save()

        # Process line items
        order_error = False
        for item in data['line_items']:
            logger.debug('Processing line item: %s' % item)
            try:
                sku = item['sku']
                email = next(
                    p['value'] for p in item['properties']
                    if p['name'] == 'email'
                )
            except (KeyError, StopIteration):
                order_error = True
                logger.error('Malformed line item %s in order %s, '
                             'unable to process' % (item, order.id))
                continue

            # Store line item
            order_item, created = OrderItem.objects.get_or_create(
                order=order,
                sku=sku,
                email=email
            )

            if order_item.status == OrderItem.UNPROCESSED:
                try:
                    # Enroll the email in the course
                    auto_enroll_email(sku, email)
                except:
                    logger.error('Unable to enroll '
                                 '%s in %s' % (email, sku))
                    order_error = True
                    order_item.status = OrderItem.ERROR
                    order_item.save()
                    continue

                # Mark the item as processed
                order_item.status = OrderItem.PROCESSED
                order_item.save()
                logger.debug('Successfully processed line item '
                             '%s for order %s' % (item, order.id))
            elif order_item.status == OrderItem.ERROR:
                order_error = True

        # Mark the order status
        if order_error:
            order.status = Order.ERROR
            logger.error('Failed to fully '
                         'process order %s' % order.id)
        else:
            order.status = Order.PROCESSED
            logger.error('Successfully processed '
                         'order %s' % order.id)

        order.save()
Python
0
@@ -212,76 +212,544 @@ %22%22%22 -%0A Process order creation event.%0A%0A +Process a newly received order, and enroll learners in courses%0A using their email address.%0A%0A On failure, store the order in an ERROR state.%0A %22%22%22%0A%0A def __init__(self):%0A %22%22%22Set up an order as an instance member, so we can manipulate it both%0A from run() and from on_failure().%0A + %22%22%22%0A - def run(self, data): +%0A self.order = None%0A%0A def run(self, data):%0A %22%22%22Parse input data for line items, and create enrollments.%0A%0A On any error, raise the exception in order to be handled by%0A on_failure().%0A %22%22%22%0A %0A @@ -806,24 +806,29 @@ ta)%0A +self. order = Orde @@ -937,24 +937,29 @@ %0A if +self. order.status @@ -1084,24 +1084,29 @@ ignoring' %25 +self. order.id)%0A @@ -1168,32 +1168,37 @@ cessed.%0A +self. order.status = O @@ -1213,32 +1213,37 @@ CESSING%0A +self. order.save()%0A%0A @@ -1273,36 +1273,8 @@ ems%0A - order_error = False%0A @@ -1593,70 +1593,9 @@ cept - (KeyError, StopIteration):%0A order_error = True +: %0A @@ -1709,32 +1709,37 @@ ocess' %25 (item, +self. order.id))%0A @@ -1741,39 +1741,36 @@ -continu +rais e%0A%0A # @@ -1875,16 +1875,21 @@ order= +self. order,%0A @@ -2299,148 +2299,12 @@ -order_error = True%0A order_item.status = OrderItem.ERROR%0A order_item.save()%0A continu +rais e%0A%0A @@ -2558,16 +2558,21 @@ (item, +self. order.id @@ -2587,27 +2587,50 @@ - elif order_item +# Mark the order status%0A self.order .sta @@ -2638,162 +2638,429 @@ us = -= Order -Item.ERROR:%0A order_ +.PROCESSED%0A logger. error - = True%0A%0A # Mark the order status%0A if order_error:%0A order.status = Order.ERROR%0A +('Successfully processed '%0A 'order %25s' %25 self.order.id)%0A self.order.save()%0A%0A def on_failure(self, exc, task_id, args, kwargs, einfo):%0A %22%22%22Handle the run() method having raised an exception: log an%0A exception stack trace and a prose message, save the order with%0A an ERROR status.%0A %22%22%22%0A%0A logger.error(exc, exc_info=True)%0A @@ -3116,20 +3116,16 @@ - 'process @@ -3129,37 +3129,26 @@ ess order %25s + ' - %25 order.id) %0A els @@ -3148,14 +3148,8 @@ -else:%0A @@ -3160,137 +3160,130 @@ -order.status = Order.PROCESSED%0A logger.error('Successfully processed '%0A 'order %25s' %25 order.id) + '(task ID %25s)' %25 (self.order.id,%0A task_id))%0A self.order.status = Order.ERROR %0A @@ -3283,24 +3283,29 @@ ROR%0A +self. order.save()
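The refactor in this record moves error handling out of run() and into Celery's on_failure() hook, which Celery invokes with the exception, the task id, the original call arguments, and the traceback info whenever run() raises. A minimal sketch of that shape (the task body here is invented, and how a Task subclass is registered with an app varies by Celery version):

from celery import Task

class SaveStateOnError(Task):
    def run(self, payload):
        if 'id' not in payload:
            # Raising hands control to on_failure() below instead of
            # threading an error flag through every branch.
            raise ValueError('payload missing id')
        return payload['id']

    def on_failure(self, exc, task_id, args, kwargs, einfo):
        # Called by Celery after run() raised; einfo carries the traceback.
        print('task %s failed: %s' % (task_id, exc))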
fe96f6539b40a880e88f7efe8502279cea1de506
update test
corehq/apps/accounting/tests/test_model_validation.py
corehq/apps/accounting/tests/test_model_validation.py
from datetime import date

from django.core.exceptions import ValidationError
from django.test import TransactionTestCase

from corehq.apps.accounting.models import (
    BillingAccount,
    CreditAdjustment,
    Invoice,
    LineItem,
    Subscriber,
    Subscription,
)
from corehq.apps.accounting.tests import generator
from corehq.apps.accounting.tests.base_tests import BaseAccountingTest


class TestCreditAdjustmentValidation(BaseAccountingTest):

    def tearDown(self):
        CreditAdjustment.objects.all().delete()
        LineItem.objects.all().delete()
        Invoice.objects.all().delete()
        generator.delete_all_subscriptions()
        generator.delete_all_accounts()
        super(TestCreditAdjustmentValidation, self).tearDown()

    def test_clean(self):
        account = BillingAccount.objects.create(
            currency=generator.init_default_currency(),
        )
        subscription = Subscription.objects.create(
            account=account,
            date_start=date.today(),
            plan_version=generator.subscribable_plan(),
            subscriber=Subscriber.objects.create(domain='test')
        )

        invoice = Invoice.objects.create(
            date_start=date.today(),
            date_end=date.today(),
            subscription=subscription,
        )
        line_item = LineItem.objects.create(
            invoice=invoice,
        )

        with self.assertRaises(ValidationError):
            try:
                CreditAdjustment(
                    invoice=invoice,
                    line_item=line_item,
                ).save()
            except ValidationError as e:
                self.assertIn('__all__', e.error_dict)
                raise e
Python
0.000001
@@ -74,52 +74,8 @@ rror -%0Afrom django.test import TransactionTestCase %0A%0Afr @@ -771,32 +771,108 @@ objects.create(%0A + name='Test Account',%0A created_by='test@example.com',%0A curr
7c46287f7f7b0b18d671dc91e69668961c98adee
update test_flake8
_unittests/ut_module/test_flake8.py
_unittests/ut_module/test_flake8.py
""" @brief test log(time=0s) """ import sys import os import unittest import warnings try: import src except ImportError: path = os.path.normpath( os.path.abspath( os.path.join( os.path.split(__file__)[0], "..", ".."))) if path not in sys.path: sys.path.append(path) import src try: import pyquickhelper as skip_ except ImportError: path = os.path.normpath( os.path.abspath( os.path.join( os.path.split(__file__)[0], "..", "..", "..", "pyquickhelper", "src",))) if path not in sys.path: sys.path.append(path) import pyquickhelper as skip_ from pyquickhelper.loghelper import fLOG from pyquickhelper.pycode import check_pep8 from pyquickhelper.pycode.utils_tests_helper import _extended_refectoring class TestFlake8(unittest.TestCase): def test_flake8_src(self): fLOG( __file__, self._testMethodName, OutputPrint=__name__ == "__main__") if sys.version_info[0] == 2 or "Anaconda" in sys.executable \ or "condavir" in sys.executable: warnings.warn( "skipping test_flake8 because of Python 2 or " + sys.executable) return thi = os.path.abspath(os.path.dirname(__file__)) src_ = os.path.normpath(os.path.join(thi, "..", "..", "src")) check_pep8(src_, fLOG=fLOG, extended=[("fLOG", _extended_refectoring)]) def test_flake8_test(self): fLOG( __file__, self._testMethodName, OutputPrint=__name__ == "__main__") if sys.version_info[0] == 2 or "Anaconda" in sys.executable \ or "condavir" in sys.executable: warnings.warn( "skipping test_flake8 because of Python 2 or " + sys.executable) return thi = os.path.abspath(os.path.dirname(__file__)) test = os.path.normpath(os.path.join(thi, "..", )) check_pep8(test, fLOG=fLOG, neg_filter="temp_.*", skip=["'src' imported but unused", "'skip_' imported but unused", "'skip__' imported but unused", "'skip___' imported but unused", ], extended=[("fLOG", _extended_refectoring)]) if __name__ == "__main__": unittest.main()
Python
0.000001
@@ -920,25 +920,25 @@ extended_ref -e +a ctoring%0A%0A%0Acl
8129477b704e75bd90288aa4a672d55237bb8263
Use local memory cache instead of Memcached
election/settings.py
election/settings.py
""" Django settings for election project. Generated by 'django-admin startproject' using Django 1.9.2. For more information on this file, see https://docs.djangoproject.com/en/1.9/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.9/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) MEMCACHED_HOST = os.environ.get('MEMCACHED_HOST') # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = os.environ.get('SECRET_KEY') # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'django.contrib.humanize', 'endorsements', 'debug_toolbar', ] # Only add cache-related middleware if the MEMCACHED_HOST env var is set. MIDDLEWARE_CLASSES = [] if MEMCACHED_HOST: MIDDLEWARE_CLASSES.append('django.middleware.cache.UpdateCacheMiddleware') MIDDLEWARE_CLASSES.extend([ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'debug_toolbar.middleware.DebugToolbarMiddleware', ]) if MEMCACHED_HOST: MIDDLEWARE_CLASSES.append('django.middleware.cache.FetchFromCacheMiddleware') ROOT_URLCONF = 'election.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'election.wsgi.application' # Database # https://docs.djangoproject.com/en/1.9/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql', 'USER': os.environ.get('DB_USER'), 'NAME': os.environ.get('DB_NAME'), 'PASSWORD': os.environ.get('DB_PASSWORD'), 'HOST': os.environ.get('DB_HOST'), 'PORT': os.environ.get('DB_PORT'), } } # Password validation # https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.9/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.9/howto/static-files/ STATIC_URL = '/static/' JQUERY_URL = '' INTERNAL_IPS = ['127.0.0.1'] if MEMCACHED_HOST: CACHES = { 'default': { 'BACKEND': 
'django.core.cache.backends.memcached.MemcachedCache', 'LOCATION': MEMCACHED_HOST + ':11211', } }
Python
0.000001
@@ -461,59 +461,8 @@ ))%0A%0A -MEMCACHED_HOST = os.environ.get('MEMCACHED_HOST')%0A%0A %0A# Q @@ -1115,155 +1115,35 @@ %0A%5D%0A%0A -# Only add cache-related middleware if the MEMCACHED_HOST env var is set.%0AMIDDLEWARE_CLASSES = %5B%5D%0Aif MEMCACHED_HOST:%0A MIDDLEWARE_CLASSES.append( +MIDDLEWARE_CLASSES = %5B%0A 'dja @@ -1189,38 +1189,9 @@ are' -)%0A%0AMIDDLEWARE_CLASSES.extend(%5B +, %0A @@ -1712,61 +1712,12 @@ e',%0A -%5D)%0A%0Aif MEMCACHED_HOST:%0A MIDDLEWARE_CLASSES.append( + 'dja @@ -1762,17 +1762,18 @@ dleware' -) +%0A%5D %0A%0AROOT_U @@ -3600,31 +3600,8 @@ %5D%0A%0A%0A -if MEMCACHED_HOST:%0A CACH @@ -3607,20 +3607,16 @@ HES = %7B%0A - 'def @@ -3628,28 +3628,24 @@ : %7B%0A - 'BACKEND': ' @@ -3675,98 +3675,33 @@ nds. -memcached.MemcachedCache',%0A 'LOCATION': MEMCACHED_HOST + ':11211',%0A %7D%0A +locmem.LocMemCache',%0A %7D%0A %7D%0A
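For reference, the local-memory backend this record switches to needs no external host at all; LOCATION is optional and only disambiguates multiple in-process caches. A minimal configuration (the cache name is an arbitrary placeholder):

CACHES = {
    'default': {
        # Per-process, in-memory cache: nothing to deploy, but entries are
        # not shared between processes, so it suits development use.
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': 'election-default',
    }
}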
767b18237f0c8c4c3ead2b298ecfefcf90155a4b
update and clean up FM and WM.
agility/oodt/workflow.py
agility/oodt/workflow.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import xmlrpclib

from oodt_metadata import MetaData


class OODTWorkFlowWrapper(object):
    def __init__(self, host):
        self._server = xmlrpclib.Server(host)

    #String executeDynamicWorkflow(Vector<String> taskIds, Hashtable metadata)
    def executeDynamicWorkflow(self, taskIds, metadata):
        return self._server.workflowmgr.executeDynamicWorkflow(taskIds, metadata)

    #Hashtable getConditionById(String conditionId)
    def getConditionById(self, conditionId):
        return self._server.workflowmgr.getConditionById(conditionId)

    #Hashtable getFirstPage()
    def getFirstPage(self):
        return self._server.workflowmgr.getFirstPage()

    #Hashtable getLastPage()
    def getLastPage(self):
        return self._server.workflowmgr.getLastPage()

    #Hashtable getNextPage(Hashtable currentPage)
    def getNextPage(self, currentPage):
        return self._server.workflowmgr.getNextPage()

    #int getNumWorkflowInstances()
    def getNumWorkflowInstances(self):
        return self._server.workflowmgr.getNumWorkflowInstances()

    #int getNumWorkflowInstancesByStatus(String status)
    def getNumWorkflowInstancesByStatus(self, status):
        return self._server.workflowmgr.getNumWorkflowInstancesByStatus(status)

    #Hashtable getPrevPage(Hashtable currentPage)
    def getPrevPage(self, currentPage):
        return self._server.workflowmgr.getPrevPage(currentPage)

    #Vector getRegisteredEvents()
    def getEventNames(self):
        return self._server.workflowmgr.getRegisteredEvents()

    #Hashtable getTaskById(String taskId)
    def getTaskById(self, taskId):
        return self._server.workflowmgr.getTaskById(taskId)

    #Hashtable getWorkflowById(String workflowId)
    def getWorkflowById(self, workflowId):
        return self._server.workflowmgr.getWorkflowById(workflowId)

    #double getWorkflowCurrentTaskWallClockMinutes(String workflowInstId)
    def getWorkflowCurrentTaskWallClockMinutes(self, workflowInstId):
        return self._server.workflowmgr.getWorkflowCurrentTaskWallClockMinutes(workflowInstId)

    #Hashtable getWorkflowInstanceById(String wInstId)
    def getWorkflowInstanceById(self,wInstId):
        return self._server.workflowmgr.getWorkflowInstanceById(wInstId)

    #Hashtable getWorkflowInstanceMetadata(String wInstId)
    def getWorkflowInstanceMetadata(self, wInstId):
        return self._server.workflowmgr.getWorkflowInstanceMetadata(wInstId)

    #Vector getWorkflowInstances()
    def getWorkflowInstances(self):
        return self._server.workflowmgr.getWorkflowInstances()

    #Vector getWorkflowInstancesByStatus(String status)
    def getWorkflowInstancesByStatus(self, status):
        return self._server.workflowmgr.getWorkflowInstancesByStatus(status)

    #Vector getWorkflows()
    def getWorkflows(self):
        return self._server.workflowmgr.getWorkflows()

    #Vector getWorkflowsByEvent(String eventName)
    def getWorkflowsByEvent(self, eventName):
        return self._server.workflowmgr.getWorkflowsByEvent(eventName)

    #double getWorkflowWallClockMinutes(String workflowInstId)
    def getWorkflowWallClockMinutes(self,workflowInstId):
        return getWorkflowWallClockMinutes(workflowInstId)

    #boolean handleEvent(String eventName, Hashtable metadata)
    def startEvent(self, eventName, metaData):
        return self._server.workflowmgr.handleEvent(eventName, metaData)

    #Hashtable paginateWorkflowInstances(int pageNum)
    def paginateWorkflowInstances(self, pageNum):
        return self._server.workflowmgr.paginateWorkflowInstances(pageNum)

    #Hashtable paginateWorkflowInstances(int pageNum, String status)(self, workflowInstId)
    def paginateWorkflowInstances(self, pageNum, status):
        return self._server.workflowmgr.paginateWorkflowInstances(pageNum, status)

    #boolean pauseWorkflowInstance(String workflowInstId)
    def pauseWorkflowInstance(self, workflowInstId):
        return self._server.workflowmgr.pauseWorkflowInstance(workflowInstId)

    #boolean resumeWorkflowInstance(String workflowInstId)
    def resumeWorkflowInstance(self, workflowInstId):
        return self._server.workflowmgr.resumeWorkflowInstance(workflowInstId)

    #boolean setWorkflowInstanceCurrentTaskEndDateTime(String wInstId, String endDateTimeIsoStr)
    def setWorkflowInstanceCurrentTaskEndDateTime(self, workflowInstId, endDateTimeIsoStr):
        return self._server.workflowmgr.setWorkflowInstanceCurrentTaskEndDateTime(workflowInstId, endDateTimeIsoStr)

    #boolean setWorkflowInstanceCurrentTaskStartDateTime(String wInstId, String startDateTimeIsoStr)
    def setWorkflowInstanceCurrentTaskStartDateTime(self, workflowInstId, startDateTimeIsoStr):
        return self._server.workflowmgr.setWorkflowInstanceCurrentTaskStartDateTime(workflowInstId, startDateTimeIsoStr)

    #boolean stopWorkflowInstance(String workflowInstId)
    def stopWorkflowInstance(self, workflowInstId):
        return self._server.workflowmgr.stopWorkflowInstance(workflowInstId)

    #boolean updateMetadataForWorkflow(String workflowInstId, Hashtable metadata)
    def updateMetadataForWorkflow(self, workflowInstId, metadata):
        return self._server.workflowmgr.updateMetadataForWorkflow(workflowInstId, metadata)

    #boolean updateWorkflowInstance(Hashtable workflowInst)
    def updateWorkflowInstance(workflowInst):
        return self._server.workflowmgr.updateWorkflowInstance(workflowInst)

    #boolean updateWorkflowInstanceStatus(String workflowInstanceId, String status)
    def updateWorkflowInstanceStatus(self, workflowInstId, status):
        return self._server.workflowmgr.updateWorkflowInstanceStatus(workflowInstId, status)


def main():
    # create instance
    oodt = OODTWorkFlowWrapper("http://localhost:9200")

    # get event info
    events = oodt.getEventNames()

    # create metadata object to invoke an event
    met = MetaData()
    met.addMetaData("hello", "world")

    # print available events
    print 'available events:', events
    # oodt.startEvent(events[0], met.toXmlRpc())

if __name__ == '__main__':
    main()
Python
0
@@ -804,13 +804,8 @@ rom -oodt_ meta
c2f3ca6c2e3c2810b3c881e09cc613bfe15e598c
Fix up some small coding errors
alerts/geomodel_alert.py
alerts/geomodel_alert.py
#!/usr/bin/env python

# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
# Copyright (c) 2015 Mozilla Corporation

import json
import os
import sys
import traceback

from lib.alerttask import AlertTask
from mozdef_util.query_models import SearchQuery, QueryStringMatch as QSMatch
from mozdef_util.utilities.logger import logger

import alerts.geomodel.alert as alert
import alerts.geomodel.config as config
import alerts.geomodel.locality as locality

_CONFIG_FILE = os.path.join(
    os.path.dirname(__file__),
    'geomodel_alert.json')

class AlertGeoModel(AlertTask):
    '''GeoModel alert runs a set of configured queries for events and
    constructs locality state for users performing authenticated actions.
    When activity is found that indicates a potential compromise of an
    account, an alert is produced.
    '''

    def main(self):
        cfg = self._load_config()

        for query_index in range(len(cfg.events)):
            try:
                self._process(cfg, query_index)
            except Exception as err:
                traceback.print_exc(file=sys.stdout)
                logger.error(
                    'Error process events; query="{0}"; error={1}'.format(
                        cfg.events[query_index].lucene_query,
                        err.message))

    def onAggregation(self, agg):
        username = agg['value']
        events = agg['events']
        cfg = agg['config']

        localities = list(filter(map(locality.from_event, events)))
        new_state = locality.State('locality', username, localities)

        query = locality.wrap_query(self.es)
        journal = locality.wrap_journal(self.es)

        entry = locality.find(query, username, cfg.localities.es_index)

        if entry is None:
            entry = locality.Entry(
                '', locality.State('localities', username, []))

        updated = locality.Update.flat_map(
            lambda state: locality.remove_outdated(
                state, cfg.localities.valid_duration_days),
            locality.update(entry.state, new_state))

        if updated.did_update:
            entry.state = updated.state
            journal(entry, cfg.localities.es_index)

        new = alert.alert(entry.state, cfg.alerts.whitelist)

        if new is not None:
            # TODO: When we update to Python 3.7+, change to asdict(alert_produced)
            alert_dict = self.createAlertDict(
                new.summary, new.category, new.tags, events)

            alert_dict['details'] = {
                'username': new.username,
                'sourceipaddress': new.sourceipaddress,
                'origin': dict(new.origin._asdict())
            }

            return alert_dict

        return None

    def _process(self, cfg: config.Config, qindex: int):
        evt_cfg = cfg.events[qindex]

        search = SearchQuery(minutes=evt_cfg.search_window.minutes)
        search.add_must(QSMatch(evt_cfg.lucene_query))

        self.filtersManual(search)
        self.searchEventsAggregated(evt_cfg.username_path, samplesLimit=1000)
        self.walkAggregations(threshold=1, config=cfg)

    def _load_config(self):
        with open(_CONFIG_FILE) as cfg_file:
            cfg = json.load(cfg_file)

            cfg['localities'] = config.Localities(**cfg['localities'])

            for i, event in enumerate(cfg['events']):
                cfg['events'][i]['search_window'] = config.SearchWindow(
                    **cfg['events'][i]['search_window'])
            cfg['events'] = [config.Events(**dat) for dat in cfg['events']]

            cfg['alerts']['whitelist'] = config.Whitelist(
                **cfg['alerts']['whitelist'])
            cfg['alerts'] = config.Alerts(**cfg['alerts'])

            return config.Config(**cfg)
Python
0.999999
@@ -1435,16 +1435,8 @@ err -.message ))%0A%0A @@ -1594,16 +1594,74 @@ (filter( +%0A lambda state: state is not None,%0A map(loca
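The diff above makes two fixes: it drops the Python-2-only err.message attribute and gives filter() the predicate it requires (filter takes exactly two arguments, so the original filter(map(...)) call raises TypeError). A minimal sketch of the second fix, with a hypothetical from_event stand-in:

# from_event here is a stand-in; the real one lives in alerts.geomodel.locality
# and may return None for events that carry no usable locality data.
def from_event(event):
    return event.get('locality')

events = [{'locality': 'Berlin'}, {'locality': None}, {'locality': 'Tokyo'}]

# filter() needs (function, iterable); the lambda discards the None entries.
localities = list(filter(lambda state: state is not None,
                         map(from_event, events)))
print(localities)  # ['Berlin', 'Tokyo']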
4f040d1d7730ee611f0c4a6768ecc181c6a43ff7
Fix broken view test for select seats
karspexet/ticket/tests/test_views.py
karspexet/ticket/tests/test_views.py
# coding: utf-8
from django.shortcuts import reverse
from django.test import TestCase, RequestFactory
from django.utils import timezone

from karspexet.show.models import Show, Production
from karspexet.ticket import views
from karspexet.venue.models import Venue, SeatingGroup

import pytest

class TestHome(TestCase):
    def setUp(self):
        rf = RequestFactory()
        self.request = rf.get(reverse(views.home))
        self.tomorrow = timezone.now() + timezone.timedelta(days=1)

    def test_home_lists_visible_upcoming_shows(self):
        venue = Venue.objects.create(name="Teater 1")
        production = Production.objects.create(name="Uppsättningen")
        yesterday = timezone.now() - timezone.timedelta(days=1)
        show = Show.objects.create(date=self.tomorrow, production=production, venue=venue)
        invisible_show = Show.objects.create(date=self.tomorrow, production=production, venue=venue, visible=False)
        old_show = Show.objects.create(date=yesterday, production=production, venue=venue)

        response = views.home(self.request)

        shows = response.context_data["upcoming_shows"]
        assert show in shows
        assert old_show not in shows

    def test_home_contains_only_visible_shows(self):
        venue = Venue.objects.create(name="Teater 1")
        production = Production.objects.create(name="Uppsättningen")
        show = Show.objects.create(date=self.tomorrow, production=production, venue=venue)
        invisible_show = Show.objects.create(date=self.tomorrow, production=production, venue=venue, visible=False)

        response = views.home(self.request)

        shows = response.context_data["upcoming_shows"]
        assert show in shows
        assert invisible_show not in shows

class TestSelect_seats(TestCase):
    def test_select_seats(self):
        venue = Venue.objects.create(name="Teater 1")
        seatinggroup = SeatingGroup.objects.create(name="prisgrupp 1", venue=venue)
        production = Production.objects.create(name="Uppsättningen")
        show = Show.objects.create(date=timezone.now(), production=production, venue=venue)

        response = self.client.get(reverse(views.select_seats, args=[show.id]))

        self.assertContains(response, "Köp biljetter för Uppsättningen")
Python
0.000001
@@ -2200,10 +2200,12 @@ how. -id +slug %5D))%0A
e57e4152229132c9a8a8a13bf0904b58f7edf6f8
Update send_email.py
keepercommander/custom/send_email.py
keepercommander/custom/send_email.py
#  _  __
# | |/ /___ ___ _ __  ___ _ _ ®
# | ' </ -_) -_) '_ \/ -_) '_|
# |_|\_\___\___| .__/\___|_|
#              |_|
#
# Keeper Commander
# Copyright 2022 Keeper Security Inc.
# Contact: commander@keepersecurity.com
#
# Example code to retrieve the password for a record
# stored in the vault. This example also pulls configuration
# from config.json or writes the config file if it does not exist.
#
# Usage:
#    python send_email.py

import base64
import getpass
import json
import os
import ssl
from smtplib import SMTP

from keepercommander import api, vault_extensions, vault
from keepercommander.commands.enterprise import SecurityAuditReportCommand
from keepercommander.params import KeeperParams

email_message = '''
From: {0}
Subject: Keeper BreachWatch Alert

Keeper BreachWatch detected the records at risk in your vault.
Please login to Keeper and review the records marked "At Risk"
'''

def read_config_file(params):
    params.config_filename = os.path.join(os.path.dirname(__file__), 'config.json')
    if os.path.isfile(params.config_filename):
        with open(params.config_filename, 'r') as f:
            params.config = json.load(f)
            if 'user' in params.config:
                params.user = params.config['user']
            if 'password' in params.config:
                params.password = params.config['password']
            if 'mfa_token' in params.config:
                params.mfa_token = params.config['mfa_token']
            if 'server' in params.config:
                params.server = params.config['server']
            if 'device_id' in params.config:
                device_id = base64.urlsafe_b64decode(params.config['device_id'] + '==')
                params.rest_context.device_id = device_id

my_params = KeeperParams()
read_config_file(my_params)

while not my_params.user:
    my_params.user = getpass.getpass(prompt='User(Email): ', stream=None)

while not my_params.password:
    my_params.password = getpass.getpass(prompt='Master Password: ', stream=None)

report_command = SecurityAuditReportCommand()
report_json = report_command.execute(my_params, breachwatch=True, format='json')
report = json.loads(report_json)
emails = [x['email'] for x in report if x.get('at_risk') > 5]
if emails:
    api.sync_down(my_params)
    smtp_record = next(vault_extensions.find_records(my_params, search_str='smtp', record_type='serverCredentials'), None)
    if isinstance(smtp_record, vault.TypedRecord):
        smtp_host = None
        smtp_port = 0
        username = None
        password = None
        field = smtp_record.get_typed_field('host')
        if field:
            host_value = field.get_default_value()
            if isinstance(host_value, dict):
                smtp_host = host_value.get('hostName')
                port = host_value.get('port')
                if port:
                    try:
                        smtp_port = int(port)
                    except ValueError:
                        pass
        if smtp_host:
            field = smtp_record.get_typed_field('login')
            if field:
                username = field.get_default_value()
            field = smtp_record.get_typed_field('password')
            if field:
                password = field.get_default_value()
        if smtp_host:
            with SMTP(host=smtp_host, port=smtp_port) as connection:
                if username:
                    connection.starttls(context=ssl.create_default_context())
                    connection.login(user=username, password=password)
                connection.sendmail(my_params.user, emails, email_message.format(my_params.user))
Python
0.000001
@@ -237,65 +237,211 @@ to r -etrieve the password for a record%0A# stored in the vault. +un a BreachWatch status report for %0A# all users, and send users an email reminder to address their %0A# found issues. SMTP credentials must be supplied via a vault record%0A# in order to send the mail. %0A# %0A# Thi
14f0ed32b62e2d00443e99428516a2d17a68bc58
Use COMPLEX_TEST_STRING for testing
coalib/tests/processes/communication/LogMessageTest.py
coalib/tests/processes/communication/LogMessageTest.py
""" This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import sys sys.path.insert(0, ".") from coalib.misc.i18n import _ from coalib.processes.communication.LOG_LEVEL import LOG_LEVEL from coalib.processes.communication.LogMessage import LogMessage import unittest class LogMessageTestCase(unittest.TestCase): def setUp(self): self.uut = LogMessage() def test_construction(self): # take a look if defaults are good self.assertEqual(self.uut.log_level, LOG_LEVEL.DEBUG) self.assertEqual(self.uut.message, "") # see that arguments are processed right self.uut = LogMessage(LOG_LEVEL.WARNING, "a msg") self.assertEqual(self.uut.log_level, LOG_LEVEL.WARNING) self.assertEqual(self.uut.message, "a msg") def test_to_str(self): self.uut.message = "test message änd umlauts!" self.uut.log_level = LOG_LEVEL.ERROR self.assertEqual(str(self.uut), "[{}] test message änd umlauts!".format(_("ERROR"))) self.uut.log_level = LOG_LEVEL.WARNING self.assertEqual(str(self.uut), "[{}] test message änd umlauts!".format(_("WARNING"))) self.uut.log_level = LOG_LEVEL.DEBUG self.assertEqual(str(self.uut), "[{}] test message änd umlauts!".format(_("DEBUG"))) if __name__ == '__main__': unittest.main(verbosity=2)
Python
0.000001
@@ -678,16 +678,72 @@ mport _%0A +from coalib.misc.StringConstants import StringConstants%0A from coa @@ -1447,35 +1447,43 @@ e = -%22test message %C3%A4nd umlauts!%22 +StringConstants.COMPLEX_TEST_STRING %0A @@ -1574,52 +1574,66 @@ %7B%7D%5D -test message %C3%A4nd umlauts!%22.format(_(%22ERROR%22) +%7B%7D%22.format(_(%22ERROR%22), StringConstants.COMPLEX_TEST_STRING ))%0A @@ -1728,54 +1728,68 @@ %7B%7D%5D -test message %C3%A4nd umlauts!%22.format(_(%22WARNING%22) +%7B%7D%22.format(_(%22WARNING%22), StringConstants.COMPLEX_TEST_STRING ))%0A @@ -1882,52 +1882,66 @@ %7B%7D%5D -test message %C3%A4nd umlauts!%22.format(_(%22DEBUG%22) +%7B%7D%22.format(_(%22DEBUG%22), StringConstants.COMPLEX_TEST_STRING ))%0A%0A
d02e8ce821edbdcd0fa3409b82650f2f72dae29d
support None
chainercv/links/model/sequential_feature_extractor.py
chainercv/links/model/sequential_feature_extractor.py
import chainer

class SequentialFeatureExtractor(chainer.Chain):
    """A feature extractor model with a single-stream forward pass.

    This class is a base class that can be used for an implementation of
    a feature extractor model.
    Callabel objects, such as :class:`chainer.Link` and
    :class:`chainer.Function`, can be registered to this link with
    :meth:`init_scope`.
    This link keeps the order of registerations and conducts the computation
    in the same order when :meth:`__call__` is called.

    A :class:`chainer.Link` object in the sequence will be added as
    a child link of this object.

    :meth:`__call__` returns single or multiple features that are picked up
    through a stream of computation.
    These features can be specified by :obj:`layer_names`, which contains
    the names of the layers whose outputs are collected.
    When :obj:`layer_names` is a string, single value is returned.
    When :obj:`layer_names` is an iterable of strings, a tuple of values
    is returned. The order of the values is the same as the order of the
    strings in :obj:`layer_names`.
    When :obj:`layer_names` is :obj:`None`, the output of the last layer
    is returned.

    Examples:

        >>> import collections
        >>> import chainer.functions as F
        >>> import chainer.links as L
        >>> model = SequentialFeatureExtractor()
        >>> with model.init_scope():
        >>>     model.l1 = L.Linear(None, 1000)
        >>>     model.l1_relu = F.relu
        >>>     model.l2 = L.Linear(None, 1000)
        >>>     model.l2_relu = F.relu
        >>>     model.l3 = L.Linear(None, 10)
        >>> model.layer_names = ['l2_relu', 'l1_relu']
        >>> # These are outputs of layer l2_relu and l1_relu.
        >>> feat1, feat2 = model(x)
        >>> # The layer_names can be dynamically changed.
        >>> model.layer_names = 'l3'
        >>> # This is an output of layer l1.
        >>> feat3 = model(x)

    Params:
        layer_names (string or iterable of strings):
            Names of layers whose outputs will be collected in
            the forward pass.

    """

    def __init__(self):
        super(SequentialFeatureExtractor, self).__init__()
        self._order = list()

    def __setattr__(self, name, value):
        super(SequentialFeatureExtractor, self).__setattr__(name, value)
        if self.within_init_scope and callable(value):
            self._order.append(name)

    def __delattr__(self, name):
        super(SequentialFeatureExtractor, self).__delattr__(name)
        try:
            self._order.remove(name)
        except ValueError:
            pass

    @property
    def layer_names(self):
        return self._layer_names

    @layer_names.setter
    def layer_names(self, layer_names):
        if layer_names is None:
            layer_names = self._order[-1]
        if (not isinstance(layer_names, str) and
                all(isinstance(name, str) for name in layer_names)):
            return_tuple = True
        else:
            return_tuple = False
            layer_names = [layer_names]
        if any(name not in self._order for name in layer_names):
            raise ValueError('Invalid layer name')

        self._return_tuple = return_tuple
        self._layer_names = layer_names

    def __call__(self, x):
        """Forward this model.

        Args:
            x (chainer.Variable or array): Input to the model.

        Returns:
            chainer.Variable or tuple of chainer.Variable:
            The returned values are determined by :obj:`layer_names`.

        """
        # The biggest index among indices of the layers that are included
        # in self._layer_names.
        last_index = max(self._order.index(name) for name in self._layer_names)

        features = {}
        h = x
        for name in self._order[:last_index + 1]:
            h = self[name](h)
            if name in self._layer_names:
                features[name] = h

        if self._return_tuple:
            features = tuple(
                [features[name] for name in self._layer_names])
        else:
            features = list(features.values())[0]
        return features
Python
0.000075
@@ -2231,16 +2231,48 @@ = list() +%0A self.layer_names = None %0A%0A de @@ -2852,37 +2852,90 @@ -layer_names = self._order%5B-1%5D +self._return_tuple = False%0A self._layer_names = None%0A return %0A%0A @@ -3665,16 +3665,157 @@ %22%22%22%0A + if self._layer_names is None:%0A layer_names = %5Bself._order%5B-1%5D%5D%0A else:%0A layer_names = self._layer_names%0A%0A @@ -3889,30 +3889,24 @@ # in -self._ layer_names. @@ -3959,38 +3959,32 @@ me) for name in -self._ layer_names)%0A%0A @@ -4112,38 +4112,32 @@ if name in -self._ layer_names:%0A @@ -4274,22 +4274,16 @@ name in -self._ layer_na
7cbf46b1c44791b6a1466b08e049b568d32cf2d3
fix soil.tests.test_download_base:TestBlobDownload
corehq/ex-submodules/soil/tests/test_download_base.py
corehq/ex-submodules/soil/tests/test_download_base.py
from __future__ import absolute_import
from __future__ import unicode_literals

from io import BytesIO
from uuid import uuid4

from django.test import TestCase

from soil import BlobDownload
from soil.util import expose_blob_download
from corehq.blobs.tests.util import new_meta, TemporaryFilesystemBlobDB

class TestBlobDownload(TestCase):
    identifier = 'identifier'

    @classmethod
    def setUpClass(cls):
        super(TestBlobDownload, cls).setUpClass()
        cls.db = TemporaryFilesystemBlobDB()

    @classmethod
    def tearDownClass(cls):
        cls.db.close()
        super(TestBlobDownload, cls).tearDownClass()

    def test_expose_blob_download(self):
        ref = expose_blob_download(
            self.identifier,
            expiry=60,
            content_disposition='text/xml',
        )
        self.db.put(BytesIO(b'content'), meta=new_meta(key=ref.download_id))

        response = BlobDownload.get(ref.download_id).toHttpResponse()
        self.assertEqual(next(response.streaming_content), 'content')

    def test_expose_blob_download_with_legacy_download_id(self):
        self.db.put(BytesIO(b'legacy-blob'), self.identifier)
        ref = BlobDownload(
            self.identifier,
            mimetype='text/plain',
            content_disposition='text/xml',
        )
        ref.download_id = uuid4().hex  # old download id format
        ref.save(60)

        response = BlobDownload.get(ref.download_id).toHttpResponse()
        self.assertEqual(next(response.streaming_content), 'legacy-blob')
Python
0.000001
@@ -1015,16 +1015,17 @@ ntent), +b 'content @@ -1517,16 +1517,17 @@ ntent), +b 'legacy-
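The diff above only adds b prefixes: on Python 3 a blob's streamed chunks arrive as bytes, so comparing them to a str literal always fails. A standalone sketch of the failure mode:

from io import BytesIO

stream = BytesIO(b'content')
chunk = stream.read()
assert chunk == b'content'   # bytes compare equal to bytes
assert chunk != 'content'    # ...but never to str on Python 3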
e09e53d289b555487167a5800865cb727d10c0c2
use relative path (still hardcoded); change default number of iterations.
planck_dust_correlation.py
planck_dust_correlation.py
import astropy.units as u
import healpy as hp
import numpy as np
import numpy.random as random
import numpy.random.mtrand
from astropy.coordinates import SkyCoord, Longitude, Latitude
from mpi4py import MPI

from mpi_helper import r_print

comm = MPI.COMM_WORLD

def angular_distance(ar1, ar2):
    theta1, phi1 = hp.pix2ang(2048, ar1)
    theta2, phi2 = hp.pix2ang(2048, ar2)
    ra1 = phi1 * 180. / np.pi
    dec1 = 90. - (theta1 * 180. / np.pi)
    ra2 = phi2 * 180. / np.pi
    dec2 = 90. - (theta2 * 180. / np.pi)
    coord1 = SkyCoord(ra=ra1 * u.degree, dec=dec1 * u.degree)
    coord2 = SkyCoord(ra=ra2 * u.degree, dec=dec2 * u.degree)
    return coord1.separation(coord2).to(u.rad).value

_ = np.random.mtrand.RandomState(comm.rank)

ar_map_shape = None
ar_map_0 = None
ar_map_0_log = None
if comm.rank == 0:
    ar_map_0 = hp.fitsfunc.read_map("/Users/yishay/Downloads/COM_CompMap_Dust-DL07-AvMaps_2048_R2.00.fits", field=0)
    # ar_map_0_log = np.log(ar_map_0)
    mock = False
    if mock:
        ar_mock = ar_map_0
        nside_signal = 32
        radius = hp.nside2resol(nside_signal) / 2 / np.sqrt(2)
        for i in range(hp.nside2npix(nside_signal)):
            vec1 = hp.pix2vec(nside_signal, i)
            mask = hp.query_disc(2048, vec=vec1, radius=radius)
            ar_mock[mask] *= 100
        ar_mock /= np.sqrt(100)

ar_map = comm.bcast(ar_map_0)

num_bins = 100
ar_product_total = np.zeros(shape=(10, num_bins))
ar_weights_total = np.zeros(shape=(10, num_bins))

def ra_dec2ang(ra, dec):
    return (90. - dec) * np.pi / 180., ra / 180. * np.pi

def main_loop(max_angle, disc_part_mean, disc_part, disc_part_pixel_coords, max_angular_separation):
    ar_product = np.zeros(shape=num_bins)
    ar_weights = np.zeros(shape=num_bins)
    ar_product_reduce = np.zeros(shape=num_bins)
    ar_weights_reduce = np.zeros(shape=num_bins)
    chosen_indices = np.random.choice(np.arange(disc_part_pixel_coords.shape[0]), size=100, replace=False)
    for index in chosen_indices:
        vec_a = hp.pix2vec(2048, index)
        disc2 = hp.query_disc(2048, vec=vec_a, radius=max_angular_separation.to(u.rad).value)
        vec_b = hp.pix2vec(2048, disc2)
        ar_ang_dist_with_zero = hp.rotator.angdist(vec_a, vec_b)
        a = index
        b = disc2[ar_ang_dist_with_zero > 0]
        ar_ang_dist_with_zero = ar_ang_dist_with_zero[ar_ang_dist_with_zero > 0]
        ar_bins_float = ar_ang_dist_with_zero / max_angle * num_bins  # type: np.ndarray
        ar_bins = ar_bins_float.astype(int)
        pair_product = np.nan_to_num((ar_map[a] - disc_part_mean) * (ar_map[b] - disc_part_mean))
        ar_product += np.bincount(ar_bins, weights=pair_product, minlength=num_bins)
        ar_weights += np.bincount(ar_bins, minlength=num_bins)
    comm.Reduce(
        [ar_product, MPI.DOUBLE],
        [ar_product_reduce, MPI.DOUBLE],
        op=MPI.SUM, root=0)
    comm.Reduce(
        [ar_weights, MPI.DOUBLE],
        [ar_weights_reduce, MPI.DOUBLE],
        op=MPI.SUM, root=0)
    return ar_product_reduce, ar_weights_reduce

num_directions = 4
stripe_step_deg = 10
for current_direction_index in np.arange(num_directions):
    center_ra = 180.
    center_dec = 30.
    center_coord = SkyCoord(ra=center_ra * u.degree, dec=center_dec * u.degree)
    center_galactic = center_coord.galactic
    galactic_l = Longitude(center_galactic.l + 0 * u.degree)
    galactic_b = Latitude(
        center_galactic.b - (current_direction_index - num_directions * 0.0) * stripe_step_deg * u.degree)
    r_print("galactic l-value:", galactic_l.value)
    r_print("galactic l-value:", galactic_b.value)
    center_theta, center_phi = ra_dec2ang(ra=galactic_l.value, dec=galactic_b.value)
    vec = hp.ang2vec(theta=center_theta, phi=center_phi)
    r_print("unit vector:", vec)
    disc = hp.query_disc(2048, vec=vec, radius=10 / 180. * np.pi)
    r_print("disc has ", disc.shape[0], " pixels")
    max_angle_fixed = 5. / 180. * np.pi
    disc_mean = np.nanmean(ar_map[disc])
    ar_dec, ar_ra = hp.pix2ang(2048, disc)
    pixel_coords = SkyCoord(ra=ar_ra * u.rad, dec=ar_dec * u.rad)
    global_max_angular_separation = 5. * u.degree
    # build initial kd-tree
    # __ = matching.search_around_sky(pixel_coords[0:1],
    #                                 pixel_coords,
    #                                 global_max_angular_separation)
    for i in np.arange(1):
        ar_product_iter, ar_weights_iter = main_loop(
            max_angle=max_angle_fixed,
            disc_part_mean=disc_mean,
            disc_part=disc,
            disc_part_pixel_coords=pixel_coords,
            max_angular_separation=global_max_angular_separation
        )
        if comm.rank == 0:
            r_print("Finished direction ", current_direction_index, ", Iteration ", i)
            ar_product_total[current_direction_index] += ar_product_iter
            ar_weights_total[current_direction_index] += ar_weights_iter

r_print("total weight: ", ar_weights_total.sum())
angular_separation_bins = np.arange(num_bins, dtype=float) / num_bins * max_angle_fixed * 180. / np.pi

np.savez(
    '../../data/planck_dust_correlation.npz',
    angular_separation_bins=angular_separation_bins,
    ar_product_total=ar_product_total,
    ar_weights_total=ar_weights_total)

# plt.plot(angular_separation_bins, ar_corr)
# plt.show()
# plt.plot(angular_separation_bins, ar_weights_total)
# plt.show()
Python
0
@@ -853,31 +853,18 @@ ap(%22 -/Users/yishay/Downloads +../../data /COM @@ -4368,16 +4368,17 @@ arange(1 +0 ):%0A%0A
b15450fb774b8c6dbb1b1c181555b29f5846bc40
test mqttclient
ambulances/mqttupdate.py
ambulances/mqttupdate.py
import atexit, sys, os, time

from rest_framework.parsers import JSONParser
from rest_framework.renderers import JSONRenderer

from .mqttclient import BaseClient, MQTTException

from .models import client, Ambulance, Equipment, \
    HospitalEquipment, Hospital

from .serializers import AmbulanceSerializer, HospitalSerializer, \
    HospitalEquipmentSerializer, EquipmentSerializer, \
    ExtendedProfileSerializer

# UpdateClient class
class UpdateClient(BaseClient):

    def on_disconnect(self, client, userdata, rc):
        # Exception is generated only if never connected
        if not self.connected and rc:
            raise MQTTException('Disconnected', rc)

    def publish(self, topic, message, *vargs, **kwargs):
        self.client.publish(topic, message, *vargs, **kwargs)

    def update_topic(self, topic, serializer, qos=0, retain=False):
        # Publish to topic
        self.publish(topic, JSONRenderer().render(serializer.data), qos=qos, retain=retain)

    def remove_topic(self, topic, serializer, qos=0):
        # Publish null to retained topic
        self.publish(topic, null, qos=qos, retain=True)

    def update_profile(self, profile, qos=2, retain=True):
        self.update_topic('user/{}/profile'.format(profile.user.username),
                          ExtendedProfileSerializer(profile), qos=qos, retain=retain)

    def update_ambulance(self, ambulance, qos=2, retain=True):
        self.update_topic('ambulance/{}/data'.format(ambulance.id),
                          AmbulanceSerializer(ambulance), qos=qos, retain=retain)

    def remove_ambulance(self, ambulance):
        self.remove_topic('ambulance/{}/data'.format(ambulance.id))

    def update_hospital(self, hospital, qos=2, retain=True):
        self.update_topic('hospital/{}/data'.format(hospital.id),
                          HospitalSerializer(hospital), qos=qos, retain=retain)

    def remove_hospital(self, hospital):
        self.remove_topic('hospital/{}/data'.format(hospital.id))
        self.remove_topic('hospital/{}/metadata'.format(hospital.id))

    def update_hospital_metadata(self, hospital, qos=2, retain=True):
        hospital_equipment = hospital.hospitalequipment_set.values('equipment')
        equipment = Equipment.objects.filter(id__in=hospital_equipment)
        self.update_topic('hospital/{}/metadata'.format(hospital.id),
                          EquipmentSerializer(equipment, many=True), qos=qos, retain=retain)

    def update_hospital_equipment(self, equipment, qos=2, retain=True):
        self.update_topic('hospital/{}/equipment/{}/data'.format(equipment.hospital.id,
                                                                 equipment.equipment.name),
                          HospitalEquipmentSerializer(equipment), qos=qos, retain=retain)

    def remove_hospital_equipment(self, equipment):
        self.remove_topic('hospital/{}/equipment/{}/data'.format(equipment.hospital.id,
                                                                 equipment.equipment.name))

# Start client

from django.core.management.base import OutputWrapper
from django.core.management.color import color_style, no_style
from django.conf import settings

stdout = OutputWrapper(sys.stdout)
style = color_style()

# Instantiate broker
broker = {
    'HOST': 'localhost',
    'PORT': 1883,
    'KEEPALIVE': 60,
    'CLEAN_SESSION': True
}

broker.update(settings.MQTT)
broker['CLIENT_ID'] = 'mqttupdate_' + str(os.getpid())

try:
    # try to connect
    print('Connecting to MQTT brocker...')
    local_client = UpdateClient(broker, stdout, style, 0)

    # wait for connection
    while not local_client.connected:
        local_client.loop()

    # start loop
    local_client.loop_start()

    # register atexit handler to make sure it disconnects at exit
    atexit.register(local_client.disconnect)

    client = local_client

except Exception as e:
    print('Could not connect to MQTT brocker. Using dumb client...')
Python
0.000001
@@ -705,128 +705,8 @@ %0A - def publish(self, topic, message, *vargs, **kwargs):%0A self.client.publish(topic, message, *vargs, **kwargs)%0A%0A
c3367eaa7bccf5843abd12a438e14518d533cdbe
Allow API on Windows
platformio_api/__init__.py
platformio_api/__init__.py
# Copyright 2014-present Ivan Kravets <me@ikravets.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import logging.config
import os
from time import tzset

VERSION = (1, 18, 1)
__version__ = ".".join([str(s) for s in VERSION])

__title__ = "platformio-api"
__description__ = ("An API for PlatformIO")
__url__ = "https://github.com/ivankravets/platformio-api"

__author__ = "Ivan Kravets"
__email__ = "me@ikravets.com"

__license__ = "MIT License"
__copyright__ = "Copyright (C) 2014-2017 Ivan Kravets"

config = dict(
    SQLALCHEMY_DATABASE_URI=None,
    GITHUB_LOGIN=None,
    GITHUB_PASSWORD=None,
    DL_PIO_DIR=None,
    DL_PIO_URL=None,
    MAX_DLFILE_SIZE=1024 * 1024 * 150,  # 150 Mb
    # Fuzzy search will not be applied to words shorter than the value below
    SOLR_FUZZY_MIN_WORD_LENGTH=3,
    LOGGING=dict(version=1)
)

assert "PIOAPI_CONFIG_PATH" in os.environ
with open(os.environ.get("PIOAPI_CONFIG_PATH")) as f:
    config.update(json.load(f))

# configure logging for packages
logging.basicConfig()
logging.config.dictConfig(config['LOGGING'])

# setup time zone to UTC globally
os.environ['TZ'] = "+00:00"
tzset()
Python
0.000001
@@ -643,31 +643,8 @@ os%0A -from time import tzset%0A %0A%0AVE @@ -1617,11 +1617,76 @@ 0%22%0At -zset() +ry:%0A from time import tzset%0A tzset()%0Aexcept ImportError:%0A pass %0A
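time.tzset() is POSIX-only and absent on Windows, so a module-level import kills the whole package there; the diff defers the import and swallows ImportError. The resulting pattern, shown standalone:

import os

os.environ['TZ'] = "+00:00"
try:
    # POSIX only: re-read TZ and apply it process-wide.
    from time import tzset
    tzset()
except ImportError:
    # Windows: no tzset(); the process keeps its default time zone.
    pass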
7169cbb9aff103854cad7e8167446c56c7bcc5f4
Revise to internal helper function _findKth()
lc004_median_of_two_sorted_arrays.py
lc004_median_of_two_sorted_arrays.py
"""Leetcode 4. Median of Two Sorted Arrays Hard There are two sorted arrays nums1 and nums2 of size m and n respectively. Find the median of the two sorted arrays. The overall run time complexity should be O(log (m+n)). You may assume nums1 and nums2 cannot be both empty. Example 1: nums1 = [1, 3] nums2 = [2] The median is 2.0 Example 2: nums1 = [1, 2] nums2 = [3, 4] The median is (2 + 3)/2 = 2.5 """ class Solution(object): def findMedianSortedArrays(self, nums1, nums2): """ :type nums1: List[int] :type nums2: List[int] :rtype: float Time complexity: O(log(m + n)) Space complexity: O(1) """ l = len(nums1) + len(nums2) if l % 2 == 1: # l // 2 + 1 - 1 => median index of even numbers return self.findKth(nums1, nums2, l // 2) else: return ( self.findKth(nums1, nums2, l // 2 - 1) + self.findKth(nums1, nums2, l // 2)) / 2.0 def findKth(self, nums1, nums2, k): # Base cases for the divide-and-conquer method. if not nums1: return nums2[k] if not nums2: return nums1[k] i1, i2 = len(nums1) // 2, len(nums2) // 2 n1, n2 = nums1[i1], nums2[i2] # When k is smaller than or equal to the sum of nums1 & nums2's # middle indices. if k <= i1 + i2: # When nums1's middle element is bigger than nums2's, # the 2nd half of nums1 does not contain the kth. if n1 > n2: return self.findKth(nums1[:i1], nums2, k) else: return self.findKth(nums1, nums2[:i2], k) # When k is bigger than the sum of nums1 & nums2's middle indices. else: # When nums1's middle element is bigger than nums2's, # the 1st half of nums2 does not contain the kth. if n1 > n2: return self.findKth(nums1, nums2[(i2 + 1):], k - i2 - 1) else: return self.findKth(nums1[(i1 + 1):], nums2, k - i1 - 1) def main(): import time start_time = time.time() nums1 = [1, 3] nums2 = [2] print(Solution().findMedianSortedArrays(nums1, nums2)) nums1 = [1, 2] nums2 = [3, 4] print(Solution().findMedianSortedArrays(nums1, nums2)) print('Time: {}'.format(time.time() - start_time)) if __name__ == '__main__': main()
Python
0.000804
@@ -440,569 +440,9 @@ def -findMedianSortedArrays(self, nums1, nums2):%0A %22%22%22%0A :type nums1: List%5Bint%5D%0A :type nums2: List%5Bint%5D%0A :rtype: float%0A%0A Time complexity: O(log(m + n))%0A Space complexity: O(1)%0A %22%22%22%0A l = len(nums1) + len(nums2)%0A if l %25 2 == 1:%0A # l // 2 + 1 - 1 =%3E median index of even numbers%0A return self.findKth(nums1, nums2, l // 2)%0A else:%0A return (%0A self.findKth(nums1, nums2, l // 2 - 1)%0A + self.findKth(nums1, nums2, l // 2)) / 2.0%0A%0A %0A def +_ find @@ -1013,32 +1013,33 @@ return self. +_ findKth(nums1%5B:i @@ -1090,32 +1090,33 @@ return self. +_ findKth(nums1, n @@ -1390,32 +1390,33 @@ return self. +_ findKth(nums1, n @@ -1490,16 +1490,17 @@ rn self. +_ findKth( @@ -1537,16 +1537,575 @@ 1 - 1)%0A%0A + def findMedianSortedArrays(self, nums1, nums2):%0A %22%22%22%0A :type nums1: List%5Bint%5D%0A :type nums2: List%5Bint%5D%0A :rtype: float%0A%0A Time complexity: O(log(m + n))%0A Space complexity: O(1)%0A %22%22%22%0A l = len(nums1) + len(nums2)%0A if l %25 2 == 1:%0A # l // 2 + 1 - 1 =%3E median index of even numbers%0A return self._findKth(nums1, nums2, l // 2)%0A else:%0A return (%0A self._findKth(nums1, nums2, l // 2 - 1)%0A + self._findKth(nums1, nums2, l // 2)) / 2.0%0A%0A %0Adef mai @@ -2160,16 +2160,30 @@ time()%0A%0A + # Ans: 2.%0A nums @@ -2265,24 +2265,40 @@ 1, nums2))%0A%0A + # Ans: 2.5.%0A nums1 =
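The findKth helper in the record above (renamed _findKth by the diff) is a kth-smallest selection over two sorted arrays: each step compares the two middle elements and discards a half that provably cannot contain the kth value. A standalone, 0-indexed version, traced on the docstring's example inputs:

def find_kth(a, b, k):
    # kth smallest (0-indexed) across two sorted lists.
    if not a:
        return b[k]
    if not b:
        return a[k]
    ia, ib = len(a) // 2, len(b) // 2
    if k <= ia + ib:
        # The larger middle's upper half cannot hold the kth element.
        if a[ia] > b[ib]:
            return find_kth(a[:ia], b, k)
        return find_kth(a, b[:ib], k)
    # k beyond both middles: the smaller middle's lower half is safe to drop.
    if a[ia] > b[ib]:
        return find_kth(a, b[ib + 1:], k - ib - 1)
    return find_kth(a[ia + 1:], b, k - ia - 1)

print(find_kth([1, 3], [2], 1))     # 2  -> median is 2.0
print(find_kth([1, 2], [3, 4], 1))  # 2
print(find_kth([1, 2], [3, 4], 2))  # 3  -> median is (2 + 3) / 2 = 2.5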
dc743c63c52c7ef0bcab73d7b4fcf8f3f4a54ea6
make median transmittance optional in plot_mean_transmittance.
plot_mean_transmittance.py
plot_mean_transmittance.py
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import host_subplot

import common_settings
import mean_transmittance
import median_transmittance

lya_center = 1215.67

settings = common_settings.Settings()

def do_plot():
    m = mean_transmittance.MeanTransmittance.from_file(settings.get_mean_transmittance_npy())
    med = median_transmittance.MedianTransmittance.from_file(settings.get_median_transmittance_npy())
    ar_z, mean = m.get_weighted_mean_with_minimum_count(1)
    ar_z_med, ar_median = med.get_weighted_median_with_minimum_count(1)
    ar_z_med, ar_unweighted_median = med.get_weighted_median_with_minimum_count(1, weighted=False)
    # low_pass_mean = m.get_low_pass_mean()[1]

    fig = plt.figure(figsize=(14, 10))
    ax1 = fig.add_subplot(2, 1, 1)
    ax2 = ax1.twiny()
    ax1.plot(ar_z, mean)
    # ax1.plot(ar_z, low_pass_mean, color='red')
    ax1.plot(ar_z_med, ar_median, color='orange')
    ax1.plot(ar_z_med, ar_unweighted_median, color='green')
    ax1.set_ylabel(r"$\left< f_q(z)/C_q(z) \right> $")

    plt.ylim(0.0, 1.2)

    # add wavelength tick marks on top
    x_lim2 = tuple([lya_center * (1 + z) for z in ax1.get_xlim()])
    ax2.set_xlim(x_lim2)

    plt.axis()

    ax3 = host_subplot(2, 1, 2)
    ax4 = ax3.twinx()
    ax4.set_ylabel(r"$N_{Spectra}$")
    ax3.plot(m.ar_z, m.ar_total_flux, color='blue', label=r"Total flux$\times$ weight")
    ax3.plot(m.ar_z, m.ar_weights, ':', color='green', label='Total weight')
    ax4.plot(m.ar_z, m.ar_count, ':', color='red', label='Spectra count')
    ax3.set_xlim(ax1.get_xlim())
    ax3.set_ylabel(r"$\sum_q f_q(z)/C_q(z)$")
    ax3.set_xlabel(r"$z$")
    ax3.legend(loc='best')

    plt.show()

if __name__ == '__main__':
    do_plot()
Python
0.000001
@@ -213,16 +213,52 @@ tings()%0A +enable_median_transmittance = False%0A %0A%0Adef do @@ -356,24 +356,342 @@ ance_npy())%0A + ar_z, mean = m.get_weighted_mean_with_minimum_count(1)%0A # low_pass_mean = m.get_low_pass_mean()%5B1%5D%0A%0A fig = plt.figure(figsize=(14, 10))%0A ax1 = fig.add_subplot(2, 1, 1)%0A ax2 = ax1.twiny()%0A ax1.plot(ar_z, mean)%0A # ax1.plot(ar_z, low_pass_mean, color='red')%0A%0A if enable_median_transmittance:%0A med = me @@ -789,67 +789,12 @@ -ar_z, mean = m.get_weighted_mean_with_minimum_count(1)%0A + - ar_z @@ -853,24 +853,28 @@ um_count(1)%0A + ar_z_med @@ -964,226 +964,13 @@ se)%0A +%0A -# low_pass_mean = m.get_low_pass_mean()%5B1%5D%0A%0A fig = plt.figure(figsize=(14, 10))%0A ax1 = fig.add_subplot(2, 1, 1)%0A ax2 = ax1.twiny()%0A ax1.plot(ar_z, mean)%0A # ax1.plot(ar_z, low_pass_mean, color='red')%0A @@ -1015,16 +1015,20 @@ range')%0A + ax1.
4173221d72356fc336be63273a7252c81831fd54
fix datetime_to_string
ephim/utils.py
ephim/utils.py
from datetime import datetime
import string

def to_base(num, b, numerals=string.digits + string.ascii_lowercase):
    return ((num == 0) and numerals[0]) or (to_base(num // b, b, numerals).lstrip(numerals[0]) + numerals[num % b])

def datetime_to_string(dt: datetime):
    delta = dt - datetime.fromtimestamp(0)

    ### 0
    # return dt.strftime('%Y-%m-%d %H.%M.%S.')

    ### 1
    # ts = int(dt.timestamp())
    # return '{sign}{ts}'.format(
    #     sign='N' if ts < 0 else 'P',
    #     ts=abs(ts),
    # )

    ### 2
    return '{sign}{days}_{seconds}'.format(
        sign='0' if delta.days < 0 else '',
        days=to_base(abs(delta.days), 36, string.digits + string.ascii_uppercase),
        seconds=str(delta.seconds).zfill(5),
    )

    # return '{sign}{days}_{seconds}'.format(
    #     sign='n' if delta.days < 0 else 'p',
    #     days=to_base(abs(delta.days), 36),
    #     seconds=str(delta.seconds).zfill(5),
    # )
    # return str(dt.strftime('%Y%m%d')) + '_' + str(delta.seconds).zfill(5)
    # return str(int(dt.timestamp()))
    # return to_base(int(dt.timestamp()), 36)
    # return '{days}_{seconds}'.format(
    #     days=to_base(abs(delta.days), 26, string.ascii_uppercase),
    #     seconds=str(delta.seconds).zfill(5),
    # )
    # print(delta.seconds)
    # return '{days}{seconds}'.format(
    #     days=to_base(abs(delta.days), 26, string.ascii_uppercase),
    #     # hours=to_base(delta.seconds // 3600, 26, string.ascii_uppercase),
    #     seconds=str(delta.seconds).zfill(5),
    #     # seconds=dt.strftime('%H%M'),
    # )
    # return to_base(int(dt.timestamp()), 26, string.ascii_lowercase)
Python
0.004109
@@ -291,16 +291,19 @@ atetime. +utc fromtime
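The diff swaps datetime.fromtimestamp(0) for datetime.utcfromtimestamp(0): the former converts the epoch into the machine's local zone, so the delta (and every encoded name) would shift with the TZ setting. A quick demonstration:

from datetime import datetime

local_epoch = datetime.fromtimestamp(0)   # e.g. 1970-01-01 01:00 on a CET machine
utc_epoch = datetime.utcfromtimestamp(0)  # always 1970-01-01 00:00

delta = datetime(2000, 1, 1) - utc_epoch
print(utc_epoch, delta.days)  # 1970-01-01 00:00:00 10957, regardless of TZ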
a4e14854339cc8f0677e32390d9a974266007c01
Refactor, split into functions
django_smoke_tests/management/commands/smoke_tests.py
django_smoke_tests/management/commands/smoke_tests.py
from django.core.management import BaseCommand, call_command

try:
    from django.urls import get_resolver
except ImportError:
    from django.core.urlresolvers import get_resolver

from ...tests import SmokeTests

def _test_generator(url, method, detail_url=False):
    def test(self):
        if method == 'GET':
            response = self.client.get(url)
        elif method == 'POST':
            response = self.client.post(url, {})
        elif method == 'DELETE':
            response = self.client.delete(url)

        allowed_status_codes = [200, 201, 301, 302, 304, 405]
        if detail_url:
            allowed_status_codes.append(404)
        self.assertIn(response.status_code, allowed_status_codes)
    return test

class Command(BaseCommand):
    help = "Smoke"

    METHODS_TO_TEST = ['GET', 'POST', 'DELETE']

    def handle(self, *args, **options):
        all_endpoints = get_resolver(None).reverse_dict

        for endpoint, endpoint_params in all_endpoints.items():
            if isinstance(endpoint, str):
                [(url_as_str, url_params)], url_pattern, _ = endpoint_params
                mocked_params = {param: 'random' for param in url_params}
                ready_url = url_as_str % mocked_params
                ready_url = ready_url if ready_url.startswith('/') else '/{}'.format(ready_url)
                self.create_tests_for_endpoint(ready_url, endpoint, detail_url=bool(url_params))

        call_command('test', 'django_smoke_tests')

    def create_tests_for_endpoint(self, url, endpoint, detail_url=False):
        for method in self.METHODS_TO_TEST:
            test = _test_generator(url, method, detail_url)
            setattr(SmokeTests, 'test_smoke_{}_{}'.format(method, endpoint), test)
Python
0.999773
@@ -1,24 +1,53 @@ +import random%0Aimport string%0A%0A from django.core.managem @@ -239,16 +239,420 @@ Tests%0A%0A%0A +class Command(BaseCommand):%0A help = %22Smoke%22%0A%0A METHODS_TO_TEST = %5B'GET', 'POST', 'DELETE'%5D%0A%0A def handle(self, *args, **options):%0A%0A all_endpoints = get_resolver(None).reverse_dict%0A%0A for endpoint, endpoint_params in all_endpoints.items():%0A self.create_tests_for_endpoint(endpoint, endpoint_params)%0A%0A call_command('test', 'django_smoke_tests')%0A%0A @staticmethod%0A def _tes @@ -691,24 +691,28 @@ url=False):%0A + def test @@ -719,16 +719,20 @@ (self):%0A + @@ -755,32 +755,36 @@ T':%0A + response = self. @@ -799,32 +799,36 @@ et(url)%0A + elif method == ' @@ -838,32 +838,36 @@ T':%0A + response = self. @@ -887,32 +887,36 @@ rl, %7B%7D)%0A + elif method == ' @@ -928,32 +928,36 @@ E':%0A + response = self. @@ -976,16 +976,20 @@ e(url)%0A%0A + @@ -1050,16 +1050,20 @@ + if detai @@ -1069,16 +1069,20 @@ il_url:%0A + @@ -1118,16 +1118,20 @@ nd(404)%0A + @@ -1188,16 +1188,21 @@ _codes)%0A +%0A retu @@ -1214,272 +1214,76 @@ st%0A%0A -%0Aclass Command(BaseCommand):%0A help = %22Smoke%22%0A%0A METHODS_TO_TEST = %5B'GET', 'POST', 'DELETE'%5D%0A%0A def handle(self, *args, **options):%0A%0A all_endpoints = get_resolver(None).reverse_dict%0A%0A for endpoint, endpoint_params in all_endpoints.items():%0A + def create_tests_for_endpoint(self, endpoint, endpoint_params):%0A @@ -1316,20 +1316,16 @@ , str):%0A - @@ -1406,18 +1406,12 @@ - moc +fa ke -d _par @@ -1428,16 +1428,35 @@ am: -'random' +self.create_random_string() for @@ -1477,17 +1477,16 @@ params%7D%0A -%0A @@ -1489,32 +1489,38 @@ - ready +url = self.create _url - = +( url_as_s @@ -1525,27 +1525,23 @@ _str - %25 moc +, fa ke -d _params +) %0A - @@ -1552,237 +1552,395 @@ - ready_url = ready_url if ready_url.startswith('/') else '/%7B%7D'.format(ready_url)%0A self.create_tests_for_endpoint(ready_url, endpoint, detail_url=bool(url_params))%0A%0A call_command('test', 'django_smoke_tests' +self.create_tests_for_http_methods(url, endpoint, detail_url=bool(url_params))%0A%0A @staticmethod%0A def create_random_string(length=5):%0A return ''.join(random.choice(string.ascii_lowercase) for _ in range(length))%0A%0A @staticmethod%0A def create_url(url_as_str, parameters):%0A url = url_as_str %25 parameters%0A return url if url.startswith('/') else '/%7B%7D'.format(url )%0A%0A @@ -1959,32 +1959,36 @@ e_tests_for_ -endpoint +http_methods (self, url, @@ -2079,16 +2079,21 @@ test = +self. _test_ge
f96cb9a60882bc19fef5f7b3be4a8063f2e99fa2
Add exception handling to scheduler start experiment
api/spawner/scheduler.py
api/spawner/scheduler.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function

import json
import uuid

from django.conf import settings

from polyaxon_schemas.utils import TaskType

from rest_framework import fields

from api.utils import config
from experiments.serializers import ExperimentJobSerializer
from spawner import K8SSpawner
from experiments.models import ExperimentJob
from spawner.utils.constants import ExperimentLifeCycle

def start_experiment(experiment):
    # Update experiment status to show that its started
    experiment.set_status(ExperimentLifeCycle.SCHEDULED)

    project = experiment.project
    group = experiment.experiment_group

    # Use spawner to start the experiment
    spawner = K8SSpawner(project_name=project.unique_name,
                         experiment_name=experiment.unique_name,
                         experiment_group_name=group.unique_name if group else None,
                         project_uuid=project.uuid.hex,
                         experiment_group_uuid=group.uuid.hex if group else None,
                         experiment_uuid=experiment.uuid.hex,
                         spec_config=experiment.config,
                         k8s_config=settings.K8S_CONFIG,
                         namespace=settings.K8S_NAMESPACE,
                         in_cluster=True,
                         use_sidecar=True,
                         sidecar_config=config.get_requested_params(to_str=True))
    resp = spawner.start_experiment()

    # Get the number of jobs this experiment started
    master = resp[TaskType.MASTER]
    job_uuid = master['pod']['metadata']['labels']['job_uuid']
    job_uuid = uuid.UUID(job_uuid)

    def get_definition(definition):
        serializer = ExperimentJobSerializer(data={
            'definition': json.dumps(definition, default=fields.DateTimeField().to_representation)
        })
        serializer.is_valid()
        return json.loads(serializer.validated_data['definition'])

    ExperimentJob.objects.create(uuid=job_uuid,
                                 experiment=experiment,
                                 definition=get_definition(master))
    for worker in resp[TaskType.WORKER]:
        job_uuid = worker['pod']['metadata']['labels']['job_uuid']
        job_uuid = uuid.UUID(job_uuid)
        ExperimentJob.objects.create(uuid=job_uuid,
                                     experiment=experiment,
                                     definition=get_definition(worker))
    for ps in resp[TaskType.PS]:
        job_uuid = ps['pod']['metadata']['labels']['job_uuid']
        job_uuid = uuid.UUID(job_uuid)
        ExperimentJob.objects.create(uuid=job_uuid,
                                     experiment=experiment,
                                     definition=get_definition(ps))

def stop_experiment(experiment, update_status=False):
    project = experiment.project
    group = experiment.experiment_group
    spawner = K8SSpawner(project_name=project.unique_name,
                         experiment_name=experiment.unique_name,
                         experiment_group_name=group.unique_name if group else None,
                         project_uuid=project.uuid.hex,
                         experiment_group_uuid=group.uuid.hex if group else None,
                         experiment_uuid=experiment.uuid.hex,
                         spec_config=experiment.config,
                         k8s_config=settings.K8S_CONFIG,
                         namespace=settings.K8S_NAMESPACE,
                         in_cluster=True,
                         use_sidecar=True,
                         sidecar_config=config.get_requested_params(to_str=True))
    spawner.stop_experiment()

    if update_status:
        # Update experiment status to show that its deleted
        experiment.set_status(ExperimentLifeCycle.DELETED)
Python
0.000001
@@ -95,16 +95,31 @@ rt json%0A +import logging%0A import u @@ -122,16 +122,16 @@ rt uuid%0A - %0Afrom dj @@ -155,16 +155,64 @@ settings +%0Afrom kubernetes.client.rest import ApiException %0A%0Afrom p @@ -372,16 +372,59 @@ rializer +%0Afrom repos.dockerize import get_image_info %0A%0Afrom s @@ -550,16 +550,66 @@ eCycle%0A%0A +logger = logging.getLogger('polyaxon.scheduler')%0A%0A %0Adef sta @@ -823,16 +823,442 @@ _group%0A%0A + job_docker_image = None # This will force the spawner to use the default docker image%0A if experiment.compiled_spec.run_exec:%0A image_name, image_tag = get_image_info(experiment=experiment)%0A job_docker_image = '%7B%7D:%7B%7D'.format(image_name, image_tag)%0A logger.info('Start experiment with built image %60%7B%7D%60'.format(job_docker_image))%0A else:%0A logger.info('Start experiment with default image.')%0A%0A # Us @@ -1906,32 +1906,92 @@ n_cluster=True,%0A + job_docker_image=job_docker_image,%0A @@ -2095,24 +2095,37 @@ _str=True))%0A + try:%0A resp = s @@ -2149,16 +2149,205 @@ riment() +%0A except ApiException:%0A logger.warning('Could not start the experiment, please check your polyaxon spec.')%0A experiment.set_status(ExperimentLifeCycle.FAILED)%0A return %0A%0A #
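The diff wraps the Kubernetes call so a bad spec marks the experiment FAILED instead of crashing the scheduler. A sketch of that guard with hypothetical stand-ins for the spawner/experiment objects (the ApiException import path is taken from the diff itself):

from kubernetes.client.rest import ApiException

def guarded_start(spawner, experiment, logger):
    try:
        return spawner.start_experiment()
    except ApiException:
        logger.warning('Could not start the experiment, '
                       'please check your polyaxon spec.')
        experiment.set_status('FAILED')  # the real code uses ExperimentLifeCycle.FAILED
        return None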
58ab8c5ebafad2109b8d8f19c44adbb11fe18c02
Fix broken or_else implementation
pygow/maybe.py
pygow/maybe.py
class Just:
    a = None

    def __init__(self, a):
        self.a = a

    def __eq__(self, other):
        return (isinstance(other, self.__class__)
                and self.a == other.a)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __str__(self):
        return 'Just(%s)' % self.a

    def is_just(self):
        return True

    def map(self, f):
        return Just(f(self.a))

    def flat_map(self, f):
        return f(self.a)

    def or_else(self, x):
        return self

    def get_or_else(self, x):
        return self.a

class Nothing:
    def __eq__(self, other):
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        return not self.__eq__(other)

    def is_just(self):
        return False

    def __str__(self):
        return 'Nothing()'

    def map(self, f):
        return Nothing()

    def flat_map(self, f):
        return Nothing()

    def or_else(self, x):
        return self

    def get_or_else(self, x):
        return x

def get_maybe_env(name):
    from os import getenv
    value = getenv(name)
    if value is None:
        return Nothing()
    else:
        return Just(value)

def non_empty_string(x):
    if len(x.strip()) is 0:
        return Nothing()
    else:
        return Just(x)

def parse_int(x):
    try:
        return Just(int(x))
    except:
        return Nothing()

def maybe(x):
    if x is None:
        return Nothing()
    else:
        return Just(x)
Python
0.000003
@@ -451,32 +451,36 @@ self.a)%0A def +get_ or_else(self, x) @@ -492,32 +492,34 @@ return self +.a %0A def get_or_ @@ -503,36 +503,32 @@ self.a%0A def -get_ or_else(self, x) @@ -548,18 +548,16 @@ urn self -.a %0A%0Aclass @@ -903,32 +903,36 @@ thing()%0A def +get_ or_else(self, x) @@ -948,20 +948,17 @@ return -self +x %0A def @@ -950,36 +950,32 @@ eturn x%0A def -get_ or_else(self, x)
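After the fix, the intended contract (using the Just/Nothing/parse_int definitions in the record above; the import path is assumed from the file location): or_else substitutes a fallback Maybe when the value is Nothing, while get_or_else unwraps to a plain value.

from pygow.maybe import Just, parse_int

assert parse_int('7').get_or_else(0) == 7            # unwrapped value
assert parse_int('x').get_or_else(0) == 0            # plain fallback
assert parse_int('7').or_else(Just(0)) == Just(7)    # original Maybe kept
assert parse_int('x').or_else(Just(0)) == Just(0)    # fallback Maybe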
e185f187b6d8188e05f763fb2627a3fa9757cca6
Add untranslate
emoticry/emoticry.py
emoticry/emoticry.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os

"""emoticry.emoticry

This file contains the bulk of logic for the emoticry library.
"""

class Translation(object):
    """
    A generic Translation class for translating filenames.
    By default Translates names into hex.
    """
    def __init__(self, table=[hex(_)[2:] for _ in range(256)]):
        self.table = table

    def translate(self, name):
        new_name = ''
        for c in name:
            if len(c) == 1:
                new_name += self.table[ord(c)]
            else:
                new_name += c
        return new_name

emoji_translation = Translation(
    table=[
        '😁', '😂', '😃', '😄', '😅', '😆', '😉', '😊', '😋', '😌',
        '😍', '😏', '😒', '😓', '😔', '😖', '😘', '😚', '😜', '😝',
        '😞', '😠', '😡', '😢', '😣', '😤', '😥', '😨', '😩', '😪',
        '😫', '😭', '😰', '😱', '😲', '😳', '😵', '😷', '😸', '😹',
        '😺', '😻', '😼', '😽', '😾', '😿', '🙀', '🙅', '🙆', '🙇',
        '🙈', '🙉', '🙊', '🙋', '🙌', '🙍', '🙎', '✂', '✅', '✈',
        '✉', '✊', '✋', '✌', '✏', '✒', '✔', '✖', '✨', '✳',
        '✴', '❄', '❇', '❌', '❎', '❓', '❔', '❕', '❗', '❤',
        '➕', '➖', '➗', '➡', '➰', '🚀', '🚃', '🚄', '🚅', '🚇',
        '🚉', '🚌', '🚏', '🚑', '🚒', '🚓', '🚕', '🚗', '🚙', '🚚',
        '🚢', '🚤', '🚥', '🚧', '🚨', '🚩', '🚪', '🚫', '🚬', '🚭',
        '🚲', '🚶', '🚹', '🚺', '🚻', '🚼', '🚽', '🚾', '🛀', 'Ⓜ',
        '©', '®', '‼', '⁉', '8⃣', '9⃣', '7⃣', '6⃣', '1⃣', '0⃣',
        '2⃣', '3⃣', '5⃣', '4⃣', '#⃣', '™', 'ℹ', '↔', '↕', '↖',
        '↗', '↘', '↙', '↩', '↪', '⌚', '⌛', '⏩', '⏪', '⏫',
        '⏬', '⏰', '⏳', '▪', '▫', '▶', '◀', '◻', '◼', '◽',
        '◾', '☀', '☁', '☎', '☔', '☕', '☝', '☺', '♈', '♉',
        '♊', '♋', '♌', '♍', '♎', '♏', '♐', '♑', '♒', '♓',
        '♠', '♥', '♦', '♨', '♿', '⚓', '⚠', '⚡', '⚪', '⚫',
        '⚾', '⛄', '⛅', '⛔', '⛪', '⛲', '⛵', '⛺', '⛽', '⤵',
        '⬅', '⬆', '⬛', '⬜', '⭐', '〰', '〽', '㊗', '🃏', '🌀',
        '🌁', '🌂', '🌄', '🌅', '🌆', '🌈', '🌉', '🌊', '🌋', '🌑',
        '🌓', '🌔', '🌕', '🌙', '🌛', '🌟', '🌠', '🌰', '🌱', '🌴',
        '🌸', '🌹', '🌺', '🌼', '🌽', '🌾', '🍀', '🍁', '🍂', '🍄',
        '🍅', '🍆', '🍈', '🍉', '🍊', '🍌', '🍎', '🍏', '🍑', '🍓',
        '🍔', '🍕', '🍖', '🍗', '🍙', '🍚']
)

def emojify(directory='.', recursive=False, translation=emoji_translation):
    """
    For the given directory, iterate over all files and folders within it
    (optionally recursively) and translate the file name characters to emoji.
    """
    if recursive:
        for files in os.walk(directory, topdown=True):
            path = files[0]
            directories = files[1]
            filenames = files[2]
            for i, d in enumerate(directories):
                os.rename(os.path.join(path, d), os.path.join(path, translation.translate(d)))
                directories[i] = translation.translate(d)
            for i, f in enumerate(filenames):
                os.rename(os.path.join(path, f), os.path.join(path, translation.translate(f)))
                filenames[i] = translation.translate(f)
    else:
        for f in os.listdir(directory):
            os.rename(os.path.join(directory, f), os.path.join(directory, translation.translate(f)))
Python
0.000204
@@ -598,16 +598,228 @@ ew_name%0A + %0A def untranslate(self, name):%0A new_name = ''%0A for c in name:%0A if len(c) == 1:%0A new_name += chr(self.table.index(c))%0A else:%0A new_name += c%0A %0A%0Aemoji_
2aca9f77b6f5b8171ec33906a66cd805f57937a0
Fix mistake with previous commit.
localeurl/templatetags/localeurl_tags.py
localeurl/templatetags/localeurl_tags.py
# Copyright (c) 2008 Joost Cassee
# Licensed under the terms of the MIT License (see LICENSE.txt)

from django import template
from django.template import Node, Token, TemplateSyntaxError
from django.template import resolve_variable, defaulttags
from django.template.defaultfilters import stringfilter
from django.conf import settings
from django.utils import translation
import localeurl
from localeurl.utils import strip_locale_prefix, get_language

register = template.Library()

def chlocale(path, locale):
    """
    Changes the path's locale prefix if the path is not locale-independent.
    Otherwise removes locale prefix.
    """
    if not localeurl.PREFIX_DEFAULT_LOCALE and \
            get_language(locale) == get_language(settings.LANGUAGE_CODE):
        return rmlocale(path)
    if is_locale_independent(rmed):
        return rmlocale(path)
    else:
        return '/' + get_language(locale) + rmlocale(path)

chlocale = stringfilter(chlocale)
register.filter('chlocale', chlocale)

def rmlocale(url):
    """Removes the locale prefix from the path."""
    return strip_locale_prefix(url)

rmlocale = stringfilter(rmlocale)
register.filter('rmlocale', rmlocale)

def locale_url(parser, token):
    """
    Renders the url for the view with another locale prefix. The syntax is
    like the 'url' tag, only with a locale before the view.

    Examples:
      {% locale_url "de" cal.views.day day %}
      {% locale_url "nl" cal.views.home %}
      {% locale_url "en-gb" cal.views.month month as month_url %}
    """
    bits = token.split_contents()
    if len(bits) < 3:
        raise TemplateSyntaxError("'%s' takes at least two arguments:"
                                  " the locale and a view" % bits[0])
    urltoken = Token(token.token_type, bits[0] + ' ' + ' '.join(bits[2:]))
    urlnode = defaulttags.url(parser, urltoken)
    return LocaleURLNode(bits[1], urlnode)

class LocaleURLNode(Node):
    def __init__(self, locale, urlnode):
        self.locale = locale
        self.urlnode = urlnode

    def render(self, context):
        locale = resolve_variable(self.locale, context)
        path = self.urlnode.render(context)
        if self.urlnode.asvar:
            self.urlnode.render(context)
            context[self.urlnode.asvar] = chlocale(context[self.urlnode.asvar], locale)
            return ''
        else:
            return chlocale(path, locale)

register.tag('locale_url', locale_url)
Python
0.999999
@@ -405,24 +405,47 @@ utils import + is_locale_independent, strip_local @@ -453,16 +453,26 @@ _prefix, + %5C%0A get_lan @@ -663,24 +663,59 @@ ix.%0A %22%22%22%0A + stripped_path = rmlocale(path)%0A if not l @@ -836,38 +836,37 @@ return -rmlocale( +stripped_ path -) %0A if is_l @@ -887,12 +887,21 @@ ent( -rmed +stripped_path ):%0A @@ -914,30 +914,29 @@ return -rmlocale( +stripped_ path -) %0A els @@ -982,30 +982,29 @@ cale) + -rmlocale( +stripped_ path -) %0A%0Achloca
ce339d471b5f9a15ac666c968cd8c191e6cf21ee
Check if schedule is str instance
pyup/config.py
pyup/config.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals

import yaml

try:  # pragma: no cover
    basestring
except NameError:  # pragma: no cover
    basestring = str

import re

SCHEDULE_REGEX = re.compile(
    # has to begin with every
    r"^every "
    # followed by day/month
    "((day|month)$"
    # or week/two weeks
    "|(week|two weeks))"
    # with an optional weekday
    "( on (monday|tuesday|wednesday|thursday|friday|saturday|sunday))?",
    re.IGNORECASE
)

class Config(object):
    UPDATE_ALL = "all"
    UPDATE_INSECURE = "insecure"
    # the docs had a typo at some point that incorrectly reffered to 'security'
    # instead of 'insecure'.
    UPDATE_INSECURE_TYPO = "security"
    UPDATE_NONE = ["False", "false", False, None]

    def __init__(self):
        self.close_prs = True
        self.branch = "master"
        self.branch_prefix = "pyup-"
        self.pr_prefix = ""
        self.pin = True
        self.search = True
        self.requirements = []
        self.label_prs = False
        self.schedule = ""
        self.assignees = []
        self.commit_message_template_pin = "Pin {package_name} to latest version {new_version}"
        self.commit_message_template_update = "Update {package_name} from {old_version} to {new_version}"
        self.gitlab = GitlabConfig()
        self.update = Config.UPDATE_ALL
        self.update_hashes = True

    def update_config(self, d):
        """
        Updates the config object.
        :param d: dict
        """
        for key, value in d.items():
            if hasattr(self, key):
                if key == "requirements":
                    items, value = value, []
                    for item in items:
                        if isinstance(item, basestring):
                            req = RequirementConfig(path=item)
                        elif isinstance(item, dict):
                            path, item = item.popitem()
                            req = RequirementConfig(
                                path=path,
                                pin=item.get("pin", None),
                                compile=item.get("compile", False),
                                update=item.get("update", Config.UPDATE_ALL)
                            )
                        value.append(req)
                        # add constraint requirement files to config
                        if req.compile:
                            for spec in req.compile.specs:
                                value.append(RequirementConfig(path=spec, pin=False))
                elif key == "assignees":
                    # assignees can be a string or a list. If it's a string, convert it to a list
                    # to make things consistent
                    if isinstance(value, basestring):
                        value = [value, ]
                elif key == 'gitlab':
                    value = GitlabConfig(**value)
                elif key == 'pr_prefix':
                    # make sure that pr prefixes don't contain a PIPE
                    if "|" in value:
                        continue
                # cast ints and floats to str
                if isinstance(value, (int, float)) and not isinstance(value, bool):
                    value = str(value)
                setattr(self, key, value)

    @staticmethod
    def generate_config_file(config):
        return "\n\n".join([
            "# autogenerated pyup.io config file \n# see https://pyup.io/docs/configuration/ "
            "for all available options",
            yaml.safe_dump(config, default_flow_style=False)
        ])

    def _get_requirement_attr(self, attr, path):
        """
        Gets the attribute for a given requirement file in path
        :param attr: string, attribute
        :param path: string, path
        :return: The attribute for the requirement, or the global default
        """
        for req_file in self.requirements:
            if path.strip("/") == req_file.path.strip("/"):
                return getattr(req_file, attr)
        return getattr(self, attr)

    def can_pin(self, path):
        """
        Checks if requirements in `path` can be pinned.
        :param path: string, path to requirement file
        :return: bool
        """
        return self._get_requirement_attr(attr="pin", path=path)

    def can_update_all(self, path):
        """
        Checks if requirements in `path` can be updated.
        :param path: string, path to requirement file
        :return: bool
        """
        return self._get_requirement_attr("update", path=path) == Config.UPDATE_ALL

    def can_update_insecure(self, path):
        """
        Checks if requirements in `path` can be updated if insecure.
        :param path: string, path to requirement file
        :return: bool
        """
        return self._get_requirement_attr("update", path=path) in (Config.UPDATE_ALL, Config.UPDATE_INSECURE, Config.UPDATE_INSECURE_TYPO)

    def is_valid_schedule(self):
        return SCHEDULE_REGEX.search(self.schedule) if type(self.schedule) == str else None

    def __repr__(self):
        return str(self.__dict__)

class RequirementConfig(object):
    def __init__(self, path, pin=None, compile=False, update=Config.UPDATE_ALL):
        self.path = path
        self.pin = pin
        self.compile = CompileConfig(specs=compile.get("specs", [])) if compile else False
        self.update = update

        # set pin default
        if self.pin is None:
            # don't pin pipfiles by default
            if self.path.endswith("Pipfile"):
                self.pin = False
            else:
                self.pin = True

    def __repr__(self):
        return str(self.__dict__)

class GitlabConfig(object):
    def __init__(self, should_remove_source_branch=False, merge_when_pipeline_succeeds=False):
        self.should_remove_source_branch = should_remove_source_branch
        self.merge_when_pipeline_succeeds = merge_when_pipeline_succeeds

    def __repr__(self):
        return str(self.__dict__)

class CompileConfig(object):
    def __init__(self, specs=list()):
        self.specs = specs

    def __repr__(self):
        return str(self.__dict__)
Python
0.00001
@@ -5153,32 +5153,79 @@ schedule(self):%0A + if isinstance(self.schedule, str):%0A return S @@ -5263,43 +5263,41 @@ ule) - if type(self.schedule) == str else +%0A else:%0A return Non
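The diff above stores newlines as %0A, which makes the change hard to read. Decoded, the post-commit method swaps the inline conditional for an explicit isinstance() check; a sketch reconstructed from the diff (not the verified committed file):

def is_valid_schedule(self):
    # Non-string schedules (e.g. a bare False loaded from YAML) can't be
    # matched against SCHEDULE_REGEX, so short-circuit to None.
    if isinstance(self.schedule, str):
        return SCHEDULE_REGEX.search(self.schedule)
    else:
        return None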
f18d675f2877e8f9356dc64a96bf8fba364cddd3
Add search field to admin Terms.
controlled_vocabularies/admin.py
controlled_vocabularies/admin.py
from django.contrib import admin
from django import forms

from controlled_vocabularies.models import Vocabulary, Term, Property


class PropertyInline(admin.TabularInline):
    model = Property
    fk_name = "term_key"
    extra = 1


class VocabularyAdmin(admin.ModelAdmin):
    """ Vocabulary class that determines how comment appears in admin """
    list_display = ('name', 'label', 'order', 'maintainer', 'created', 'modified')
    fieldsets = (
        (None, {
            'classes': 'wide extrapretty',
            'fields': ('name', 'label', 'order', 'maintainer', 'maintainerEmail', 'definition')
        }),
    )


class TermAdmin(admin.ModelAdmin):
    """ Term class that determines how comment appears in admin """
    list_display = ('id', 'name', 'get_vocab', 'label', 'order',)
    fieldsets = (
        (None, {
            'classes': 'wide extrapretty',
            'fields': ('vocab_list', 'name', 'label', 'order')
        }),
    )
    list_filter = ('vocab_list',)
    inlines = [PropertyInline]


class PropertyAdmin(admin.ModelAdmin):
    """ Property class that determines how comment appears in admin """
    list_display = ('property_name', 'get_vocab', 'get_term', 'label',)
    fieldsets = (
        (None, {
            'classes': 'wide extrapretty',
            'fields': ('term_key', 'property_name', 'label')
        }),
    )


def has_spaces(name):
    """ Make sure there are no spaces """
    if ' ' in name:
        raise forms.ValidationError("Spaces are not allowed.")
    else:
        return name


class VocabularyAdminForm(forms.ModelForm):
    """ Vocabulary class to specify how form data is handled in admin """
    class Meta:
        model = Vocabulary
        fields = '__all__'

    def clean_name(self):
        """ Make sure there are no spaces in the name field """
        return has_spaces(self.cleaned_data["name"])


admin.site.register(Vocabulary, VocabularyAdmin)
admin.site.register(Term, TermAdmin)
admin.site.register(Property, PropertyAdmin)
Python
0
@@ -785,24 +785,62 @@ , 'order',)%0A + search_fields = %5B'name', 'label'%5D%0A fieldset
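Decoded, the change simply declares admin search fields on TermAdmin so the change list gains a search box. A sketch of the class after the commit, with the untouched attributes elided behind a comment:

class TermAdmin(admin.ModelAdmin):
    """ Term class that determines how comment appears in admin """
    list_display = ('id', 'name', 'get_vocab', 'label', 'order',)
    search_fields = ['name', 'label']
    # fieldsets, list_filter and inlines are unchanged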
5e6fc92bd627cb30d59929a91122301314554989
use .xml instead of .atom as a file extension
createCatalogBySubject.py
createCatalogBySubject.py
#!/usr/bin/env python

#Copyright(c)2009 Internet Archive. Software license GPL version 3.

"""
This script creates a set of OPDS catalog files in Atom format from a CSV file.
"""

import csv
import os
import sys
import datetime
import codecs, cStringIO
import xml.etree.ElementTree as ET

# You can customize these:
pubInfo = {
    'name' : 'Internet Archive',
    'uri' : 'http://archive.org',
    'email' : 'info@archive.org' ,
    'title' : "Internet Archive's Online Catalog",
    'subtitle' : "Download and read all the public domain books on the Internet Archive"
}

csvfile = 'prelinger.csv'
outdir = 'catalog'

# UTF-8 wrappers for csv.reader
# From http://docs.python.org/library/csv.html#csv-examples
#______________________________________________________________________________
class UTF8Recoder:
    """
    Iterator that reads an encoded stream and reencodes the input to UTF-8
    """
    def __init__(self, f, encoding):
        self.reader = codecs.getreader(encoding)(f)

    def __iter__(self):
        return self

    def next(self):
        return self.reader.next().encode("utf-8")

class UnicodeReader:
    """
    A CSV reader which will iterate over lines in the CSV file "f",
    which is encoded in the given encoding.
    """

    def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
        f = UTF8Recoder(f, encoding)
        self.reader = csv.reader(f, dialect=dialect, **kwds)

    def next(self):
        row = self.reader.next()
        return [unicode(s, "utf-8") for s in row]

    def __iter__(self):
        return self

# createTextElement()
#______________________________________________________________________________
def createTextElement(parent, name, value):
    element = ET.SubElement(parent, name)
    element.text = value

# createCatalogXml()
#______________________________________________________________________________
def createCatalogXml(pubInfo, updated, subject=None):
    ### TODO: add updated element and uuid element
    catalogXml = ET.Element("feed")

    title = pubInfo['title']
    if None != subject:
        title += ' - ' + subject

    createTextElement(catalogXml, "title", title)
    createTextElement(catalogXml, "subtitle", pubInfo['subtitle'])
    createTextElement(catalogXml, "updated", updated)

    author = ET.SubElement(catalogXml, "author")
    createTextElement(author, "name", pubInfo['name'])
    createTextElement(author, "uri", pubInfo['uri'])
    createTextElement(author, "email", pubInfo['email'])

    return catalogXml

# addCatalogEntry()
#______________________________________________________________________________
def addCatalogEntry(catalog, title, updated):
    entry = ET.SubElement(catalog, "entry")
    createTextElement(entry, "title", title)

    href = 'subjects/' + title + '.atom'
    link = ET.SubElement(entry, "link", {'type': 'application/atom+xml', 'href': href})

    createTextElement(entry, "updated", updated)

# addBookEntry()
#______________________________________________________________________________
def addBookEntry(catalog, id, author, title, description, updated, pubInfo):
    entry = ET.SubElement(catalog, "entry")
    createTextElement(entry, "title", title)

    authorET = ET.SubElement(entry, "author")
    createTextElement(authorET, "name", author)

    urnNID = pubInfo['name'].replace(' ', '-')
    urn = 'urn:%s:%s' % (urnNID, id)
    createTextElement(entry, "id", urn)

    href = 'http://www.archive.org/download/%s/%s.pdf' % (id, id)
    link = ET.SubElement(entry, "link", {'type': 'application/pdf', 'href': href})

    createTextElement(entry, "updated", updated)

# writeXml()
#______________________________________________________________________________
# uses minidom to pretty print an ET Element
def writeXml(xml, path):
    txt = ET.tostring(xml)
    import xml.dom.minidom as minidom
    prettyXml = minidom.parseString(txt).toprettyxml().encode( "utf-8" )
    f = open(path, 'w')
    f.write(prettyXml)
    f.close()

# createIndexXml()
#______________________________________________________________________________
# creates a top-level catalog file at outdir/subjects.xml
def createIndexXml(subjectSet, outdir, pubInfo, updated):
    subjectsXml = createCatalogXml(pubInfo, updated)

    for subject in subjectSet:
        addCatalogEntry(subjectsXml, subject, updated)

    writeXml(subjectsXml, outdir + '/subjects.xml')

# createSubjectXml()
#______________________________________________________________________________
def createSubjectXml(csvfile, subject, outdir, pubInfo, updated):
    reader = UnicodeReader(open(csvfile, "rb"))
    reader.next() #the first row is a header

    xml = createCatalogXml(pubInfo, updated, subject)

    for row in reader:
        (id, author, title, subjectStr, description) = row
        if '' == subjectStr:
            subjectStr = 'unclassified'

        subjects = subjectStr.split(';')
        for s in subjects:
            s = s.replace(' ', '_');
            if s == subject:
                addBookEntry(xml, id, author, title, description, updated, pubInfo)

    writeXml(xml, "%s/subjects/%s.xml" % (outdir, subject))

# main()
#______________________________________________________________________________
if not os.path.exists(csvfile):
    sys.exit('input csv file %s does not exist' % (csvfile))

if os.path.exists(outdir):
    sys.exit('output directory %s already exists' % (outdir))

os.mkdir(outdir)
os.mkdir(outdir+'/subjects')

#atom <updated> elements use ISO 8601 format for time
updated = datetime.datetime.utcnow().isoformat()

reader = csv.reader(open(csvfile, "rb"))
reader.next() #the first row is a header

subjectSet = set()
for row in reader:
    (id, author, title, subjectStr, description) = row
    if '' == subjectStr:
        subjectStr = 'unclassified'

    subjects = subjectStr.split(';')
    for subject in subjects:
        subject = subject.replace(' ', '_');
        subjectSet.add(subject)

createIndexXml(subjectSet, outdir, pubInfo, updated)

for subject in subjectSet:
    createSubjectXml(csvfile, subject, outdir, pubInfo, updated)
Python
0
@@ -2834,20 +2834,19 @@ tle + '. -atom +xml '%0A li
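Decoded, the change rewrites the catalog entry href to use the .xml suffix. The resulting line in addCatalogEntry, per the diff:

    href = 'subjects/' + title + '.xml'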
d68f3251c7605220da13a308ebc794a35a0c12e6
This is 0.9.1
rb/__init__.py
rb/__init__.py
""" rb ~~ The redis blaster. :copyright: (c) 2015 Functional Software Inc. :license: Apache License 2.0, see LICENSE for more details. """ from rb.cluster import Cluster from rb.clients import RoutingClient, MappingClient, FanoutClient from rb.router import BaseRouter, ConsistentHashingRouter, PartitionRouter from rb.promise import Promise __version__ = '1.0.dev.0' __all__ = [ # cluster 'Cluster', # client 'RoutingClient', 'MappingClient', 'FanoutClient', # router 'BaseRouter', 'ConsistentHashingRouter', 'PartitionRouter', # promise 'Promise', ]
Python
0.999998
@@ -378,17 +378,13 @@ = ' -1.0.dev.0 +0.9.1 '%0A%0A_
726f24cf2f4a62196ba6c13d129cbb13cf9a248e
Update tutorial_mnist_mlp_static.py
examples/basic_tutorials/tutorial_mnist_mlp_static.py
examples/basic_tutorials/tutorial_mnist_mlp_static.py
import time

import numpy as np
import tensorflow as tf

import tensorlayer as tl
from tensorlayer.layers import Dense, Dropout, Input
from tensorlayer.models import Model

## enable debug logging
tl.logging.set_verbosity(tl.logging.DEBUG)

## prepare MNIST data
X_train, y_train, X_val, y_val, X_test, y_test = tl.files.load_mnist_dataset(shape=(-1, 784))

## define the network
# the softmax is implemented internally in tl.cost.cross_entropy(y, y_) to
# speed up computation, so we use identity here.
# see tf.nn.sparse_softmax_cross_entropy_with_logits()
def get_model(inputs_shape):
    ni = Input(inputs_shape)
    nn = Dropout(keep=0.8)(ni)
    nn = Dense(n_units=800, act=tf.nn.relu)(nn)  # in_channels is optional in this case as it can be inferred by the previous layer
    nn = Dropout(keep=0.8)(nn)
    nn = Dense(n_units=800, act=tf.nn.relu)(nn)  # in_channels is optional in this case as it can be inferred by the previous layer
    nn = Dropout(keep=0.8)(nn)
    nn = Dense(n_units=10, act=tf.nn.relu)(nn)  # in_channels is optional in this case as it can be inferred by the previous layer
    M = Model(inputs=ni, outputs=nn, name="mlp")
    return M

MLP = get_model([None, 784])

import pprint
pprint.pprint(MLP.config)

## start training
n_epoch = 500
batch_size = 500
print_freq = 5
train_weights = MLP.trainable_weights
optimizer = tf.optimizers.Adam(lr=0.0001)

## the following code can help you understand SGD deeply
for epoch in range(n_epoch):  ## iterate the dataset n_epoch times
    start_time = time.time()
    ## iterate over the entire training set once (shuffle the data via training)
    for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True):
        MLP.train()  # enable dropout
        with tf.GradientTape() as tape:
            ## compute outputs
            _logits = MLP(X_batch)  # alternatively, you can use MLP(x, is_train=True) and remove MLP.train()
            ## compute loss and update model
            _loss = tl.cost.cross_entropy(_logits, y_batch, name='train_loss')
        grad = tape.gradient(_loss, train_weights)
        optimizer.apply_gradients(zip(grad, train_weights))

    ## use training and evaluation sets to evaluate the model every print_freq epoch
    if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
        MLP.eval()  # disable dropout
        print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time))
        train_loss, train_acc, n_iter = 0, 0, 0
        for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=False):
            _logits = MLP(X_batch)  # alternatively, you can use MLP(x, is_train=False) and remove MLP.eval()
            train_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss')
            train_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch))
            n_iter += 1
        print(" train loss: {}".format(train_loss / n_iter))
        print(" train acc: {}".format(train_acc / n_iter))

        val_loss, val_acc, n_iter = 0, 0, 0
        for X_batch, y_batch in tl.iterate.minibatches(X_val, y_val, batch_size, shuffle=False):
            _logits = MLP(X_batch)  # is_train=False, disable dropout
            val_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss')
            val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch))
            n_iter += 1
        print(" val loss: {}".format(val_loss / n_iter))
        print(" val acc: {}".format(val_acc / n_iter))

## use testing data to evaluate the model
MLP.eval()
test_loss, test_acc, n_iter = 0, 0, 0
for X_batch, y_batch in tl.iterate.minibatches(X_test, y_test, batch_size, shuffle=False):
    _logits = MLP(X_batch)
    test_loss += tl.cost.cross_entropy(_logits, y_batch, name='test_loss')
    test_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch))
    n_iter += 1
print(" test loss: {}".format(test_loss / n_iter))
print(" test acc: {}".format(test_acc / n_iter))
Python
0.000001
@@ -692,32 +692,16 @@ its=800, -%0D%0A act=tf. @@ -718,90 +718,8 @@ n) -# in_channels is optional in this case as it can be inferred by the previous layer %0D%0A @@ -775,32 +775,16 @@ its=800, -%0D%0A act=tf. @@ -799,92 +799,8 @@ (nn) - # in_channels is optional in this case as it can be inferred by the previous layer %0D%0A @@ -859,24 +859,8 @@ =10, -%0D%0A act @@ -879,92 +879,8 @@ (nn) - # in_channels is optional in this case as it can be inferred by the previous layer %0D%0A
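Decoded (%0D%0A marks the file's CRLF line endings), the commit joins each wrapped Dense(...) call onto a single line and drops the repeated "in_channels is optional" comments. The three calls in get_model after the change, per the diff:

    nn = Dense(n_units=800, act=tf.nn.relu)(nn)
    nn = Dropout(keep=0.8)(nn)
    nn = Dense(n_units=800, act=tf.nn.relu)(nn)
    nn = Dropout(keep=0.8)(nn)
    nn = Dense(n_units=10, act=tf.nn.relu)(nn)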
d3677042d17d3bc641c84981d5332f7fd0ddfe5d
Update process-schedules.py
cron/process-schedules.py
cron/process-schedules.py
#!/usr/bin/env python
import MySQLdb
#import datetime
#import urllib2
#import os
import datetime

servername = "localhost"
username = "pi"
password = "password"
dbname = "pi_heating_db"

now = datetime.datetime.now()

cnx = MySQLdb.connect(host=servername, user=username, passwd=password, db=dbname)
cursorselect = cnx.cursor()

# Check schedule time and date
query = ("SELECT * FROM schedules WHERE enabled ='1';")
cursorselect.execute(query)
results_schedules =cursorselect.fetchall()
cursorselect.close()

for result in results_schedules:
    print("***")
    SCHED_TEST_TIME = False
    SCHED_TEST_DAY = False
    SCHED_TEST_SENSORS = False
    SCHED_TEST_MODES = False
    SCHED_TEST_TIMERS = False
    SCHED_ID = result[0]
    SCHED_START = result[2]
    SCHED_END = result[3]
    SCHED_MON = result[4]
    SCHED_TUE = result[5]
    SCHED_WED = result[6]
    SCHED_THU = result[7]
    SCHED_FRI = result[8]
    SCHED_SAT = result[9]
    SCHED_SUN = result[10]
    print( SCHED_ID )
    #print( now )
    #print( SCHED_START )
    #print( SCHED_END )
    #print( type(SCHED_END) )
    #print("---")
    SCHED_START_HOUR, remainder = divmod(SCHED_START.seconds,3600)
    SCHED_START_MINUTE, sec = divmod(remainder, 60)
    SCHED_END_HOUR, remainder = divmod(SCHED_END.seconds,3600)
    SCHED_END_MINUTE, sec = divmod(remainder, 60)
    #print( type(SCHED_START_MINUTE))
    #print( SCHED_START_MINUTE)
    #print("---")
    SCHED_START_STR = str(SCHED_START_HOUR)+":"+str(SCHED_START_MINUTE)
    SCHED_END_STR = str(SCHED_END_HOUR) + ":"+str(SCHED_END_MINUTE)
    #print( SCHED_START_STR )
    TIME_NOW = datetime.datetime.strptime(str(now.hour)+":"+str(now.minute), "%H:%M")
    TIME_START = datetime.datetime.strptime(SCHED_START_STR, "%H:%M")
    TIME_END = datetime.datetime.strptime(SCHED_END_STR, "%H:%M")
    MIN_TO_START = TIME_NOW - TIME_START
    MIN_TO_END = TIME_END - TIME_NOW
    #print( MIN_TO_START.total_seconds() )
    #print( MIN_TO_END.total_seconds() )
    #print( SCHED_TEST_TIME )
    if ( MIN_TO_START.total_seconds() > 0 and MIN_TO_END.total_seconds() > 0 ):
        SCHED_TEST_TIME = True
    print( SCHED_TEST_TIME )

    # Check senso values

    # Check modes

    # Check timers

    if ( SCHED_TEST_TIME and SCHED_TEST_DAY and SCHED_TEST_SENSORS and SCHED_TEST_MODES and SCHED_TEST_TIMERS == True):
        print( "activate" )
    else:
        print( "deactivate" )

cnx.commit()
cnx.close()
Python
0.000001
@@ -556,17 +556,25 @@ print(%22* -* + * * * * *%22)%0A SC @@ -2130,16 +2130,129 @@ T_TIME ) +%0A print( SCHED_TEST_DAY )%0A print( SCHED_TEST_SENSORS )%0A print( SCHED_TEST_MODES )%0A print( SCHED_TEST_TIMERS ) %0A%0A # Ch
8071bb66af3f0b661c07ef9f3ac260b30b856d04
Allow the profile boolean to the Alerts controller
fedoracommunity/mokshaapps/alerts/controllers/root.py
fedoracommunity/mokshaapps/alerts/controllers/root.py
from tg import expose, tmpl_context
from pylons import cache, request
from datetime import datetime, timedelta

from moksha.lib.base import Controller
from moksha.api.widgets import ContextAwareWidget
from moksha.api.connectors import get_connector


class AlertsContainer(ContextAwareWidget):
    template = 'mako:fedoracommunity.mokshaapps.alerts.templates.alertscontainer'

    def update_params(self, d):
        super(AlertsContainer, self).update_params(d)

        # FIXME: Alerts need to be dynamic but for right now
        # we will have it query directly and cache the results
        c = cache.get_cache('fedoracommunity_alerts_global')

        # cache for a minute
        users = []
        if d.get('profile') or d.get('userid'):
            label = 'Error if you see this label'
            if d.get('profile'):
                label = 'Your Recent Packages'
                creds = request.environ.get('repoze.what.credentials')
                if creds and creds.get('repoze.what.userid'):
                    userid = creds.get('repoze.what.userid')
            else:
                userid = d.get('userid')
                label = '%s\'s Recent Packages' % userid

            users_data = c.get_value(key=userid,
                                     createfunc=lambda : self.get_user_entries(userid),
                                     expiretime=3600)
            d['alerts'] = [{'label': label, 'items': users_data}]
        else:
            # cache for 5 minutes
            today = c.get_value(key='today',
                                createfunc=self.get_todays_entries,
                                expiretime=300)
            # cache for a day
            this_week = c.get_value(key='this_week',
                                    createfunc=self.get_this_week_entries,
                                    expiretime=3600)

            # add today's results to this_week as an optimization
            # e.g. this week only contains a count up to 11:59 of the
            # previous day
            for w, t in zip(this_week, today):
                w['count'] += t['count']

            d['alerts'] = [{'label': 'This Week', 'items': this_week},
                           {'label': 'Today', 'items': today}]

    def query_builds_count(self, userid, before, after, state):
        # FIXME: Add this as an alerts query to the connector
        builds = get_connector('koji')

        id = None
        if userid:
            user = builds.call('getUser', params={'userInfo':userid})
            if user:
                id = user['id']

        if before:
            before = str(before)
        if after:
            after = str(after)

        params = dict(userID=id,
                      state=state,
                      completeBefore=before,
                      completeAfter=after,
                      queryOpts={'countOnly': True})

        count = builds.call('listBuilds', params)

        if state == 1:
            label = 'builds succeeded'
            icon = '16_success_build.png'
        elif state == 3:
            label = 'builds failed'
            icon = '16_failure_build.png'

        return {'count': count, 'label': label, 'state': state, 'icon': icon}

    def get_this_week_entries(self):
        bodhi = get_connector('bodhi')
        now = datetime.utcnow()
        a_day_ago = now - timedelta(days=1)
        a_day_ago = a_day_ago.replace(hour = 23, minute = 59, second = 59)
        week_start = now - timedelta(weeks=1)

        results = []
        complete_builds = self.query_builds_count(None, a_day_ago, week_start, 1)
        failed_builds = self.query_builds_count(None, a_day_ago, week_start, 3)
        stable_updates = bodhi.query_updates_count('stable', before=a_day_ago, after=week_start)
        testing_updates = bodhi.query_updates_count('testing', before=a_day_ago, after=week_start)

        complete_builds['url'] = '/package_maintenance/builds/success'
        failed_builds['url'] = '/package_maintenance/builds/fail'
        stable_updates['url'] = '/package_maintenance/updates/stable'
        testing_updates['url'] = '/package_maintenance/updates/testing'

        stable_updates['icon'] = testing_updates['icon'] = '16_bodhi.png'

        results.append(complete_builds)
        results.append(failed_builds)
        results.append(stable_updates)
        results.append(testing_updates)

        return results

    def get_todays_entries(self):
        bodhi = get_connector('bodhi')
        today_start = datetime.utcnow()
        today_start = today_start.replace(hour = 0)

        results = []
        complete_builds = self.query_builds_count(None, None, today_start, 1)
        failed_builds = self.query_builds_count(None, None, today_start, 3)
        stable_updates = bodhi.query_updates_count('stable', after=today_start)
        testing_updates = bodhi.query_updates_count('testing',after=today_start)

        complete_builds['url'] = '/package_maintenance/builds/successful'
        failed_builds['url'] = '/package_maintenance/builds/failed'
        stable_updates['url'] = '/package_maintenance/updates/stable'
        testing_updates['url'] = '/package_maintenance/updates/testing'

        stable_updates['icon'] = testing_updates['icon'] = '16_bodhi.png'

        results.append(complete_builds)
        results.append(failed_builds)
        results.append(stable_updates)
        results.append(testing_updates)

        return results

    def get_user_entries(self, userid):
        bodhi = get_connector('bodhi')
        now = datetime.utcnow()
        week_start = now - timedelta(weeks=1)

        results = []
        complete_builds = self.query_builds_count(userid, None, week_start, 1)
        failed_builds = self.query_builds_count(userid, None, week_start, 3)
        stable_updates = bodhi.query_updates_count('stable', username=userid, after=week_start)
        testing_updates = bodhi.query_updates_count('testing', username=userid, after=week_start)

        complete_builds['url'] = '/profile/builds/my_successful'
        failed_builds['url'] = '/profile/builds/my_failed'
        stable_updates['url'] = '/profile/updates/stable'
        testing_updates['url'] = '/profile/updates/testing'

        stable_updates['icon'] = testing_updates['icon'] = '16_bodhi.png'

        results.append(complete_builds)
        results.append(failed_builds)
        results.append(stable_updates)
        results.append(testing_updates)

        return results

alerts_container = AlertsContainer('alerts')


class RootController(Controller):
    @expose('mako:moksha.templates.widget')
    def index(self, username=None):
        tmpl_context.widget = alerts_container
        return dict(options={'userid': username})
Python
0.000001
@@ -28,16 +28,26 @@ _context +, validate %0Afrom py @@ -73,16 +73,50 @@ request%0A +from formencode import validators%0A from dat @@ -7115,16 +7115,68 @@ idget')%0A + @validate(%7B'profile': validators.StringBool()%7D)%0A def @@ -7200,16 +7200,31 @@ ame=None +, profile=False ):%0A @@ -7312,11 +7312,31 @@ username +, 'profile': profile %7D)%0A
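Assembled from the four hunks above, the controller after the commit validates the new flag as a boolean and forwards it to the template options. A sketch, with imports shown only for the affected names:

from formencode import validators
from tg import expose, tmpl_context, validate

class RootController(Controller):
    @expose('mako:moksha.templates.widget')
    @validate({'profile': validators.StringBool()})
    def index(self, username=None, profile=False):
        tmpl_context.widget = alerts_container
        return dict(options={'userid': username, 'profile': profile})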
e60ce628029e3100d6f2a8a8f7260e2ed229e6ac
Add helper method to retrieve review count per user in a skeleton
django/applications/catmaid/control/review.py
django/applications/catmaid/control/review.py
from collections import defaultdict

from catmaid.models import Review


def get_treenodes_to_reviews(treenode_ids=None, skeleton_ids=None,
                             umap=lambda r: r):
    """ Returns a dictionary that contains all reviewed nodes of the
    passed <treenode_ids> and/or <skeleton_ids> lists as keys. The
    reviewer user IDs are kept in a list as values. A function can be
    passed to which is executed for every reviewer_id to change the
    value stored result (e.g. to use user names instead of an ID. It
    defaults to the identity and therefore reviewer IDs.
    """
    # Set up filters
    reviews = Review.objects.all()
    if treenode_ids:
        reviews = reviews.filter(treenode_id__in=treenode_ids)
    if skeleton_ids:
        reviews = reviews.filter(skeleton_id__in=skeleton_ids)
    # Only request treenode ID and reviewer ID
    reviews = reviews.values_list('treenode_id', 'reviewer_id')
    # Build dictionary
    treenode_to_reviews = defaultdict(list)
    for tid, rid in reviews:
        treenode_to_reviews[tid].append(umap(rid))

    return treenode_to_reviews
Python
0
@@ -65,16 +65,50 @@ Review%0A%0A +from django.db import connection%0A%0A %0Adef get @@ -1138,8 +1138,691 @@ reviews%0A +%0Adef get_review_count(skeleton_ids):%0A %22%22%22 Returns a dictionary that maps skelton IDs to dictonaries that map%0A user_ids to a review count for this particular skeleton.%0A %22%22%22%0A # Count nodes that have been reviewed by each user in each partner skeleton%0A cursor = connection.cursor()%0A cursor.execute('''%0A SELECT skeleton_id, reviewer_id, count(skeleton_id)%0A FROM review%0A WHERE skeleton_id IN (%25s)%0A GROUP BY reviewer_id, skeleton_id%0A ''' %25 %22,%22.join(str(skid) for skid in skeleton_ids))%0A # Build dictionary%0A reviews = defaultdict(lambda: defaultdict(int))%0A for row in cursor.fetchall():%0A reviews%5Brow%5B0%5D%5D%5Brow%5B1%5D%5D = row%5B2%5D%0A%0A return reviews%0A
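The added helper can be decoded directly from the diff (%0A marks newlines, %25 a literal percent sign). It issues one grouped SQL query over the review table and folds the rows into a nested dict, alongside a new django.db connection import:

def get_review_count(skeleton_ids):
    """ Returns a dictionary that maps skelton IDs to dictonaries that map
    user_ids to a review count for this particular skeleton.
    """
    # Count nodes that have been reviewed by each user in each partner skeleton
    cursor = connection.cursor()
    cursor.execute('''
    SELECT skeleton_id, reviewer_id, count(skeleton_id)
    FROM review
    WHERE skeleton_id IN (%s)
    GROUP BY reviewer_id, skeleton_id
    ''' % ",".join(str(skid) for skid in skeleton_ids))
    # Build dictionary
    reviews = defaultdict(lambda: defaultdict(int))
    for row in cursor.fetchall():
        reviews[row[0]][row[1]] = row[2]

    return reviews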
a36a7a0eb6560156c5be6f0cc5523c17e79591e4
fix import errors
deepchem/models/tests/test_normalizing_flow_pytorch.py
deepchem/models/tests/test_normalizing_flow_pytorch.py
""" Test for Pytorch Normalizing Flow model and its transformations """ import pytest import numpy as np import unittest try: import torch from torch.distributions import MultivariateNormal from deepchem.models.torch_models.normalizing_flows_pytorch import Affine has_torch = True except: has_torch = False @unittest.skipIf(not has_torch, 'torch is not installed') @pytest.mark.torch def test_Affine(): """ This test should evaluate if the transformation its being applied correctly. When computing the logarithm of the determinant jacobian matrix the result must be zero for any distribution as input when performing the first forward and inverse pass (initialized). This is the expected behavior because nothing is learned yet. input shape: (samples, dim) output shape: (samples, dim) """ dim = 2 samples = 96 data = MultivariateNormal(torch.zeros(dim), torch.eye(dim)) tensor = data.sample(torch.Size((samples, dim))) _, log_det_jacobian = Affine(dim).forward(tensor) _, inverse_log_det_jacobian = Affine(dim).inverse(tensor) # The first pass of the transformation should be 0 log_det_jacobian = log_det_jacobian.detach().numpy() inverse_log_det_jacobian = inverse_log_det_jacobian.detach().numpy() zeros = np.zeros((samples,)) assert np.array_equal(log_det_jacobian, zeros) assert np.array_equal(inverse_log_det_jacobian, zeros)
Python
0.000025
@@ -230,33 +230,14 @@ els. -normalizing_flows_pytorch +layers imp @@ -594,17 +594,8 @@ ion -as input when @@ -609,18 +609,16 @@ ming the -%0A first f @@ -623,16 +623,18 @@ forward +%0A and inv @@ -694,14 +694,12 @@ ior -becaus +sinc e no @@ -707,16 +707,22 @@ hing is +being learned
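Decoded, the fix repoints the Affine import (and tidies the docstring wording in the later hunks). Assuming the "els." context at offset 230 is the tail of the torch_models package path, the corrected line reads:

  from deepchem.models.torch_models.layers import Affine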
40855d9be867119bdcaf7f531d5368dc7c91c3ee
Change internal representation of links chain to a tuple
polymer_states/__init__.py
polymer_states/__init__.py
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

import numpy


class Link(int):
    """A polymer chain link on a 2D lattice."""

    VALID_LINK_VALUES = {1 << i for i in range(5)}

    def __init__(self, value):
        if value not in Link.VALID_LINK_VALUES:
            raise ValueError("invalid link value {}".format(value))

Link.UP, Link.DOWN, Link.LEFT, Link.RIGHT, Link.SLACK = \
    map(Link, Link.VALID_LINK_VALUES)


class Polymer:
    """A chain of N links."""

    def __init__(self, links):
        arr_links = numpy.array(list(map(Link, links)))
        if len(arr_links) < 1:
            raise ValueError(("polymer chain must contain at least one " +
                              "link, {} given").format(len(arr_links)))
        self.__links = arr_links

    def __hash__(self):
        return 0

    def __eq__(self, other):
        if self.__links.shape != other._Polymer__links.shape:
            return False
        return numpy.equal(self.__links, other._Polymer__links).all()

    def __ne__(self, other):
        return not self == other

    @classmethod
    def all_curled_up(cls, link_count):
        """Polymer.all_curled_up(len) -> a Polymer

        Creates a Polymer that has link_count links (ie, link_count + 1
        reptons) and all reptons placed in a single cell.
        """
        return cls([Link.SLACK] * link_count)

    def reachable_from(self):
        reachable = {self}
        for i, pair in enumerate(self.link_pairs()):
            first, second = pair
            if Polymer.both_slacks(pair):
                reachable.update(self.__create_hernias_at(i))
            elif i == 0 and first == Link.SLACK:
                reachable.update(self.__make_slack_end_taut(i))
            elif Link.SLACK in pair:
                reachable.add(self.__reptate_at(i))
            elif Polymer.is_hernia(pair):
                reachable.add(self.__annihilate_hernia_at(i))
        return reachable

    def contains_hernia(self):
        """P.contains_hernia() -> a bool

        Returns True if the Polymer contains a hernia.
        """
        return any(Polymer.is_hernia(pair) for pair in self.link_pairs())

    def contains_slack_pair(self):
        """P.contains_slack_pair() -> a bool

        Returns True if the Polymer contains a pair of consecutive slacks
        that could be turned into a hernia.
        """
        return any(Polymer.both_slacks(pair) for pair in self.link_pairs())

    def __create_hernias_at(self, i):
        out = set()
        for first, second in [(Link.UP, Link.DOWN), (Link.DOWN, Link.UP),
                              (Link.LEFT, Link.RIGHT), (Link.RIGHT, Link.LEFT)]:
            new_links = self.__links.copy()
            new_links[i] = first
            new_links[i + 1] = second
            out.add(Polymer(new_links))
        return out

    def __annihilate_hernia_at(self, i):
        new_links = self.__links.copy()
        new_links[i:i+2] = Link.SLACK
        return Polymer(new_links)

    def __reptate_at(self, i):
        new_links = self.__links.copy()
        new_links[i], new_links[i + 1] = new_links[i + 1], new_links[i]
        return Polymer(new_links)

    def __make_slack_end_taut(self, i):
        out = set()
        for taut_link in [Link.UP, Link.DOWN, Link.LEFT, Link.RIGHT]:
            new_links = self.__links.copy()
            new_links[i] = taut_link
            out.add(Polymer(new_links))
        return out

    def link_pairs(self):
        return zip(self.__links, self.__links[1:])

    @staticmethod
    def is_hernia(pair):
        """Polymer.is_hernia(pair) -> a bool

        Returns True if the given pair of links forms a hernia.
        """
        return set(pair) in [{Link.UP, Link.DOWN}, {Link.LEFT, Link.RIGHT}]

    @staticmethod
    def both_slacks(pair):
        return pair == (Link.SLACK, Link.SLACK)


HERNIAS = {
    Polymer([Link.UP, Link.DOWN]),
    Polymer([Link.DOWN, Link.UP]),
    Polymer([Link.LEFT, Link.RIGHT]),
    Polymer([Link.RIGHT, Link.LEFT]),
}
Python
0.000001
@@ -666,20 +666,16 @@ -arr_ links = nump @@ -674,43 +674,20 @@ s = -numpy.array(list(map(Link, +tuple( links) -)) %0A%0A @@ -695,28 +695,24 @@ if len( -arr_ links) %3C 1:%0A @@ -845,20 +845,16 @@ mat(len( -arr_ links))) @@ -882,17 +882,31 @@ s = -arr_ +tuple(map(Link, links +)) %0A%0A @@ -942,17 +942,34 @@ return -0 +hash(self.__links) %0A%0A de @@ -1003,114 +1003,15 @@ -if self.__links.shape != other._Polymer__links.shape:%0A return False%0A return numpy.equal( +return self @@ -1018,17 +1018,19 @@ .__links -, + == other._ @@ -1047,15 +1047,8 @@ inks -).all() %0A%0A @@ -2730,98 +2730,178 @@ s = -self.__links.copy()%0A new_links%5Bi%5D = first%0A new_links%5Bi + 1%5D = second +(%0A first if j == i%0A else second if j == i + 1%0A else link%0A for j, link in enumerate(self.__links)%0A ) %0A @@ -3022,65 +3022,122 @@ s = -self.__links.copy()%0A new_links%5Bi:i+2%5D = Link.SLACK +(%0A Link.SLACK if j in (i, i + 1) else link%0A for j, link in enumerate(self.__links)%0A ) %0A @@ -3223,99 +3223,185 @@ s = -self.__links.copy()%0A new_links%5Bi%5D, new +(%0A self.__links%5Bj + 1%5D if j == i%0A else self._ _links%5B -i + +j - 1%5D -= new_links%5Bi + 1%5D, new_links%5Bi%5D +if j == i + 1%0A else link%0A for j, link in enumerate(self.__links)%0A ) %0A @@ -3590,64 +3590,40 @@ s = -self.__links.copy()%0A new_links%5Bi%5D = taut_link +(taut_link, ) + self.__links%5B1:%5D %0A
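Taken together, the hunks replace the numpy-array field with a plain tuple, which also makes Polymer hashable and comparable by value. A condensed sketch of the three affected methods after the commit, assembled from the diff:

    def __init__(self, links):
        links = tuple(links)
        if len(links) < 1:
            raise ValueError(("polymer chain must contain at least one " +
                              "link, {} given").format(len(links)))
        self.__links = tuple(map(Link, links))

    def __hash__(self):
        # Tuples hash by content, so equal polymers now hash equal too.
        return hash(self.__links)

    def __eq__(self, other):
        return self.__links == other._Polymer__links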
3dc54a1c845cf0b99fd0dfc6fd454659895ba888
Fix import.
django_elasticsearch/contrib/restframework/__init__.py
django_elasticsearch/contrib/restframework/__init__.py
from rest_framework import VERSION

from django_elasticsearch.contrib.restframework.restframework import AutoCompletionMixin

if int(VERSION[0]) < 3:
    from django_elasticsearch.contrib.restframework.restframework2 import IndexableModelMixin
    from django_elasticsearch.contrib.restframework.restframework2 import ElasticsearchFilterBackend
else:
    from django_elasticsearch.contrib.restframework.restframework3 import IndexableModelMixin
    from django_elasticsearch.contrib.restframework.restframework3 import ElasticsearchFilterBackend

__all__ = [ElasticsearchFilterBackend, IndexableModelMixin, AutoCompletionMixin]
Python
0
@@ -77,29 +77,20 @@ amework. -restframework +base import
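Decoded, the corrected import pulls AutoCompletionMixin from the base module instead of the non-existent restframework one:

from django_elasticsearch.contrib.restframework.base import AutoCompletionMixin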
572fdb51cb692344d35d561d2efaf723f839221b
Update use_interoperable_auth detection.
grow/deployments/destinations/google_cloud_storage.py
grow/deployments/destinations/google_cloud_storage.py
from . import base
from . import messages as deployment_messages

from boto.gs import key
from boto.s3 import connection
from gcloud import storage
from protorpc import messages
import boto
import cStringIO
import dns.resolver
import logging
import mimetypes
import os
import webapp2


class TestCase(base.DestinationTestCase):

    def test_domain_cname_is_gcs(self):
        bucket_name = self.deployment.config.bucket
        CNAME = 'c.storage.googleapis.com'
        message = deployment_messages.TestResultMessage()
        message.title = 'CNAME for {} is {}'.format(bucket_name, CNAME)

        dns_resolver = dns.resolver.Resolver()
        dns_resolver.nameservers = ['8.8.8.8']  # Use Google's DNS.

        try:
            content = str(dns_resolver.query(bucket_name, 'CNAME')[0])
        except:
            text = "Can't verify CNAME for {} is mapped to {}"
            message.result = deployment_messages.Result.WARNING
            message.text = text.format(bucket_name, CNAME)

        if not content.startswith(CNAME):
            text = 'CNAME mapping for {} is not GCS! Found {}, expected {}'
            message.result = deployment_messages.Result.WARNING
            message.text = text.format(bucket_name, content, CNAME)
        else:
            text = 'CNAME for {} -> {}'.format(bucket_name, content, CNAME)
            message.text = text.format(text, content, CNAME)

        return message


class Config(messages.Message):
    bucket = messages.StringField(1)
    access_key = messages.StringField(2)
    access_secret = messages.StringField(3)
    project = messages.StringField(4)
    email = messages.StringField(5)
    key_path = messages.StringField(6)


class GoogleCloudStorageDestination(base.BaseDestination):
    NAME = 'gcs'
    TestCase = TestCase
    Config = Config

    def __str__(self):
        return 'gs://{}'.format(self.config.bucket)

    @property
    def use_interoperable_auth(self):
        return self.config.access_key is not None

    @webapp2.cached_property
    def bucket(self):
        if self.use_interoperable_auth:
            gs_connection = boto.connect_gs(
                self.config.access_key, self.config.access_secret,
                calling_format=connection.OrdinaryCallingFormat())
        else:
            gs_connection = storage.get_connection(
                self.config.project, self.config.email, self.config.key_path)
        return gs_connection.get_bucket(self.config.bucket)

    def prelaunch(self, dry_run=False):
        if dry_run:
            return
        logging.info('Configuring GS bucket: {}'.format(self.config.bucket))
        if self.use_interoperable_auth:
            self.bucket.set_acl('public-read')
            self.bucket.configure_versioning(False)
            self.bucket.configure_website(main_page_suffix='index.html',
                                          error_key='404.html')
        else:
            acl = self.bucket.get_default_object_acl()
            acl.all().grant_read().revoke_write()
            acl.save()
            self.bucket.configure_website(main_page_suffix='index.html',
                                          not_found_page='404.html')

    def write_control_file(self, path, content):
        path = os.path.join(self.control_dir, path.lstrip('/'))
        return self.write_file(path, content, policy='private')

    def read_file(self, path):
        if self.use_interoperable_auth:
            file_key = key.Key(self.bucket)
            file_key.key = path
            try:
                return file_key.get_contents_as_string()
            except boto.exception.GSResponseError, e:
                if e.status != 404:
                    raise
                raise IOError('File not found: {}'.format(path))
        else:
            file_key = self.bucket.get_key(path)
            if not file_key:
                raise IOError('File not found: {}'.format(path))
            return file_key.get_contents_as_string()

    def delete_file(self, path):
        if self.use_interoperable_auth:
            file_key = key.Key(self.bucket)
            file_key.key = path.lstrip('/')
            self.bucket.delete_key(file_key)
        else:
            self.bucket.delete_key(path)

    def write_file(self, path, content, policy='public-read'):
        if isinstance(content, unicode):
            content = content.encode('utf-8')
        path = path.lstrip('/')
        mimetype = mimetypes.guess_type(path)[0]
        fp = cStringIO.StringIO()
        fp.write(content)
        size = fp.tell()
        try:
            if self.use_interoperable_auth:
                file_key = key.Key(self.bucket)
                file_key.key = path
                headers = {'Cache-Control': 'no-cache'}  # TODO(jeremydw): Better headers.
                if mimetype:
                    headers['Content-Type'] = mimetype
                file_key.set_contents_from_file(fp, headers=headers, replace=True,
                                                policy=policy, size=size,
                                                rewind=True)
            else:
                file_key = self.bucket.new_key(path)
                file_key.set_contents_from_file(fp, content_type=mimetype,
                                                size=size, rewind=True)
                if policy == 'private':
                    acl = file_key.get_acl()
                    acl.all().revoke_read().revoke_write()
                    file_key.save_acl(acl)
        finally:
            fp.close()
Python
0
@@ -1833,25 +1833,16 @@ fig. -access_key is not +email is Non
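Decoded, the detection now keys off the service-account email rather than the access key: interoperable (HMAC) auth is used whenever no email is configured. The property after the commit, per the diff:

    @property
    def use_interoperable_auth(self):
        # No service-account email configured -> fall back to interoperable auth.
        return self.config.email is None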
01cf61de5fc2c649fd5cd503df49aa880613c7c2
change potcar_data to potcar_spec
pymatgen/apps/borg/tests/test_hive.py
pymatgen/apps/borg/tests/test_hive.py
# coding: utf-8

from __future__ import division, unicode_literals

"""
Created on Mar 18, 2012
"""


__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Mar 18, 2012"

import unittest
import os

from pymatgen.apps.borg.hive import VaspToComputedEntryDrone, \
    SimpleVaspToComputedEntryDrone, GaussianToComputedEntryDrone
from pymatgen.entries.computed_entries import ComputedStructureEntry
from pymatgen.entries.compatibility import MITCompatibility


class VaspToComputedEntryDroneTest(unittest.TestCase):

    def setUp(self):
        self.test_dir = os.path.join(os.path.dirname(__file__), "..", "..",
                                     "..", "..", 'test_files')
        self.drone = VaspToComputedEntryDrone(data=["efermi"])
        self.structure_drone = VaspToComputedEntryDrone(True)

    def test_get_valid_paths(self):
        for path in os.walk(self.test_dir):
            if path[0] == self.test_dir:
                self.assertTrue(len(self.drone.get_valid_paths(path)) > 0)

    def test_assimilate(self):
        entry = self.drone.assimilate(self.test_dir)
        for p in ["hubbards", "is_hubbard", "potcar_data", "run_type"]:
            self.assertIn(p, entry.parameters)
        self.assertAlmostEqual(entry.data["efermi"], 1.8301027)
        self.assertEqual(entry.composition.reduced_formula, "LiFe4(PO4)4")
        self.assertAlmostEqual(entry.energy, -269.38319884)
        entry = self.structure_drone.assimilate(self.test_dir)
        self.assertEqual(entry.composition.reduced_formula, "LiFe4(PO4)4")
        self.assertAlmostEqual(entry.energy, -269.38319884)
        self.assertIsInstance(entry, ComputedStructureEntry)
        self.assertIsNotNone(entry.structure)
        self.assertEqual(len(entry.parameters["history"]), 2)
        compat = MITCompatibility(check_potcar_hash=False)
        self.assertIsNone(compat.process_entry(entry))

    def test_to_from_dict(self):
        d = self.structure_drone.as_dict()
        drone = VaspToComputedEntryDrone.from_dict(d)
        self.assertEqual(type(drone), VaspToComputedEntryDrone)


class SimpleVaspToComputedEntryDroneTest(unittest.TestCase):

    def setUp(self):
        self.test_dir = os.path.join(os.path.dirname(__file__), "..", "..",
                                     "..", "..", 'test_files')
        self.drone = SimpleVaspToComputedEntryDrone()
        self.structure_drone = SimpleVaspToComputedEntryDrone(True)

    def test_get_valid_paths(self):
        for path in os.walk(self.test_dir):
            if path[0] == self.test_dir:
                self.assertTrue(len(self.drone.get_valid_paths(path)) > 0)

    def test_to_from_dict(self):
        d = self.structure_drone.as_dict()
        drone = SimpleVaspToComputedEntryDrone.from_dict(d)
        self.assertEqual(type(drone), SimpleVaspToComputedEntryDrone)


class GaussianToComputedEntryDroneTest(unittest.TestCase):

    def setUp(self):
        self.test_dir = os.path.join(os.path.dirname(__file__), "..", "..",
                                     "..", "..", 'test_files', "molecules")
        self.drone = GaussianToComputedEntryDrone(data=["corrections"])
        self.structure_drone = GaussianToComputedEntryDrone(True)

    def test_get_valid_paths(self):
        for path in os.walk(self.test_dir):
            if path[0] == self.test_dir:
                self.assertTrue(len(self.drone.get_valid_paths(path)) > 0)

    def test_assimilate(self):
        test_file = os.path.join(self.test_dir, "methane.log")
        entry = self.drone.assimilate(test_file)
        for p in ["functional", "basis_set", "charge", "spin_mult", 'route']:
            self.assertIn(p, entry.parameters)
        for p in ["corrections"]:
            self.assertIn(p, entry.data)
        self.assertEqual(entry.composition.reduced_formula, "H4C")
        self.assertAlmostEqual(entry.energy, -39.9768775602)
        entry = self.structure_drone.assimilate(test_file)
        self.assertEqual(entry.composition.reduced_formula, "H4C")
        self.assertAlmostEqual(entry.energy, -39.9768775602)
        self.assertIsInstance(entry, ComputedStructureEntry)
        self.assertIsNotNone(entry.structure)
        for p in ["properly_terminated", "stationary_type"]:
            self.assertIn(p, entry.data)

    def test_to_from_dict(self):
        d = self.structure_drone.as_dict()
        drone = GaussianToComputedEntryDrone.from_dict(d)
        self.assertEqual(type(drone), GaussianToComputedEntryDrone)


if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
Python
0.000011
@@ -1252,20 +1252,20 @@ %22potcar_ -data +spec %22, %22run_
03ee56092d7f258dd37016ac06b6daf2439811f3
change article abstract count to limit to in_doaj only
portality/models/search.py
portality/models/search.py
from portality.dao import DomainObject
from portality.models.cache import Cache
from portality.models import Journal, Article


class JournalArticle(DomainObject):
    __type__ = 'journal,article'
    __readonly__ = True  # TODO actually heed this attribute in all DomainObject methods which modify data

    @classmethod
    def site_statistics(cls):
        stats = Cache.get_site_statistics()
        if stats is not None:
            return stats

        # we didn't get anything from the cache, so we need to generate and
        # cache a new set

        # prep the query and result objects
        stats = {
            # Note these values all have to be strings
            "journals" : "0",
            "countries" : "0",
            "abstracts" : "0",
            "new_journals" : "0",
            "no_apc" : "0"
        }

        # get the journal data
        q = JournalStatsQuery()
        journal_data = Journal.query(q=q.stats)

        stats["journals"] = "{0:,}".format(journal_data.get("hits", {}).get("total", 0))
        stats["countries"] = "{0:,}".format(len(journal_data.get("aggregations", {}).get("countries", {}).get("buckets", [])))

        apc_buckets = journal_data.get("aggregations", {}).get("apcs", {}).get("buckets", [])
        for b in apc_buckets:
            if b.get("key") == "No":
                stats["no_apc"] = "{0:,}".format(b.get("doc_count"))
                break

        stats["new_journals"] = "{0:,}".format(journal_data.get("aggregations", {}).get("creation", {}).get("buckets", [])[0].get("doc_count", 0))

        # get the article data
        qa = ArticleStatsQuery()
        article_data = Article.query(q=qa.q)

        stats["abstracts"] = "{0:,}".format(article_data.get("aggregations", {}).get("abstracts", {}).get("value", 0))

        # now cache and return
        Cache.cache_site_statistics(stats)

        return stats


class JournalStatsQuery(object):
    stats = {
        "query": {
            "bool": {
                "must": [
                    {"term": {"admin.in_doaj": True}}
                ]
            }
        },
        "size": 0,
        "aggs": {
            "countries" : {
                "terms" : {"field" : "index.country.exact", "size" : 500}
            },
            "apcs" : {
                "terms" : {"field" : "index.has_apc.exact"}
            },
            "creation" : {
                "date_range" : {
                    "field" : "created_date",
                    "ranges" : [
                        {"from" : "now-1M"}
                    ]
                }
            }
        }
    }


class ArticleStatsQuery(object):
    q = {
        "query" : {"match_all" : {}},
        "size" : 0,
        "aggs" : {
            "abstracts" : {
                "value_count" : {"field" : "bibjson.abstract.exact"}
            }
        }
    }
Python
0
@@ -2661,28 +2661,154 @@ ery%22 - : %7B%22match_all%22 : %7B%7D +: %7B%0A %22bool%22: %7B%0A %22must%22: %5B%0A %7B%22term%22: %7B%22admin.in_doaj%22: True%7D%7D%0A %5D%0A %7D%0A %7D,%0A
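Decoded, ArticleStatsQuery swaps its match_all query for a bool/must term filter on admin.in_doaj, mirroring JournalStatsQuery, so the cached abstract count only covers articles in DOAJ:

class ArticleStatsQuery(object):
    q = {
        "query": {
            "bool": {
                "must": [
                    {"term": {"admin.in_doaj": True}}
                ]
            }
        },
        "size": 0,
        "aggs": {
            "abstracts": {
                "value_count": {"field": "bibjson.abstract.exact"}
            }
        }
    }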
6c4cd9e9f49c295fbb9738062e05bdf566d470a9
Use default client name for cinder resources
heat/engine/resources/openstack/cinder/volume_type.py
heat/engine/resources/openstack/cinder/volume_type.py
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from heat.common import exception
from heat.common.i18n import _
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support


class CinderVolumeType(resource.Resource):
    """A resource for creating cinder volume types.

    Volume type resource allows to define, whether volume, which will
    be use this type, will public and which projects are allowed to work
    with it. Also, there can be some user-defined metadata.

    Note that default cinder security policy usage of this resource
    is limited to being used by administrators only.
    """

    support_status = support.SupportStatus(version='2015.1')

    default_client_name = 'cinder'

    entity = 'volume_types'

    required_service_extension = 'os-types-manage'

    PROPERTIES = (
        NAME, METADATA, IS_PUBLIC, DESCRIPTION, PROJECTS,
    ) = (
        'name', 'metadata', 'is_public', 'description', 'projects',
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name of the volume type.'),
            required=True,
            update_allowed=True,
        ),
        METADATA: properties.Schema(
            properties.Schema.MAP,
            _('The extra specs key and value pairs of the volume type.'),
            update_allowed=True
        ),
        IS_PUBLIC: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Whether the volume type is accessible to the public.'),
            default=True,
            support_status=support.SupportStatus(version='5.0.0'),
            update_allowed=True
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description of the volume type.'),
            update_allowed=True,
            support_status=support.SupportStatus(version='5.0.0'),
        ),
        PROJECTS: properties.Schema(
            properties.Schema.LIST,
            _('Projects to add volume type access to. NOTE: This '
              'property is only supported since Cinder API V2.'),
            support_status=support.SupportStatus(version='5.0.0'),
            update_allowed=True,
            schema=properties.Schema(
                properties.Schema.STRING,
                constraints=[
                    constraints.CustomConstraint('keystone.project')
                ],
            ),
            default=[],
        ),
    }

    def _add_projects_access(self, projects):
        for project in projects:
            project_id = self.client_plugin('keystone').get_project_id(project)
            self.cinder().volume_type_access.add_project_access(
                self.resource_id, project_id)

    def handle_create(self):
        args = {
            'name': self.properties[self.NAME],
            'is_public': self.properties[self.IS_PUBLIC],
            'description': self.properties[self.DESCRIPTION]
        }

        volume_type = self.client().volume_types.create(**args)
        self.resource_id_set(volume_type.id)
        vtype_metadata = self.properties[self.METADATA]
        if vtype_metadata:
            volume_type.set_keys(vtype_metadata)
        self._add_projects_access(self.properties[self.PROJECTS])

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Update the name, description and metadata for volume type."""
        update_args = {}
        # Update the name, description, is_public of cinder volume type
        is_public = self.properties[self.IS_PUBLIC]
        if self.DESCRIPTION in prop_diff:
            update_args['description'] = prop_diff.get(self.DESCRIPTION)
        if self.NAME in prop_diff:
            update_args['name'] = prop_diff.get(self.NAME)
        if self.IS_PUBLIC in prop_diff:
            is_public = prop_diff.get(self.IS_PUBLIC)
            update_args['is_public'] = is_public
        if update_args:
            self.client().volume_types.update(self.resource_id, **update_args)
        # Update the key-value pairs of cinder volume type.
        if self.METADATA in prop_diff:
            volume_type = self.client().volume_types.get(self.resource_id)
            old_keys = volume_type.get_keys()
            volume_type.unset_keys(old_keys)
            new_keys = prop_diff.get(self.METADATA)
            if new_keys is not None:
                volume_type.set_keys(new_keys)
        # Update the projects access for volume type
        if self.PROJECTS in prop_diff and not is_public:
            old_access_list = self.cinder().volume_type_access.list(
                self.resource_id)
            old_projects = [ac._info['project_id'] for ac in old_access_list]
            new_projects = prop_diff.get(self.PROJECTS)
            # first remove the old projects access
            for project_id in (set(old_projects) - set(new_projects)):
                self.cinder().volume_type_access.remove_project_access(
                    self.resource_id, project_id)
            # add the new projects access
            self._add_projects_access(set(new_projects) - set(old_projects))

    def validate(self):
        super(CinderVolumeType, self).validate()

        if self.properties[self.PROJECTS]:
            if self.cinder().volume_api_version == 1:
                raise exception.NotSupported(
                    feature=_('Using Cinder API V1, volume type access'))
            if self.properties[self.IS_PUBLIC]:
                msg = (_('Can not specify property "%s" '
                         'if the volume type is public.') % self.PROJECTS)
                raise exception.StackValidationFailed(message=msg)


def resource_mapping():
    return {
        'OS::Cinder::VolumeType': CinderVolumeType
    }
Python
0.000003
@@ -3195,37 +3195,37 @@ self.c -inder +lient ().volume_type_a @@ -5115,29 +5115,29 @@ ist = self.c -inder +lient ().volume_ty @@ -5460,29 +5460,29 @@ self.c -inder +lient ().volume_ty @@ -5822,21 +5822,21 @@ f self.c -inder +lient ().volum
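Decoded, the hunks replace four hard-coded self.cinder() calls with the generic self.client(), which resolves through the default_client_name = 'cinder' already declared on the resource. One affected call after the change, per the diff:

            self.client().volume_type_access.add_project_access(
                self.resource_id, project_id)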
e4596497dc57870a8ea3433e97f32d61bb7eb4e5
Version 3.14.3dev
engineio/__init__.py
engineio/__init__.py
import sys

from .client import Client
from .middleware import WSGIApp, Middleware
from .server import Server

if sys.version_info >= (3, 5):  # pragma: no cover
    from .asyncio_server import AsyncServer
    from .asyncio_client import AsyncClient
    from .async_drivers.asgi import ASGIApp
    try:
        from .async_drivers.tornado import get_tornado_handler
    except ImportError:
        get_tornado_handler = None
else:  # pragma: no cover
    AsyncServer = None
    AsyncClient = None
    get_tornado_handler = None
    ASGIApp = None

__version__ = '3.14.2'

__all__ = ['__version__', 'Server', 'WSGIApp', 'Middleware', 'Client']
if AsyncServer is not None:  # pragma: no cover
    __all__ += ['AsyncServer', 'ASGIApp', 'get_tornado_handler',
                'AsyncClient'],
Python
0
@@ -564,9 +564,12 @@ .14. -2 +3dev '%0A%0A_
89dac8b14610f08b12db0ab6e00b7432b527fd89
Remove trailing whitespace
python/balcaza/activity/local/text.py
python/balcaza/activity/local/text.py
from balcaza.t2types import *
from balcaza.t2activity import BeanshellCode

ByteArrayToString = BeanshellCode(
	'''if ((bytes == void) || (bytes == null)) {
	throw new RuntimeException("The 'bytes' parameter must be specified");
}
if (encoding == void) {
	string = new String(bytes);
} else {
	string = new String(bytes, encoding);
}
''',
	inputs = dict(
		bytes = String,
		encoding = Optional[String]
		),
	outputs = dict(
		string = String
		),
	defaultInput = 'bytes',
	name = 'ByteArrayToString'	
	)
Python
0.999999
@@ -492,13 +492,12 @@ oString' -%09 %0A%09)%0A
d5e17c094c722cee291d493fd05b46040c5d8284
Enable python machine learning logging
python/machine_weighting_predictor.py
python/machine_weighting_predictor.py
""" Entry point for Python service, to which data is POSTed to train the ABRAID Ensemble Chain, and from which a prediction for a new datapoint is requested. """ from chain import Chain from flask import Flask, request from sklearn.externals import joblib import numpy as np app = Flask(__name__) PREDICTORS = {} FEED_CLASSES = {} PICKLES_SUBFOLDER_PATH = 'pickles/' @app.route('/<int:disease_group_id>/train', methods=['POST']) def train(disease_group_id): """ Use data extracted from request JSON to create structure in a training phase. """ try: data = request.json['points'] except KeyError: return ('Invalid JSON', 400) predictor = Chain() if len(data) > 50: try: X = _convert_json_to_matrix(disease_group_id, data) y = np.array(_pluck('expertWeighting', data)) except KeyError: return ('Invalid JSON', 400) else: predictor.train(X, y) _save_predictor(disease_group_id, predictor) return ('Trained predictor saved', 200) else: _save_predictor(disease_group_id, predictor) return ('Insufficient training data - empty predictor saved', 200) @app.route('/<int:disease_group_id>/predict', methods=['POST']) def predict(disease_group_id): """ Return the prediction of the provided disease occurrence point. """ # Use the predictor in memory, otherwise load from backup pickle version if disease_group_id in PREDICTORS: predictor = PREDICTORS[disease_group_id] else: try: filename = _get_pickled_predictor_filename(disease_group_id) predictor = joblib.load(filename) except IOError as e: return ('Unable to load predictor for disease group - ' + e.strerror, 400) # Use the feed classes map in memory, otherwise load from backup pickle version if disease_group_id in FEED_CLASSES: feed_classes = FEED_CLASSES[disease_group_id] else: try: filename = _get_pickled_feed_classes_filename(disease_group_id) feed_classes = joblib.load(filename) except IOError as e: return ('Unable to load feeds for disease group - ' + e.strerror, 400) try: x = np.zeros(2 + len(feed_classes) + 1) x[0] = request.json['environmentalSuitability'] x[1] = request.json['distanceFromExtent'] feed = _get_feed_class(disease_group_id, request.json['feedId']) x[feed + 2] = 1 except KeyError: return ('Invalid JSON', 400) prediction = predictor.predict(x) if prediction is None: return ('No prediction', 200) else: return (str(prediction), 200) def _convert_json_to_matrix(disease_group_id, json): feeds = [_get_feed_class(disease_group_id, feed_id) for feed_id in _pluck('feedId', json)] n = 2 + len(FEED_CLASSES[disease_group_id]) + 1 X = np.zeros((len(json), n)) X[:, 0] = _pluck('environmentalSuitability', json) X[:, 1] = _pluck('distanceFromExtent', json) for i, f in enumerate(feeds): X[i, f + 2] = 1 return X def _get_feed_class(disease_group_id, feed_id): """ Map from feed id (which could be any integer and skew the data) to an incremental class number """ if disease_group_id not in FEED_CLASSES: FEED_CLASSES[disease_group_id] = {} if feed_id not in FEED_CLASSES[disease_group_id]: FEED_CLASSES[disease_group_id][feed_id] = len(FEED_CLASSES[disease_group_id]) return FEED_CLASSES[disease_group_id][feed_id] def _pluck(name, json): """ Extract the named feature from each item in json, as an array """ return [x[name] for x in json] def _save_predictor(disease_group_id, predictor): """ Save to dict, and back up a pickled version on disk """ PREDICTORS[disease_group_id] = predictor try: joblib.dump(predictor, _get_pickled_predictor_filename(disease_group_id)) joblib.dump(FEED_CLASSES, _get_pickled_feed_classes_filename(disease_group_id)) except IOError as e: print 
'Unable to save pickle - ' + e.strerror def _get_pickled_predictor_filename(disease_group_id): return PICKLES_SUBFOLDER_PATH + str(disease_group_id) + '_predictor.pkl' def _get_pickled_feed_classes_filename(disease_group_id): return PICKLES_SUBFOLDER_PATH + str(disease_group_id) + '_feed_classes.pkl' if __name__ == '__main__': app.run(host='localhost', debug=True, use_debugger=True)
Python
0.000007
@@ -267,16 +267,75 @@ py as np +%0Aimport logging, sys%0Alogging.basicConfig(stream=sys.stderr) %0A%0Aapp =
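Decoded, the commit adds two lines right after the numpy import so that log output is wired to stderr at import time:

import logging, sys
logging.basicConfig(stream=sys.stderr)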
b324031ee683005be0307e3b323c4709ce3a01eb
Disable those new requirements because pip requires gcc to install them
python_apps/airtime_analyzer/setup.py
python_apps/airtime_analyzer/setup.py
from setuptools import setup
from subprocess import call
import sys

# Allows us to avoid installing the upstart init script when deploying airtime_analyzer
# on Airtime Pro:
if '--no-init-script' in sys.argv:
    data_files = []
    sys.argv.remove('--no-init-script') # super hax
else:
    data_files = [('/etc/init', ['install/upstart/airtime_analyzer.conf'])]

print data_files

setup(name='airtime_analyzer',
      version='0.1',
      description='Airtime Analyzer Worker and File Importer',
      url='http://github.com/sourcefabric/Airtime',
      author='Albert Santoni',
      author_email='albert.santoni@sourcefabric.org',
      license='MIT',
      packages=['airtime_analyzer'],
      scripts=['bin/airtime_analyzer'],
      install_requires=[
          'mutagen',
          'pika',
          'python-magic',
          'nose',
          'coverage',
          'mock',
          'python-daemon',
          'requests',
          # These next 3 are required for requests to support SSL with SNI. This is extremely important. Learned this the hard way...
          'ndg-httpsclient',
          'pyasn1',
          'pyopenssl'
      ],
      zip_safe=False,
      data_files=data_files)

# Reload the initctl config so that "service start airtime_analyzer" works
if data_files:
    print "Reloading initctl configuration"
    call(['initctl', 'reload-configuration'])
    print "Run \"sudo service airtime_analyzer restart\" now."
    # TODO: Should we start the analyzer here or not?
Python
0.000002
@@ -1005,37 +1005,8 @@ SNI. - This is extremely important. Lea @@ -1041,16 +1041,87 @@ +# What sucks is that GCC is required to pip install these. %0A # 'ndg-htt @@ -1141,16 +1141,17 @@ +# 'pyasn1' @@ -1162,16 +1162,17 @@ +# 'pyopens
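Decoded (approximately, since the compact hunks truncate some context), the three SNI dependencies end up commented out with a new explanatory line, so pip no longer needs GCC to build them. The tail of install_requires would read roughly:

          'requests',
          # These next 3 are required for requests to support SSL with SNI.
          # What sucks is that GCC is required to pip install these.
          # 'ndg-httpsclient',
          # 'pyasn1',
          # 'pyopenssl'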
2c1811fad85d6bacf8d3fcaf1299994bfc5efb78
Support serializer path instead of "self" keyword
drf_extra_fields/relations.py
drf_extra_fields/relations.py
from collections import OrderedDict

from rest_framework.relations import PrimaryKeyRelatedField, SlugRelatedField


class PresentableRelatedFieldMixin(object):
    def __init__(self, **kwargs):
        self.presentation_serializer = kwargs.pop("presentation_serializer", None)
        self.presentation_serializer_kwargs = kwargs.pop(
            "presentation_serializer_kwargs", dict()
        )
        assert self.presentation_serializer is not None, (
            self.__class__.__name__
            + " must provide a `presentation_serializer` argument"
        )
        super(PresentableRelatedFieldMixin, self).__init__(**kwargs)

    def use_pk_only_optimization(self):
        """
        Instead of sending pk only object, return full object. The object already retrieved from db by drf.
        This doesn't cause an extra query.
        It even might save from making an extra query on serializer.to_representation method.
        Related source codes:
        - https://github.com/tomchristie/django-rest-framework/blob/master/rest_framework/relations.py#L41
        - https://github.com/tomchristie/django-rest-framework/blob/master/rest_framework/relations.py#L132
        """
        return False

    def get_choices(self, cutoff=None):
        queryset = self.get_queryset()
        if queryset is None:
            # Ensure that field.choices returns something sensible
            # even when accessed with a read-only field.
            return {}

        if cutoff is not None:
            queryset = queryset[:cutoff]

        return OrderedDict([(item.pk, self.display_value(item)) for item in queryset])

    def to_representation(self, data):
        return self.presentation_serializer(
            data, context=self.context, **self.presentation_serializer_kwargs
        ).data

    def bind(self, field_name, parent):
        if self.presentation_serializer == "self":
            self.presentation_serializer = parent.__class__
        super(PresentableRelatedFieldMixin, self).bind(field_name, parent)


class PresentablePrimaryKeyRelatedField(
    PresentableRelatedFieldMixin, PrimaryKeyRelatedField
):
    """
    Override PrimaryKeyRelatedField to represent serializer data instead of a pk field of the object.
    """

    pass


class PresentableSlugRelatedField(PresentableRelatedFieldMixin, SlugRelatedField):
    """
    Override SlugRelatedField to represent serializer data instead of a slug field of the object.
    """

    pass
Python
0
@@ -30,16 +30,70 @@
 edDict%0A%0A
+from django.utils.module_loading import import_string%0A
 from res
@@ -1727,23 +1727,30 @@
 
-return
+if isinstance(
 self.pre
@@ -1769,17 +1769,23 @@
 rializer
-(
+, str):
 %0A
@@ -1793,129 +1793,83 @@
-data, context=self.context, **self.presentation_serializer_kwargs%0A        ).data%0A%0A    def bind
+self.presentation_serializer = import_string
 (self
-, field_name, parent):
+.presentation_serializer)%0A
 %0A
@@ -1873,18 +1873,22 @@
 
-if
+return
 self.pr
@@ -1912,154 +1912,102 @@
 izer
- == %22self%22:%0A            self.presentation_serializer = parent.__class__%0A        super(PresentableRelatedFieldMixin, self).bind(field_name, parent)
+(%0A            data, context=self.context, **self.presentation_serializer_kwargs%0A        ).data
 %0A%0A%0Ac
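For readability, applying the hunks above to the old contents gives roughly this patched `to_representation` — a reconstruction, not a row of the dataset; the dotted path in the comment is an illustrative assumption:

from django.utils.module_loading import import_string

def to_representation(self, data):
    # A serializer may now be given as a dotted path string
    # (e.g. "myapp.serializers.UserSerializer" -- hypothetical name)
    # and is resolved to the class on first use, replacing the old
    # "self" keyword handling that lived in bind().
    if isinstance(self.presentation_serializer, str):
        self.presentation_serializer = import_string(self.presentation_serializer)

    return self.presentation_serializer(
        data, context=self.context, **self.presentation_serializer_kwargs
    ).data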
461b7c5bf5541fc3a56039d6756262d6b99e8428
Add null count.
problem/column_explorer/column_explorer.py
problem/column_explorer/column_explorer.py
#! /usr/bin/env python3

# Copyright 2019 John Hanley.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# The software is provided "AS IS", without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and noninfringement. In no event shall
# the authors or copyright holders be liable for any claim, damages or
# other liability, whether in an action of contract, tort or otherwise,
# arising from, out of or in connection with the software or the use or
# other dealings in the software.

"""Systematically finds aggregate stats for a table's columns."""

import sqlalchemy as sa
import uszipcode


def get_zipcode_session():
    return uszipcode.SearchEngine().ses


def get_zipcode_cs():
    """Returns a JDBC connect string for the zipcode database."""
    # typical value: sqlite:////Users/foo/.uszipcode/simple_db.sqlite
    return get_zipcode_session().connection().engine.url


class ColumnExplorer:

    def __init__(self, cs_or_engine):
        self.engine = sa.create_engine(cs_or_engine)

    def report(self, table_name):
        for column in self._get_col_names(table_name):
            print('\n## ' + column)
            for agg in ['min', 'avg', 'max', 'count(distinct ']:
                if '(' not in agg:
                    agg += '('
                select = f'select {agg}{column}) from {table_name}'
                stat, = self.engine.execute(select).fetchone()
                print('-', agg.replace('(', ' '), stat)
        cnt, = self.engine.execute(f'select count(*) from {table_name}').fetchone()
        print(f'\n{cnt} rows in {table_name}')

    def _get_col_names(self, table_name):
        meta = sa.MetaData(bind=self.engine)
        tbl = sa.Table(table_name, meta, autoload=True)
        return map(str, tbl.columns)


if __name__ == '__main__':
    ColumnExplorer(get_zipcode_cs()).report('simple_zipcode')
Python
0.000001
@@ -1636,34 +1636,307 @@
 self, table_name
-):
+, round_digits=3):%0A%0A        meta = sa.MetaData(bind=self.engine)%0A        tbl = sa.Table(table_name, meta, autoload=True)%0A%0A        cnt, = self.engine.execute(f'select count(*) from %7Btable_name%7D').fetchone()%0A        print(f'# %7Btable_name%7D%5Cn%7Bcnt%7D rows, %7Blen(tbl.c)%7D columns%5Cn')%0A
 %0A        for col
@@ -1963,25 +1963,18 @@
 _names(t
-a
 bl
-e_name
 ):%0A
@@ -2065,32 +2065,41 @@
 istinct
+', 'nulls
 '%5D:%0A
@@ -2212,16 +2212,150 @@
 _name%7D'%0A
+                if agg.startswith('nulls'):%0A                    select = f'select count(*) from %7Btable_name%7D where %7Bcolumn%7D is null'%0A%0A
@@ -2401,32 +2401,33 @@
 ect).fetchone()%0A
+%0A
@@ -2430,132 +2430,298 @@
-print('-', agg.replace('(', ' '), stat)%0A%0A        cnt, = self.engine.execute(f'select count(*) from %7Btable_name%7D').fetchone()
+if agg.startswith('avg'):%0A                    stat = round(stat, round_digits)%0A                if agg.startswith('nulls'):%0A                    pct = round(100 * stat / cnt, round_digits)%0A                    stat = f'%7Bstat%7D (%7Bpct%7D %25)'%0A                print('-', agg.replace('(', ' '), stat)%0A
 %0A
@@ -2799,21 +2799,16 @@
 f, table
-_name
 ):%0A
@@ -2814,137 +2814,74 @@
-meta = sa.MetaData(bind=self.engine)%0A        tbl = sa.Table(table_name, meta, autoload=True)%0A        return map(str, tbl.columns)
+for col in table.columns:%0A            yield str(col).split('.')%5B1%5D
 %0A%0A%0Ai
88dc575f366a95275c414569c613a9df34c35968
Update account_analytic_account.py
program_budget/account_analytic_account.py
program_budget/account_analytic_account.py
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2014 Savoir-faire Linux (<www.savoirfairelinux.com>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm
from openerp.tools.translate import _


class account_analytic_account(orm.Model):
    _inherit = 'account.analytic.account'

    def _get_propagatable_fields(self):
        return {
            'name',
            'code',
        }

    def _get_related_result(self, cr, uid, account_id, context=None):
        """ Return the parent result analytic id."""
        if not account_id:
            return False
        else:
            result_pool = self.pool.get('program.result')
            parent_result_id = result_pool.search(cr, uid, [
                ('account_analytic_id', '=', account_id)
            ], context=context)
            if not parent_result_id:
                return False
            return result_pool.browse(
                cr, uid, parent_result_id[0], context=context)

    def create(self, cr, uid, vals, context=None):
        """ Create an analytic account
        Create the related result if this account is under the root program
        node"""
        context = context or {}
        account_id = super(account_analytic_account, self).create(
            cr, uid, vals, context=context
        )
        if context.get('result_written', False):
            return account_id

        parent_id = vals.get('parent_id', False)

        # If there is no parent ID it cannot be under the root
        # program node.
        if not parent_id:
            return account_id

        # If the parent analytic account is related has an associated
        # result, we assume we are under the root program node.
        parent_result = self._get_related_result(cr, uid, parent_id,
                                                 context=context)

        # If there are NO parent results we don't need to create the related
        # result
        if not parent_result:
            return account_id

        result_pool = self.pool.get('program.result')
        if parent_result:
            parent_result_id = parent_result.id
        else:
            parent_result_id = False
        propagated_fields = {
            i: j for i, j in vals.items()
            if i in self._get_propagatable_fields()
        }
        result_pool.create(
            cr, uid, dict(propagated_fields.items() + {
                'name': vals.get('name', False),
                'code': vals.get('code', False),
                'account_analytic_id': account_id,
                'parent_id': parent_result_id,
            }.items()),
            context=dict(context, account_written=True)
        )
        return account_id

    def write(self, cr, uid, ids, values, context=None):
        result_pool = self.pool.get('program.result')
        if context is None:
            context = {}
        if type(ids) not in (list, ):
            ids = [ids]
        res = super(account_analytic_account, self).write(
            cr, uid, ids, values, context=context
        )
        if context.get('result_written', False):
            return res
        for account in self.browse(cr, uid, ids, context=context):
            propagated_fields = {
                i: j for i, j in values.items()
                if i in self._get_propagatable_fields()
            }
            if values.get('parent_id'):
                propagated_fields['parent_id'] = False
                related_result = self._get_related_result(
                    cr, uid, values['parent_id'], context=context
                )
                if related_result:
                    propagated_fields['parent_id'] = related_result.id
            if propagated_fields:
                related_result = result_pool.search(
                    cr, uid,
                    [('account_analytic_id', '=', account.id), ],
                    context=context
                )
                result_pool.write(
                    cr, uid, related_result, propagated_fields,
                    context=dict(context, account_written=True)
                )
        return res

    def unlink(self, cr, uid, ids, context=None):
        result_pool = self.pool.get('program.result')
        if type(ids) is not list:
            ids = [ids]
        if result_pool.search(cr, uid, [('account_analytic_id', 'in', ids)],
                              context=context):
            raise orm.except_orm(
                _('Error'),
                _('Account still associated with a Program Result'))
        return super(account_analytic_account, self).unlink(
            cr, uid, ids, context=context
        )
Python
0.000011
@@ -2020,16 +2020,22 @@
 ontext =
+ %7B%7D if
 context
@@ -2039,13 +2039,28 @@
 ext
-or %7B%7D
+is None else context
 %0A%0A
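The whole patch is the one-line guard below. The distinction matters because `context or {}` also replaces an explicitly passed empty dict; before/after, distilled for readability:

# before: any falsy context (including an explicit {}) is replaced
context = context or {}
# after: only a missing (None) context is replaced
context = {} if context is None else context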
f65c2466cf58c4024584423da3d927ad9522eec9
Fix __repr__ if no person linked
indico/modules/events/contributions/models/persons.py
indico/modules/events/contributions/models/persons.py
# This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.

from __future__ import unicode_literals

from indico.core.db.sqlalchemy import db, PyIntEnum
from indico.util.string import return_ascii, format_repr
from indico.modules.events.models.persons import PersonLinkBase
from indico.util.struct.enum import IndicoEnum


class AuthorType(int, IndicoEnum):
    none = 0
    primary = 1
    secondary = 2


class ContributionPersonLink(PersonLinkBase):
    """Association between EventPerson and Contribution."""

    __tablename__ = 'contribution_person_links'
    __auto_table_args = {'schema': 'events'}
    person_link_backref_name = 'contribution_links'
    person_link_unique_columns = ('contribution_id',)

    contribution_id = db.Column(
        db.Integer,
        db.ForeignKey('events.contributions.id'),
        primary_key=True,
        index=True
    )
    is_speaker = db.Column(
        db.Boolean,
        nullable=False,
        default=False
    )
    author_type = db.Column(
        PyIntEnum(AuthorType),
        nullable=False,
        default=AuthorType.none
    )

    # relationship backrefs:
    # - contribution (Contribution.person_links)

    @return_ascii
    def __repr__(self):
        return format_repr(self, 'contribution_id', 'person_id', is_speaker=False, author_type=AuthorType.none,
                           _text=self.person.full_name)


class SubContributionPersonLink(PersonLinkBase):
    """Association between EventPerson and SubContribution."""

    __tablename__ = 'subcontribution_person_links'
    __auto_table_args = {'schema': 'events'}
    person_link_backref_name = 'subcontribution_links'
    person_link_unique_columns = ('subcontribution_id',)

    # subcontribution persons are always speakers and never authors
    # we provide these attributes to make subcontribution links
    # compatible with contribution links
    is_speaker = True
    author_type = AuthorType.none

    subcontribution_id = db.Column(
        db.Integer,
        db.ForeignKey('events.subcontributions.id'),
        primary_key=True,
        index=True
    )

    # relationship backrefs:
    # - subcontribution (SubContribution.person_links)

    @return_ascii
    def __repr__(self):
        return format_repr(self, 'subcontribution_id', 'person_id', _text=self.person.full_name)
Python
0.00004
@@ -2039,16 +2039,41 @@
 ull_name
+ if self.person else None
 )%0A%0A%0Aclas
@@ -2974,16 +2974,43 @@
 son_id',
+%0A                           
 _text=s
@@ -3029,10 +3029,35 @@
 ull_name
+ if self.person else None
 )%0A
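Applied to the old contents, the hunks guard both __repr__ methods against a missing person; for the contribution link the result is roughly this (a reconstruction, not a dataset row):

@return_ascii
def __repr__(self):
    # person can be None, so guard the attribute access to keep
    # repr() from raising AttributeError
    return format_repr(self, 'contribution_id', 'person_id', is_speaker=False,
                       author_type=AuthorType.none,
                       _text=self.person.full_name if self.person else None)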
2943d0f68340bc16ab29a42072751b263cf76192
Fix deleting images/pages with a legacy mapping
indico/modules/events/layout/models/legacy_mapping.py
indico/modules/events/layout/models/legacy_mapping.py
# This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.

from __future__ import unicode_literals

from indico.core.db import db
from indico.util.string import return_ascii


class LegacyImageMapping(db.Model):
    """Legacy image id mapping

    Legacy images had event-unique numeric ids. Using this mapping
    we can resolve old ones to their new id.
    """

    __tablename__ = 'legacy_image_id_map'
    __table_args__ = {'schema': 'events'}

    event_id = db.Column(
        db.Integer,
        db.ForeignKey('events.events.id'),
        primary_key=True,
        index=True,
        autoincrement=False
    )
    legacy_image_id = db.Column(
        db.Integer,
        primary_key=True,
        index=True,
        autoincrement=False
    )
    image_id = db.Column(
        db.Integer,
        db.ForeignKey('events.image_files.id'),
        nullable=False
    )

    image = db.relationship(
        'ImageFile',
        lazy=False,
        backref=db.backref('legacy_mapping', uselist=False, lazy=True)
    )

    @return_ascii
    def __repr__(self):
        return '<LegacyImageMapping({}, {})>'.format(self.legacy_image_id, self.image_id)


class LegacyPageMapping(db.Model):
    """Legacy page id mapping

    Legacy pages had event-unique numeric ids. Using this mapping we can
    resolve old ones to their new id.
    """

    __tablename__ = 'legacy_page_id_map'
    __table_args__ = {'schema': 'events'}

    event_id = db.Column(
        db.Integer,
        db.ForeignKey('events.events.id'),
        primary_key=True,
        index=True,
        autoincrement=False
    )
    legacy_page_id = db.Column(
        db.Integer,
        primary_key=True,
        index=True,
        autoincrement=False
    )
    page_id = db.Column(
        db.Integer,
        db.ForeignKey('events.pages.id'),
        nullable=False
    )

    page = db.relationship(
        'EventPage',
        lazy=False,
        backref=db.backref('legacy_mapping', uselist=False, lazy=True)
    )

    @return_ascii
    def __repr__(self):
        return '<LegacyPageMapping({}, {})>'.format(self.legacy_page_id, self.image_id)
Python
0.000006
@@ -1628,32 +1628,45 @@
 kref=db.backref(
+%0A        
 'legacy_mapping'
@@ -1658,32 +1658,86 @@
 legacy_mapping',
+%0A        cascade='all, delete-orphan',%0A        
 uselist=False,
@@ -1727,32 +1727,44 @@
 uselist=False,
+%0A        
 lazy=True)%0A
@@ -1749,32 +1749,41 @@
 lazy=True
+%0A    )%0A
 )%0A%0A    @re
@@ -2693,16 +2693,29 @@
 backref(
+%0A        
 'legacy_
@@ -2723,16 +2723,70 @@
 apping',
+%0A        cascade='all, delete-orphan',%0A        
 uselist
@@ -2792,16 +2792,28 @@
 t=False,
+%0A        
 lazy=Tr
@@ -2814,16 +2814,25 @@
 azy=True
+%0A    )%0A
 )%0A
86d088835a88c00af69090b6b7f1bae42ff5c09a
remove monetdb typo
lib/sqlalchemy/databases/__init__.py
lib/sqlalchemy/databases/__init__.py
# __init__.py
# Copyright (C) 2005, 2006, 2007, 2008 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

__all__ = [
    'sqlite', 'postgres', 'mysql', 'oracle', 'mssql', 'firebird', 'sybase',
    'access', 'maxdb', 'monetdb'
    ]
Python
0.999923
@@ -330,19 +330,8 @@
 xdb'
-, 'monetdb'
 %0A
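Applied to the old contents, the hunk simply drops the stray ", 'monetdb'" entry, leaving (a reconstruction for readability):

__all__ = [
    'sqlite', 'postgres', 'mysql', 'oracle', 'mssql', 'firebird', 'sybase',
    'access', 'maxdb'
    ]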
4dc1f2b85c7728102a6cc4b149fb5200a0ffa736
Fix get_user_command to comply with new AirWatch logic TelekomLabs-DCO-1.1-Signed-off-by: Łukasz Biernot <lukasz.biernot@gmail.com> (github: ElmoVanKielmo)
src/tenants/management/commands/get_user.py
src/tenants/management/commands/get_user.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright 2014, Deutsche Telekom AG - Laboratories (T-Labs)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.core.management.base import BaseCommand
from optparse import make_option

from provisioning import okta
from provisioning.models import Okta
from litedesk.lib.airwatch import user
from litedesk.lib.airwatch import group
from provisioning.models import AirWatch
import json


class Command(BaseCommand):
    help = 'Get information about a user.'
    option_list = BaseCommand.option_list + (
        make_option('--username', default="bruce.wayne",
                    help='Username to find. Default="bruce.wayne"'),
    )

    def handle(self, *args, **options):
        result = {'okta': {}, 'airwatch': {}}
        okta_service = Okta.objects.all().get()
        client = okta.Client(okta_service.domain, okta_service.api_token)
        okta_user = client.get(okta.User, options["username"])
        if okta_user:
            # self.stdout.write("got the Okta user with the id")
            result['okta']['id'] = okta_user.id
            result['okta']['status'] = okta_user.status
            result['okta']['applications'] = []
            okta_apps = client.user_applications(okta_user)
            for app in okta_apps:
                result['okta']['applications'].append(app['name'])

        airwatch_service = AirWatch.objects.all().get()
        airwatch_client = airwatch_service.get_client()
        airwatch_user = user.User.get_remote(airwatch_client, options["username"])
        if airwatch_user != None:
            result['airwatch']['id'] = airwatch_user.id
            result['airwatch']['Status'] = airwatch_user.Status
            result['airwatch']['applications'] = []
            aw_assets = airwatch_service.airwatch.tenantserviceasset_set.all()
            for asset in aw_assets:
                group_id = asset.metadata['group_id']
                if options["username"] in group.UserGroup.usernames_by_group_id(airwatch_client, group_id):
                    result['airwatch']['applications'].append(asset.asset.name)

        self.stdout.write(json.dumps(result))
Python
0
@@ -2413,16 +2413,21 @@
 
+smart
 group_id
@@ -2445,16 +2445,21 @@
 tadata%5B'
+smart
 group_id
@@ -2499,16 +2499,63 @@
 ername%22%5D
+ in (%0A                user%5B'Name'%5D for user in
 grou
@@ -2560,40 +2560,55 @@
 oup.
-User
+Smart
 Group.
-usernames_by_group_id(
+get_remote(%0A                    
 airw
@@ -2620,24 +2620,29 @@
 client,
+ smart
 group_id
@@ -2633,16 +2633,69 @@
 group_id
+%0A                ).UserAdditions%0A            
 ):%0A
71b2fcdd85187520a38ead5736d1b64b1e69afa4
Fix hedu enrolled, entrant and graduate columns
app/models/hedu.py
app/models/hedu.py
from sqlalchemy import Column, Integer, String, func

from app import db


class Hedu(db.Model):
    __tablename__ = 'hedu'

    year = Column(Integer, primary_key=True)
    region = Column(String(1), primary_key=True)
    mesoregion = Column(String(4), primary_key=True)
    microregion = Column(String(5), primary_key=True)
    state = Column(String(2), primary_key=True)
    municipality = Column(String(7), primary_key=True)
    university = Column(String(5), primary_key=True)
    university_campus = Column(String(7), primary_key=True)
    funding_type = Column(String(1), primary_key=True)
    school_type = Column(String(1), primary_key=True)
    hedu_course_field = Column(String(2), primary_key=True)
    hedu_course = Column(String(6), primary_key=True)
    enrolled = Column(String(12), primary_key=True)
    graduates = Column(String(1), primary_key=True)
    entrants = Column(String(1), primary_key=True)
    academic_degree = Column(String(2), primary_key=True)
    distance_learning = Column(String(1), primary_key=True)
    shift = Column(String(2), primary_key=True)
    gender = Column(String(2), primary_key=True)
    age = Column(Integer)
    ethnicity = Column(String(2), primary_key=True)
    state_of_birth = Column(String(2), primary_key=True)
    municipality_of_birth = Column(String(7), primary_key=True)
    admission_year = Column(String(4))
    admission_month = Column(String(2))

    @classmethod
    def dimensions(cls):
        return [
            'year',
            'region',
            'mesoregion',
            'microregion',
            'state',
            'municipality',
            'university',
            'university_campus',
            'funding_type',
            'school_type',
            'hedu_course_field',
            'hedu_course',
            'enrolled',
            'graduates',
            'entrants',
            'academic_degree',
            'distance_learning',
            'shift',
            'gender',
            'age',
            'ethnicity',
            'state_of_birth',
            'municipality_of_birth',
            'admission_year',
            'admission_month'
        ]

    @classmethod
    def aggregate(cls, value):
        return {
            'enrolled': func.count(),
            'entrants': func.sum(cls.entrants),
            'graduates': func.sum(cls.graduates),
            'average_age': func.avg(cls.age)
        }[value]

    @classmethod
    def values(cls):
        return ['enrolled', 'entrants', 'graduates', 'average_age']
Python
0.000001
@@ -966,17 +966,17 @@
 graduate
-s
+ 
@@ -1029,17 +1029,17 @@
 entrant
-s
+ 
@@ -2090,17 +2090,16 @@
 graduate
-s
 ',%0A
@@ -2113,17 +2113,16 @@
 'entrant
-s
 ',%0A
@@ -2491,16 +2491,17 @@
 enrolled
+s
 ': func.
@@ -2553,17 +2553,16 @@
 .entrant
-s
 ),%0A
@@ -2602,17 +2602,16 @@
 graduate
-s
 ),%0A
@@ -2731,16 +2731,17 @@
 enrolled
+s
 ', 'entr
4956868cd605faa085ae2fc4ab44076760a4fd80
Add some more auto_meta tests
metafunctions/tests/test_pipe.py
metafunctions/tests/test_pipe.py
import operator
import unittest

from metafunctions.tests.util import BaseTestCase
from metafunctions.decorators import pipe_node


class TestUnit(BaseTestCase):
    def test_basic_usage(self):
        self.assertEqual(a('_'), '_a')

    def test_wraps(self):
        @pipe_node
        def d():
            'a docstring for d'
        self.assertEqual(d.__doc__, 'a docstring for d')

    def test_auto_meta(self):
        '''If possible, we upgrade functions to meta functions on the fly.'''
        def y(x):
            return x + 'y'

        ay = a | y
        ya = y | a
        ayyyy = a | y | y | y | y

        self.assertEqual(ay('_'), '_ay')
        self.assertEqual(ya('_'), '_ya')
        self.assertEqual(ayyyy('_'), '_ayyyy')

    def test_basic_composition(self):
        composite = a | b | c | d
        self.assertEqual(composite('_'), '_abcd')

    def test_advanced_str(self):
        cmp = a | b + c + d | e
        self.assertEqual(str(cmp), '(a | ((b + c) + d) | e)')
        self.assertEqual(cmp('_'), '_ab_ac_ade')

    def test_non_callable_composition(self):
        '''Anything that is not callable in a composition is applied at call
        time (to the results of the composed functions).
        '''
        @pipe_node
        def g(x):
            return x

        cmps_to_expected = (
            (g + 1, 11),
            (g - 1, 9),
            (g * 2, 20),
            (g / 2, 5),
        )

        for cmp, expected in cmps_to_expected:
            with self.subTest():
                self.assertEqual(cmp(10), expected)

    @unittest.skip("Making this work doesn't make sense anymore")
    def test_or(self):
        '''Assert that we can still use or'''
        @pipe_node
        def return_a_set(x):
            return set(*x)

        #Just wrap anything that isn't callable in a lambda, to put it off until call time
        outer_set = set((1, 2, 3))

        cmp = return_a_set | outer_set
        reverse_cmp = outer_set | return_a_set

        self.assertSetEqual(cmp('abc'), set('abc'))
        self.assertSetEqual(reverse_cmp('abc'), set('abc'))

    def test_single_calls(self):
        '''every function is only called once'''
        call_count = 0
        @pipe_node
        def y(x):
            nonlocal call_count
            call_count += 1
            return x + 'y'

        cmp = y | y * 2 | y + y | y
        self.assertEqual(cmp('_'), '_yy_yyy_yy_yyyy')
        self.assertEqual(call_count, 5)

    def test_repr(self):
        cmp = a | b | c | (lambda x: None)
        self.assertEqual(str(cmp), '(a | b | c | <lambda>)')


### Simple Sample Functions ###
@pipe_node
def a(x):
    return x + 'a'
@pipe_node
def b(x):
    return x + 'b'
@pipe_node
def c(x):
    return x + 'c'
@pipe_node
def d(x):
    return x + 'd'
@pipe_node
def e(x):
    return x + 'e'
Python
0
@@ -139,12 +139,19 @@
 Test
-Unit
+Integration
 (Bas
@@ -613,16 +613,149 @@
 y %7C y%0A%0A
+        # Can't do this%0A        #ayy = a %7C y + y%0A%0A        # But this should work%0A        yayy = y %7C a + y%0A        yy_ya = y %7C y + a%0A%0A
@@ -874,16 +874,109 @@
 _ayyyy')
+%0A        self.assertEqual(yayy('_'), '_ya_yy')%0A        self.assertEqual(yy_ya('_'), '_yy_ya')
 %0A%0A    de
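Read together, the hunks rename the suite from TestUnit to TestIntegration and extend test_auto_meta; note the added comment saying `a | y + y` (two plain functions under `+`) is still unsupported. Applied to the old contents, the new part of the test is roughly (a reconstruction, not a dataset row):

def test_auto_meta(self):
    '''If possible, we upgrade functions to meta functions on the fly.'''
    def y(x):
        return x + 'y'

    # Can't do this
    #ayy = a | y + y

    # But this should work
    yayy = y | a + y
    yy_ya = y | y + a

    self.assertEqual(yayy('_'), '_ya_yy')
    self.assertEqual(yy_ya('_'), '_yy_ya')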
a4f475245c3af8470337fe0c25b136e58189a607
Update griddy to use CoordinatorEntity (#39392)
homeassistant/components/griddy/sensor.py
homeassistant/components/griddy/sensor.py
"""Support for August sensors.""" import logging from homeassistant.const import ENERGY_KILO_WATT_HOUR from homeassistant.helpers.entity import Entity from .const import CONF_LOADZONE, DOMAIN _LOGGER = logging.getLogger(__name__) async def async_setup_entry(hass, config_entry, async_add_entities): """Set up the August sensors.""" coordinator = hass.data[DOMAIN][config_entry.entry_id] settlement_point = config_entry.data[CONF_LOADZONE] async_add_entities([GriddyPriceSensor(settlement_point, coordinator)], True) class GriddyPriceSensor(Entity): """Representation of an August sensor.""" def __init__(self, settlement_point, coordinator): """Initialize the sensor.""" self._coordinator = coordinator self._settlement_point = settlement_point @property def unit_of_measurement(self): """Return the unit of measurement.""" return f"¢/{ENERGY_KILO_WATT_HOUR}" @property def name(self): """Device Name.""" return f"{self._settlement_point} Price Now" @property def icon(self): """Device Ice.""" return "mdi:currency-usd" @property def unique_id(self): """Device Uniqueid.""" return f"{self._settlement_point}_price_now" @property def available(self): """Return True if entity is available.""" return self._coordinator.last_update_success @property def state(self): """Get the current price.""" return round(float(self._coordinator.data.now.price_cents_kwh), 4) @property def should_poll(self): """Return False, updates are controlled via coordinator.""" return False async def async_update(self): """Update the entity. Only used by the generic entity update service. """ await self._coordinator.async_request_refresh() async def async_added_to_hass(self): """Subscribe to updates.""" self.async_on_remove( self._coordinator.async_add_listener(self.async_write_ha_state) )
Python
0
@@ -128,22 +128,45 @@
 ers.
-entity import
+update_coordinator import Coordinator
 Enti
@@ -581,16 +581,27 @@
 eSensor(
+Coordinator
 Entity):
@@ -753,27 +753,24 @@
 s
-elf._coordinator =
+uper().__init__(
 coor
@@ -776,16 +776,17 @@
 rdinator
+)
 %0A
@@ -1307,151 +1307,8 @@
 w%22%0A%0A
-    @property%0A    def available(self):%0A        %22%22%22Return True if entity is available.%22%22%22%0A        return self._coordinator.last_update_success%0A%0A
@@ -1403,25 +1403,24 @@
 (float(self.
-_
 coordinator.
@@ -1453,519 +1453,4 @@
 4)%0A
-%0A    @property%0A    def should_poll(self):%0A        %22%22%22Return False, updates are controlled via coordinator.%22%22%22%0A        return False%0A%0A    async def async_update(self):%0A        %22%22%22Update the entity.%0A%0A        Only used by the generic entity update service.%0A        %22%22%22%0A        await self._coordinator.async_request_refresh()%0A%0A    async def async_added_to_hass(self):%0A        %22%22%22Subscribe to updates.%22%22%22%0A        self.async_on_remove(%0A            self._coordinator.async_add_listener(self.async_write_ha_state)%0A        )%0A
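The patch migrates the sensor to the CoordinatorEntity helper, which already provides available, should_poll, async_update and the listener subscription, so those hand-written members are deleted. Applied to the old contents, the changed parts of the class look roughly like this (a reconstruction, trimmed to what the hunks touch):

from homeassistant.helpers.update_coordinator import CoordinatorEntity

class GriddyPriceSensor(CoordinatorEntity):
    """Representation of an August sensor."""

    def __init__(self, settlement_point, coordinator):
        """Initialize the sensor."""
        super().__init__(coordinator)  # base class stores the coordinator
        self._settlement_point = settlement_point

    @property
    def state(self):
        """Get the current price."""
        # self.coordinator is provided by CoordinatorEntity
        return round(float(self.coordinator.data.now.price_cents_kwh), 4)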
f68764a05333d3c660c9f816f8926a692911fece
Use ColorMode enum in mystrom (#70526)
homeassistant/components/mystrom/light.py
homeassistant/components/mystrom/light.py
"""Support for myStrom Wifi bulbs.""" from __future__ import annotations import logging from pymystrom.bulb import MyStromBulb from pymystrom.exceptions import MyStromConnectionError import voluptuous as vol from homeassistant.components.light import ( ATTR_BRIGHTNESS, ATTR_EFFECT, ATTR_HS_COLOR, COLOR_MODE_HS, PLATFORM_SCHEMA, SUPPORT_EFFECT, SUPPORT_FLASH, LightEntity, ) from homeassistant.const import CONF_HOST, CONF_MAC, CONF_NAME from homeassistant.core import HomeAssistant from homeassistant.exceptions import PlatformNotReady import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType _LOGGER = logging.getLogger(__name__) DEFAULT_NAME = "myStrom bulb" EFFECT_RAINBOW = "rainbow" EFFECT_SUNRISE = "sunrise" MYSTROM_EFFECT_LIST = [EFFECT_RAINBOW, EFFECT_SUNRISE] PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_HOST): cv.string, vol.Required(CONF_MAC): cv.string, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, } ) async def async_setup_platform( hass: HomeAssistant, config: ConfigType, async_add_entities: AddEntitiesCallback, discovery_info: DiscoveryInfoType | None = None, ) -> None: """Set up the myStrom light integration.""" host = config.get(CONF_HOST) mac = config.get(CONF_MAC) name = config.get(CONF_NAME) bulb = MyStromBulb(host, mac) try: await bulb.get_state() if bulb.bulb_type not in ["rgblamp", "strip"]: _LOGGER.error( "Device %s (%s) is not a myStrom bulb nor myStrom LED Strip", host, mac ) return except MyStromConnectionError as err: _LOGGER.warning("No route to myStrom bulb: %s", host) raise PlatformNotReady() from err async_add_entities([MyStromLight(bulb, name, mac)], True) class MyStromLight(LightEntity): """Representation of the myStrom WiFi bulb.""" _attr_color_mode = COLOR_MODE_HS _attr_supported_color_modes = {COLOR_MODE_HS} _attr_supported_features = SUPPORT_EFFECT | SUPPORT_FLASH def __init__(self, bulb, name, mac): """Initialize the light.""" self._bulb = bulb self._name = name self._state = None self._available = False self._brightness = 0 self._color_h = 0 self._color_s = 0 self._mac = mac @property def name(self): """Return the display name of this light.""" return self._name @property def unique_id(self): """Return a unique ID.""" return self._mac @property def brightness(self): """Return the brightness of the light.""" return self._brightness @property def hs_color(self): """Return the color of the light.""" return self._color_h, self._color_s @property def available(self) -> bool: """Return True if entity is available.""" return self._available @property def effect_list(self): """Return the list of supported effects.""" return MYSTROM_EFFECT_LIST @property def is_on(self): """Return true if light is on.""" return self._state async def async_turn_on(self, **kwargs): """Turn on the light.""" brightness = kwargs.get(ATTR_BRIGHTNESS, 255) effect = kwargs.get(ATTR_EFFECT) if ATTR_HS_COLOR in kwargs: color_h, color_s = kwargs[ATTR_HS_COLOR] elif ATTR_BRIGHTNESS in kwargs: # Brightness update, keep color color_h, color_s = self._color_h, self._color_s else: color_h, color_s = 0, 0 # Back to white try: if not self.is_on: await self._bulb.set_on() if brightness is not None: await self._bulb.set_color_hsv( int(color_h), int(color_s), round(brightness * 100 / 255) ) if effect == EFFECT_SUNRISE: await self._bulb.set_sunrise(30) if effect == EFFECT_RAINBOW: await self._bulb.set_rainbow(30) except 
MyStromConnectionError: _LOGGER.warning("No route to myStrom bulb") async def async_turn_off(self, **kwargs): """Turn off the bulb.""" try: await self._bulb.set_off() except MyStromConnectionError: _LOGGER.warning("The myStrom bulb not online") async def async_update(self): """Fetch new state data for this light.""" try: await self._bulb.get_state() self._state = self._bulb.state colors = self._bulb.color try: color_h, color_s, color_v = colors.split(";") except ValueError: color_s, color_v = colors.split(";") color_h = 0 self._color_h = int(color_h) self._color_s = int(color_s) self._brightness = int(color_v) * 255 / 100 self._available = True except MyStromConnectionError: _LOGGER.warning("No route to myStrom bulb") self._available = False
Python
0
@@ -310,27 +310,8 @@
 OR,%0A
-    COLOR_MODE_HS,%0A
@@ -366,16 +366,31 @@
 _FLASH,%0A
+    ColorMode,%0A
 Ligh
@@ -2082,26 +2082,25 @@
 mode = C
-OLOR_MODE_
+olorMode.
 HS%0A    _
@@ -2134,18 +2134,17 @@
 = %7BC
-OLOR_MODE_
+olorMode.
 HS%7D%0A
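The patch swaps the deprecated COLOR_MODE_HS string constant for the ColorMode enum; applied to the old contents, the affected lines end up roughly as below (a reconstruction, trimmed to the changed attributes):

from homeassistant.components.light import ColorMode

class MyStromLight(LightEntity):
    """Representation of the myStrom WiFi bulb."""

    # enum members replace the COLOR_MODE_HS string constant
    _attr_color_mode = ColorMode.HS
    _attr_supported_color_modes = {ColorMode.HS}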
8021b027ae4a617712741d1e9a0668817bad9193
Fix import
experiments/tests/test_admin.py
experiments/tests/test_admin.py
from __future__ import absolute_import

import json

from django.contrib.auth.models import User, Permission
from django.core.urlresolvers import reverse
from django.test import TestCase

from experiments.models import Experiment, CONTROL_STATE, ENABLED_STATE
from experiments.utils import participant


class AdminTestCase(TestCase):
    def test_set_state(self):
        experiment = Experiment.objects.create(name='test_experiment', state=CONTROL_STATE)

        User.objects.create_superuser(username='user', email='deleted@mixcloud.com', password='pass')
        self.client.login(username='user', password='pass')

        self.assertEqual(Experiment.objects.get(pk=experiment.pk).state, CONTROL_STATE)

        response = self.client.post(reverse('admin:experiment_admin_set_state'), {
            'experiment': experiment.name,
            'state': ENABLED_STATE,
        })
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Experiment.objects.get(pk=experiment.pk).state, ENABLED_STATE)
        self.assertIsNone(Experiment.objects.get(pk=experiment.pk).end_date)

        response = self.client.post(reverse('admin:experiment_admin_set_state'), {
            'experiment': experiment.name,
            'state': CONTROL_STATE,
        })
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Experiment.objects.get(pk=experiment.pk).state, CONTROL_STATE)
        self.assertIsNotNone(Experiment.objects.get(pk=experiment.pk).end_date)

    def test_set_alternative(self):
        experiment = Experiment.objects.create(name='test_experiment', state=ENABLED_STATE)

        user = User.objects.create_superuser(username='user', email='deleted@mixcloud.com', password='pass')
        self.client.login(username='user', password='pass')

        participant(user=user).enroll('test_experiment', alternatives=['other1', 'other2'])

        for alternative in ('other2', 'control', 'other1'):
            response = self.client.post(reverse('admin:experiment_admin_set_alternative'), {
                'experiment': experiment.name,
                'alternative': alternative,
            })
            self.assertDictEqual(json.loads(response.content.decode('utf-8')), {
                'success': True,
                'alternative': alternative,
            })
            self.assertEqual(participant(user=user).get_alternative('test_experiment'), alternative)

    def test_permissions(self):
        # redirect to login if not logged in
        self.assertEqual(302, self.client.post(reverse('admin:experiment_admin_set_state'), {}).status_code)
        self.assertEqual(302, self.client.post(reverse('admin:experiment_admin_set_alternative'), {}).status_code)

        response = self.client.post(reverse('admin:experiment_admin_set_alternative'), {})
        self.assertEqual(response.status_code, 302)

        # non staff user
        user = User.objects.create_user(username='user', password='pass')
        user.save()
        self.client.login(username='user', password='pass')
        self.assertEqual(302, self.client.post(reverse('admin:experiment_admin_set_state'), {}).status_code)
        self.assertEqual(302, self.client.post(reverse('admin:experiment_admin_set_alternative'), {}).status_code)

        user.is_staff = True
        user.save()
        self.assertEqual(403, self.client.post(reverse('admin:experiment_admin_set_state'), {}).status_code)
        self.assertEqual(403, self.client.post(reverse('admin:experiment_admin_set_alternative'), {}).status_code)

        permission = Permission.objects.get(codename='change_experiment')
        user.user_permissions.add(permission)

        self.assertEqual(400, self.client.post(reverse('admin:experiment_admin_set_state'), {}).status_code)
        self.assertEqual(400, self.client.post(reverse('admin:experiment_admin_set_alternative'), {}).status_code)
Python
0.000002
@@ -117,25 +117,27 @@
 ngo.
-core.urlresol
+urls import re
 vers
+e
 imp
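The single hunk moves the reverse import to its current location; django.core.urlresolvers was deprecated in Django 1.10 and removed in Django 2.0, hence the subject "Fix import". In plain form:

# removed in Django 2.0:
from django.core.urlresolvers import reverse
# current location:
from django.urls import reverse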
86679c5b1a31d9125ebc3f55785cb9219e41ed27
Add mechanism to validate backends against buckets
edgedb/lang/common/buckets.py
edgedb/lang/common/buckets.py
##
# Copyright (c) 2012, 2013 Sprymix Inc.
# All rights reserved.
#
# See LICENSE for details.
##


import weakref

from metamagic.utils import abc, config


class BucketMeta(abc.AbstractMeta, config.ConfigurableMeta):
    def __new__(mcls, name, bases, dct):
        cls = super().__new__(mcls, name, bases, dct)

        if len([base for base in bases if isinstance(base, mcls)]) > 1:
            raise TypeError('Bucket classes can have only one base Bucket class')

        cls._instances = weakref.WeakSet()
        return cls


class Bucket(metaclass=BucketMeta):
    def __new__(cls, *args, **kwargs):
        if super().__new__ is object.__new__:
            instance = super().__new__(cls)
        else:
            instance = super().__new__(cls, *args, **kwargs)

        cls._register_instance(instance)
        instance._cached_implementation = None
        return instance

    def __init__(self, *, parent=None):
        if parent is not None:
            cls = type(self)
            mro = cls.__mro__[:-2] # Skip 'object' and 'abstract.Bucket'

            if type(parent) not in mro:
                raise ValueError('parent bucket {!r} must be an instance of one of the '
                                 'ancestor classes {!r}'.format(parent, mro))

            parent._register_child(self)

        self._parent = parent
        self._children = []

    def _register_child(self, bucket):
        self._children.append(bucket)

    def _get_implementation(self):
        if self._cached_implementation is None:
            backends = type(self).get_backends()
            if not backends:
                return
            self._cached_implementation = type(self).get_implementation()(backends)
        return self._cached_implementation

    def _ensure_implementation(self):
        impl = self._get_implementation()
        if not impl:
            raise KeyError('non-initialized bucket: no backends/implementation set')
        return impl

    @classmethod
    def _register_instance(cls, instance):
        cls._instances.add(instance)

    @classmethod
    def set_backends(cls, *backends):
        impl = cls.get_implementation()
        for p in backends:
            if not isinstance(p, impl.compatible_backend_classes):
                raise TypeError('backend {!r} is not compatible with installed implementation '
                                '{!r}, must be an instance of {!r}'.
                                format(p, impl, impl.compatible_backend_classes))

    @classmethod
    def get_backends(cls):
        return getattr(cls, '_backends', None)

    @classmethod
    def set_implementation(cls, implementation):
        if not issubclass(implementation, Implementation):
            raise ValueError('a subclass of Implementation was expected')

        if hasattr(cls, '_implementation') and '_implementation' not in cls.__dict__:
            holder = None
            for sub in cls.__mro__[1:-1]:
                if '_implementation' in sub.__dict__:
                    holder = sub
                    break
            raise ValueError('implementation was already defined in one of '
                             'the parent buckets: {!r}'.format(holder))

        cls._implementation = implementation

    @classmethod
    def get_implementation(cls):
        return cls._implementation


class ImplementationMeta(abc.AbstractMeta):
    pass


class Implementation(metaclass=ImplementationMeta):
    compatible_backend_classes = None

    def __init__(self, backends):
        self._backends = backends


class BackendMeta(abc.AbstractMeta, config.ConfigurableMeta):
    pass


class Backend(metaclass=BackendMeta):
    pass
Python
0.000002
@@ -2105,32 +2105,101 @@
 ls, *backends):%0A
+        # First validate backends against the current Implementation%0A
 impl = c
@@ -2214,32 +2214,32 @@
 mplementation()%0A -        for p in
@@ -2560,24 +2560,283 @@
 _classes))%0A%0A
+        # Secondly, validate backends against each child bucket class and self%0A        for child in cls._iter_children(include_self=True):%0A            for backend in backends:%0A                child.validate_backend(backend)%0A%0A        cls._backends = backends%0A%0A
 @classme
@@ -3665,16 +3665,578 @@
 tation%0A%0A
+    @classmethod%0A    def validate_backend(cls, backend):%0A        %22%22%22Called recursively for all derived buckets of a bucket on which%0A        %22set_backends%22 is called%22%22%22%0A%0A    @classmethod%0A    def _iter_children(cls, include_self=False):%0A        seen = set()%0A%0A        def children(cls):%0A            if cls in seen:%0A                return%0A            seen.add(cls)%0A%0A            for child in cls.__subclasses__():%0A                yield child%0A                yield from children(child)%0A%0A        if include_self:%0A            yield cls%0A        yield from children(cls)%0A%0A
 %0Aclass I
@@ -4513,24 +4513,24 @@
 %0A    pass%0A%0A%0A -    class Backen
@@ -4531,16 +4531,37 @@
 Backend(
+config.Configurable, metaclas
3075e63cd3b274f15a0fe75bbd17b80ed700cb51
fix plotting CPUs of only one cluster
libs/utils/analysis/cpus_analysis.py
libs/utils/analysis/cpus_analysis.py
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

""" CPUs Analysis Module """

import matplotlib.pyplot as plt
import pylab as pl

from trappy.utils import listify

from analysis_module import AnalysisModule

# Configure logging
import logging


class CpusAnalysis(AnalysisModule):
    """
    Support for CPUs Signals Analysis

    :param trace: input Trace object
    :type trace: :mod:`libs.utils.Trace`
    """

    def __init__(self, trace):
        super(CpusAnalysis, self).__init__(trace)


###############################################################################
# Plotting Methods
###############################################################################

    def plotCPU(self, cpus=None):
        """
        Plot CPU-related signals for both big and LITTLE clusters.

        :param cpus: list of CPUs to be plotted
        :type cpus: list(int)
        """
        if not self._trace.hasEvents('sched_load_avg_cpu'):
            logging.warn('Events [sched_load_avg_cpu] not found, '
                         'plot DISABLED!')
            return

        # Filter on specified cpus
        if cpus is None:
            cpus = sorted(self._platform['clusters']['little'] +
                          self._platform['clusters']['big'])
        cpus = listify(cpus)

        # Plot: big CPUs
        bcpus = set(cpus) & set(self._platform['clusters']['big'])
        self._plotCPU(bcpus, "big")

        # Plot: LITTLE CPUs
        lcpus = set(cpus) & set(self._platform['clusters']['little'])
        self._plotCPU(lcpus, "LITTLE")


###############################################################################
# Utility Methods
###############################################################################

    def _plotCPU(self, cpus, label=''):
        """
        Internal method that generates plots for all input CPUs.

        :param cpus: list of CPUs to be plotted
        :type cpus: list(int)
        """
        if label != '':
            label1 = '{} '.format(label)
            label2 = '_{}s'.format(label.lower())

        # Plot required CPUs
        _, pltaxes = plt.subplots(len(cpus), 1, figsize=(16, 3*(len(cpus))))
        plt.suptitle("{}CPUs Signals".format(label1),
                     y=.99, fontsize=16, horizontalalignment='center')

        idx = 0
        for cpu in cpus:

            # Reference axes to be used
            axes = pltaxes
            if len(cpus) > 1:
                axes = pltaxes[idx]

            # Add CPU utilization
            axes.set_title('{0:s}CPU [{1:d}]'.format(label1, cpu))
            df = self._dfg_trace_event('sched_load_avg_cpu')
            df = df[df.cpu == cpu]
            if len(df):
                df[['util_avg']].plot(ax=axes, drawstyle='steps-post',
                                      alpha=0.4)

            # if self._trace.hasEvents('sched_boost_cpu'):
            #     df = self._dfg_trace_event('sched_boost_cpu')
            #     df = df[df.cpu == cpu]
            #     if len(df):
            #         df[['usage', 'boosted_usage']].plot(
            #             ax=axes,
            #             style=['m-', 'r-'],
            #             drawstyle='steps-post');

            # Add Capacities data if avilable
            if self._trace.hasEvents('cpu_capacity'):
                df = self._dfg_trace_event('cpu_capacity')
                df = df[df.cpu == cpu]
                if len(df):
                    # data = df[['capacity', 'tip_capacity', 'max_capacity']]
                    # data.plot(ax=axes, style=['m', 'y', 'r'],
                    data = df[['capacity', 'tip_capacity']]
                    data.plot(ax=axes, style=['m', '--y'],
                              drawstyle='steps-post')

            # Add overutilized signal to the plot
            self._trace.analysis.status.plotOverutilized(axes)

            axes.set_ylim(0, 1100)
            axes.set_xlim(self._trace.x_min, self._trace.x_max)

            # Disable x-axis timestamp for top-most cpus
            if len(cpus) > 1 and idx < len(cpus)-1:
                axes.set_xticklabels([])
                axes.set_xlabel('')
            axes.grid(True)

            idx += 1

        # Save generated plots into datadir
        figname = '{}/{}cpus{}.png'.format(self._trace.plots_dir,
                                           self._trace.plots_prefix, label2)
        pl.savefig(figname, bbox_inches='tight')

# vim :set tabstop=4 shiftwidth=4 expandtab
Python
0.000001
@@ -1953,32 +1953,54 @@
 sters'%5D%5B'big'%5D)%0A
+        if bcpus:%0A    
 self._pl
@@ -2114,24 +2114,46 @@
 %5B'little'%5D)%0A
+        if lcpus:%0A    
 self
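The two hunks wrap each _plotCPU call in a guard so an empty cluster selection is skipped (presumably avoiding a zero-row plt.subplots grid when only one cluster's CPUs are requested). Applied to the old contents, the call sites become roughly:

# Plot: big CPUs
bcpus = set(cpus) & set(self._platform['clusters']['big'])
if bcpus:
    self._plotCPU(bcpus, "big")

# Plot: LITTLE CPUs
lcpus = set(cpus) & set(self._platform['clusters']['little'])
if lcpus:
    self._plotCPU(lcpus, "LITTLE")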
331f5b0a951e13f816e752609ac348df272e1b1e
Update conf_template.py
docs/conf_template.py
docs/conf_template.py
# Statement for enabling the development environment
DEBUG = True

# Define the application directory
import os
BASE_DIR = os.path.abspath(os.path.dirname(__file__))

# Define the database - we are working with
# SQLite for this example
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(BASE_DIR, 'app.db')
DATABASE_CONNECT_OPTIONS = {}

# Application threads. A common general assumption is
# using 2 per available processor cores - to handle
# incoming requests using one and performing background
# operations using the other.
THREADS_PER_PAGE = 2

# Enable protection agains *Cross-site Request Forgery (CSRF)*
CSRF_ENABLED = True

# Use a secure, unique and absolutely secret key for
# signing the data.
CSRF_SESSION_KEY = "secret"

# Secret key for signing cookies
SECRET_KEY = "secret"

ORCID_CLIENT_ID = ""
ORCID_SECRET = ""
ORCID_API_URL = "https://orcid.org/oauth/token"
ORCID_REDIRECT_URL = "http://localhost:4200/login"

GITHUB_CLIENT_ID = ""
GITHUB_SECRET = ""
GITHUB_API_URL = "https://github.com/login/oauth/access_token"
GITHUB_USER_API_URL = "https://api.github.com/user"

SHARE_API_URL = "https://share.osf.io/api/v2/search/creativeworks/_search"

SLIDESHARE_API_URL = "https://www.slideshare.net/api/2/get_slideshows_by_user"
SLIDESHARE_PARAMS = "?api_key={api_key}&ts={ts}&hash={hash}&username_for={username}"
SLIDESHARE_API_KEY = ""
SLIDESHARE_SECRET = ""

OPENAIRE_PUBLICATION_API_URL = "http://api.openaire.eu/search/publications?author={author}"
OPENAIRE_DATASET_API_URL = "http://api.openaire.eu/search/datasets?author={author}"

SPARQL_QUERY_ENDPOINT = "http://localhost:3030/ro2share/sparql"
SPARQL_UPLOAD_ENDPOINT = "http://localhost:3030/ro2share/update"

BASE_URI = 'http://ro2share.org/'
TMP_DIR = 'tmp/'
Python
0.000002
@@ -1729,14 +1729,15 @@
 _DIR = '
+/
 tmp/'%0A
67f9a47c3476c189543bffba369d791035e0f159
add per-link logs
empower/apps/survey/survey.py
empower/apps/survey/survey.py
#!/usr/bin/env python3
#
# Copyright (c) 2016 Roberto Riggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

"""Survey App."""

from empower.core.app import EmpowerApp
from empower.core.app import DEFAULT_PERIOD
from empower.core.resourcepool import BANDS
from empower.datatypes.etheraddress import EtherAddress

DEFAULT_ADDRESS = "ff:ff:ff:ff:ff:ff"


class Survey(EmpowerApp):
    """Survey App.

    Command Line Parameters:

        tenant_id: tenant id
        addr: the address to be tracked (optional, default ff:ff:ff:ff:ff:ff)
        every: loop period in ms (optional, default 5000ms)

    Example:

        ./empower-runtime.py apps.survey.survey \
            --tenant_id=52313ecb-9d00-4b7d-b873-b55d3d9ada26
    """

    def __init__(self, **kwargs):
        self.__addr = None
        EmpowerApp.__init__(self, **kwargs)
        self.wtpup(callback=self.wtp_up_callback)

    @property
    def addr(self):
        """Return addr."""
        return self.__addr

    @addr.setter
    def addr(self, value):
        """Set addr."""
        self.__addr = EtherAddress(value)

    def wtp_up_callback(self, wtp):
        """New WTP."""

        for block in wtp.supports:
            self.summary(addr=self.addr,
                         block=block,
                         callback=self.summary_callback)

    def summary_callback(self, summary):
        """ New stats available. """

        self.log.info("New summary from %s addr %s frames %u",
                      summary.block, summary.addr, len(summary.frames))

        filename = "survey_%s_%u_%s.csv" % (summary.block.addr,
                                            summary.block.channel,
                                            BANDS[summary.block.band])

        for frame in summary.frames:

            line = "%u,%g,%s,%d,%u,%s,%s,%s,%s,%s\n" % \
                (frame['tsft'], frame['rate'], frame['rtype'], frame['rssi'],
                 frame['length'], frame['type'], frame['subtype'],
                 frame['ra'], frame['ta'], frame['seq'])

            with open(filename, 'a') as file_d:
                file_d.write(line)


def launch(tenant_id, addr=DEFAULT_ADDRESS, every=DEFAULT_PERIOD):
    """ Initialize the module. """

    return Survey(tenant_id=tenant_id, addr=addr, every=every)
Python
0.000001
@@ -1329,16 +1329,40 @@
 kwargs)%0A
+        self.links = %7B%7D%0A
@@ -1399,24 +1399,24 @@
 p_callback)%0A -     %0A    @proper
@@ -1809,24 +1809,215 @@
 _callback)%0A%0A
+    def to_dict(self):%0A        %22%22%22 Return a JSON-serializable dictionary representing the Summary %22%22%22%0A%0A        out = super().to_dict()%0A%0A        out%5B'links'%5D = self.links%0A%0A        return out%0A%0A
 def summ
@@ -2219,16 +2219,40 @@
 ames))%0A%0A
+        # per block log%0A
@@ -2442,24 +2442,24 @@
 ock.band%5D)%0A%0A -         for
@@ -2828,16 +2828,893 @@
 (line)%0A%0A
+        # per link log%0A        for frame in summary.frames:%0A%0A            link = %22%25s_%25s_%25u_%25s%22 %25 (frame%5B'ta'%5D, summary.block.addr,%0A                                     summary.block.channel,%0A                                     BANDS%5Bsummary.block.band%5D)%0A%0A            filename = %22link_%25s.csv%22 %25 link%0A%0A            if link not in self.links:%0A                self.links%5Blink%5D = %7B%7D%0A%0A            if frame%5B'rssi'%5D not in self.links%5Blink%5D:%0A                self.links%5Blink%5D%5Bframe%5B'rssi'%5D%5D = 0%0A%0A            self.links%5Blink%5D%5Bframe%5B'rssi'%5D%5D += 1%0A%0A            line = %22%25u,%25g,%25s,%25d,%25u,%25s,%25s,%25s,%25s,%25s%5Cn%22 %25 %5C%0A                (frame%5B'tsft'%5D, frame%5B'rate'%5D, frame%5B'rtype'%5D, frame%5B'rssi'%5D,%0A                 frame%5B'length'%5D, frame%5B'type'%5D, frame%5B'subtype'%5D,%0A                 frame%5B'ra'%5D, frame%5B'ta'%5D, frame%5B'seq'%5D)%0A%0A            with open(filename, 'a') as file_d:%0A                file_d.write(line)%0A%0A
 %0Adef lau
16f9ec9f6d611075f3e7ec37fc53b82bdc460532
fix bug
misp_modules/modules/expansion/onyphe.py
misp_modules/modules/expansion/onyphe.py
# -*- coding: utf-8 -*-
import json

from pymisp import MISPEvent, MISPObject

try:
    from onyphe import Onyphe
except ImportError:
    print("pyonyphe module not installed.")

misperrors = {'error': 'Error'}

mispattributes = {'input': ['ip-src', 'ip-dst', 'hostname', 'domain'],
                  'output': ['hostname', 'domain', 'ip-src', 'ip-dst', 'url'],
                  'format': 'misp_standard'}
# possible module-types: 'expansion', 'hover' or both
moduleinfo = {'version': '2', 'author': 'Sebastien Larinier @sebdraven',
              'description': 'Query on Onyphe',
              'module-type': ['expansion', 'hover']}

# config fields that your code expects from the site admin
moduleconfig = ['apikey']


class OnypheClient:

    def __init__(self, api_key, attribute):
        self.onyphe_client = Onyphe(api_key=api_key)
        self.attribute = attribute
        self.misp_event = MISPEvent()
        self.misp_event.add_attribute(**attribute)

    def get_results(self):
        event = json.loads(self.misp_event.to_json())
        results = {key: event[key] for key in ('Attribute', 'Object') if key in event}
        return results

    def get_query_onyphe(self):
        if self.attribute['type'] == 'ip-src' or self.attribute['type'] == 'ip-dst':
            self.__summary_ip()

    def __summary_ip(self):
        results = self.onyphe_client.summary_ip(self.attribute['value'])
        if 'results' in results:
            for r in results['results']:
                if 'domain' in r:
                    domain = r['domain']
                    if type(domain) == list:
                        for d in domain:
                            self.__get_object_domain_ip(d, 'domain')
                    elif type(domain) == str:
                        self.__get_object_domain_ip(domain, 'domain')

                if 'hostname' in r:
                    hostname = r['hostname']
                    if type(hostname) == list:
                        for d in hostname:
                            self.__get_object_domain_ip(d, 'domain')
                    elif type(hostname) == str:
                        self.__get_object_domain_ip(hostname, 'domain')

                if 'issuer' in r:
                    issuer = r['issuer']
                    self.__get_object_certificate(r)

    def __get_object_certificate(self, r):
        object_certificate = MISPObject('x509')
        object_certificate.add_attribute('ip', self.attribute['value'])
        object_certificate.add_attribute('serial-number', r['serial'])
        object_certificate.add_attribute('x509-fingerprint-sha256',
                                         r['fingerprint']['sha256'])
        object_certificate.add_attribute('x509-fingerprint-sha1',
                                         r['fingerprint']['sha1'])
        object_certificate.add_attribute('x509-fingerprint-md5',
                                         r['fingerprint']['md5'])
        signature = r['signature']['algorithm']
        value = ''
        if 'sha256' in signature and 'RSA' in signature:
            value = 'SHA256_WITH_RSA_ENCRYPTION'
        elif 'sha1' in signature and 'RSA' in signature:
            value = 'SHA1_WITH_RSA_ENCRYPTION'
        if value:
            object_certificate.add_attribute('signature_algorithm', value)

        object_certificate.add_attribute('pubkey-info-algorithm',
                                         r['publickey']['algorithm'])

        if 'exponent' in r['publickey']:
            object_certificate.add_attribute('pubkey-info-exponent',
                                             r['publickey']['exponent'])
        if 'length' in r['publickey']:
            object_certificate.add_attribute('pubkey-info-size',
                                             r['publickey']['length'])

        object_certificate.add_attribute('issuer', r['issuer']['commonname'])
        object_certificate.add_attribute('validity-not-before',
                                         r['validity']['notbefore'])
        object_certificate.add_attribute('validity-not-after',
                                         r['validity']['notbefore'])

        object_certificate.add_reference(self.attribute['uuid'], 'related-to')
        self.misp_event.add_object(object_certificate)

    def __get_object_domain_ip(self, obs, relation):
        objet_domain_ip = MISPObject('domain-ip')
        objet_domain_ip.add_attribute(relation, obs)
        relation_attr = self.__get_relation_attribute()
        if relation_attr:
            objet_domain_ip.add_attribute(relation, self.attribute['value'])
        objet_domain_ip.add_reference(self.attribute['uuid'], 'related-to')
        self.misp_event.add_object(objet_domain_ip)

    def __get_relation_attribute(self):
        if self.attribute['type'] == 'ip-src':
            return 'ip'
        elif self.attribute['type'] == 'ip-dst':
            return 'ip'
        elif self.attribute['type'] == 'domain':
            return 'domain'
        elif self.attribute['type'] == 'hostname':
            return 'domain'


def handler(q=False):
    if q:
        request = json.loads(q)
        attribute = request['attribute']
        if not request.get('config') or not request['config'].get('apikey'):
            misperrors['error'] = 'Onyphe authentication is missing'
            return misperrors
        api_key = request['config'].get('apikey')
        onyphe_client = OnypheClient(api_key, attribute)
        onyphe_client.get_query_onyphe()
        results = onyphe_client.get_results()
        return {'results': results}


def introspection():
    return mispattributes


def version():
    moduleinfo['config'] = moduleconfig
    return moduleinfo
Python
0.000001
@@ -4287,16 +4287,21 @@
 relation
+_attr
 , self.a
@@ -4311,32 +4311,36 @@
 ibute%5B'value'%5D)%0A
+    
 objet_do
@@ -4391,32 +4391,36 @@
 , 'related-to')%0A
+    
 self.mis
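Applied to the old contents, the hunks make __get_object_domain_ip use the looked-up relation_attr (previously computed but never passed) and indent the reference/add-object calls under the guard — roughly this (a reconstruction, not a dataset row):

def __get_object_domain_ip(self, obs, relation):
    objet_domain_ip = MISPObject('domain-ip')
    objet_domain_ip.add_attribute(relation, obs)
    relation_attr = self.__get_relation_attribute()
    if relation_attr:
        # use the resolved relation name, and only attach the
        # reference/object when a relation could be resolved
        objet_domain_ip.add_attribute(relation_attr, self.attribute['value'])
        objet_domain_ip.add_reference(self.attribute['uuid'], 'related-to')
        self.misp_event.add_object(objet_domain_ip)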
2ca7e324931d6ebcdf23a7bd0073271bc0bf9475
check correct combinations for link,reports and imstuments
umibukela/forms.py
umibukela/forms.py
from django import forms
from django.contrib.gis.geos import Point
from widgets import AddAnotherWidgetWrapper
from django.core.exceptions import ValidationError

from .models import (Site, CycleResultSet, Monitor, ProgrammeResources,
                     ProgrammeImage)


class SiteForm(forms.ModelForm):
    latitude = forms.DecimalField(
        min_value=-90,
        max_value=90,
        required=False,
    )
    longitude = forms.DecimalField(
        min_value=-180,
        max_value=180,
        required=False,
    )

    class Meta(object):
        model = Site
        exclude = []
        widgets = {'coordinates': forms.HiddenInput()}

    def __init__(self, *args, **kwargs):
        if args:  # If args exist
            data = args[0]
            if data['latitude'] and data['longitude']:
                latitude = float(data['latitude'])
                longitude = float(data['longitude'])
                data['coordinates'] = Point(longitude, latitude)

        if 'instance' in kwargs and kwargs['instance'] is not None and kwargs['instance'].coordinates:
            coordinates = kwargs['instance'].coordinates.tuple
            initial = kwargs.get('initial', {})
            initial['longitude'] = coordinates[0]
            initial['latitude'] = coordinates[1]
            kwargs['initial'] = initial

        super(SiteForm, self).__init__(*args, **kwargs)


class CycleResultSetForm(forms.ModelForm):
    site_option_name = forms.CharField(widget=forms.TextInput)

    class Meta(object):
        model = CycleResultSet
        exclude = []

    def __init__(self, *args, **kwargs):
        super(CycleResultSetForm, self).__init__(*args, **kwargs)
        crs = kwargs.get('instance', None)
        if crs:
            partner = crs.partner
        else:
            partner = None
        self.fields['monitors'].queryset = Monitor.objects.filter(
            partner=partner)
        self.fields[
            'site_option_name'].help_text = "This is the name of the option for this site in the form, e.g. for 'Folweni clinic' it's probably 'folweni' (without the single quotes). You can find the names of options in the relevant Survey admin page."


class CRSFromKoboForm(forms.Form):
    def __init__(self, *args, **kwargs):
        facilities = kwargs.pop('facilities')
        super(CRSFromKoboForm, self).__init__(*args, **kwargs)
        for i, facility in enumerate(facilities):
            crs_field = forms.ModelChoiceField(
                queryset=CycleResultSet.objects.order_by('site__name').all(),
                label=facility['label'])
            crs_field.widget = AddAnotherWidgetWrapper(crs_field.widget,
                                                       CycleResultSet)
            self.fields['crs_%d' % i] = crs_field
            self.fields['facility_%d' % i] = forms.CharField(
                widget=forms.HiddenInput(), initial=facility['name'])
        self.fields['num_facilities'] = forms.CharField(
            widget=forms.HiddenInput(), initial=len(facilities))


class ProgrammeResourcesForm(forms.ModelForm):
    class Meta:
        model = ProgrammeResources
        exclude = ('document_extension', )

    def clean(self):
        link = self.cleaned_data.get('link')
        document = self.cleaned_data.get('document')
        order_no = self.cleaned_data.get('order')
        resource = self.cleaned_data.get('resource')
        programme = self.cleaned_data.get('programme')

        if link and document:
            raise ValidationError(
                "You cant have an External link and a Document")
        if ProgrammeResources.objects.filter(
                order=order_no, resource=resource,
                programme=programme).exists():
            raise ValidationError(
                'A Resource already exists for this order number')
        if resource.name == 'Links' and document is not None:
            raise ValidationError(
                'A resource of type Link cannot have a document, expecting a link'
            )
        if resource.name == 'Reports' and link is not None:
            raise ValidationError(
                'A resource of type Reports cannot have a link, expecting a document'
            )
        if resource.name == 'Survey Instrument' and link is not None:
            raise ValidationError(
                'A resource of type Survey Instrument cannot have a link, expecting a document'
            )
        return self.cleaned_data


class ProgrammeImageForm(forms.ModelForm):
    class Meta:
        model = ProgrammeImage
        fields = '__all__'

    def clean(self):
        featured = self.cleaned_data.get('featured')
        programme = self.cleaned_data.get('programme')
        if featured:
            if ProgrammeImage\
               .objects\
               .filter(programme=programme, featured=True):
                raise ValidationError(
                    "An image in this programme is already marked as a featured image"
                )
        return self.cleaned_data
Python
0
@@ -3453,16 +3453,360 @@ amme')%0A%0A + if resource.name == 'Link' and link is None:%0A raise ValidationError('Enter a link')%0A if resource.name == 'Reports' and document is None:%0A raise ValidationError('Upload a document')%0A if resource.name == 'Survey Instrument' and document is None:%0A raise ValidationError('Upload a document')%0A%0A @@ -3827,16 +3827,16 @@ cument:%0A - @@ -4221,28 +4221,16 @@ document - is not None :%0A @@ -4397,36 +4397,24 @@ ts' and link - is not None :%0A @@ -4530,32 +4530,32 @@ '%0A )%0A + if resou @@ -4594,28 +4594,16 @@ and link - is not None :%0A
dc7158048d491e28322af443c7918ffd0de0d22d
Use logger instead of stdout and stderr
speeches/management/import_commands.py
speeches/management/import_commands.py
import os
from optparse import make_option

from django.core.management.base import BaseCommand, CommandError

from instances.models import Instance


class ImportCommand(BaseCommand):
    importer_class = None
    document_extension = ''

    option_list = BaseCommand.option_list + (
        make_option('--commit', action='store_true',
                    help='Whether to commit to the database or not'),
        make_option('--instance', action='store',
                    help='Label of instance to add data to'),
        make_option('--file', action='store',
                    help='document to import'),
        make_option('--dir', action='store',
                    help='directory of documents to import'),
        make_option('--start-date', action='store', default='',
                    help='earliest date to process, in yyyy-mm-dd format'),
        make_option('--dump-users', action='store', default='',
                    help='dump a json list to <file> (only valid with --dir for now)'),
    )

    def handle(self, *args, **options):
        verbosity = int(options['verbosity'])

        if options['commit']:
            if not options['instance']:
                raise CommandError("You must specify an instance")
            instance, _ = Instance.objects.get_or_create(label=options['instance'])
        else:
            instance = Instance(label=options['instance'])
        options['instance'] = instance

        if options['file']:
            filename = os.path.expanduser(options['file'])
            (section, speakers) = self.import_document(filename, **options)
            if verbosity > 1:
                if section and section.id:
                    self.stdout.write("Imported section %d\n\n" % section.id)
        elif options['dir']:
            files = self.document_list(options)
            if len(files):
                imports = [self.import_document(f, **options) for f in files]
                if options['commit']:
                    sections = [a for a,_ in imports]
                    if verbosity > 1:
                        self.stdout.write("Imported sections %s\n\n" % str(
                            [s.id for s in sections]))
                dump_users = os.path.expanduser(options['dump_users'])
                if dump_users:
                    speakers = {}
                    for (_,d) in imports:
                        speakers.update(d)
                    out = open(dump_users, 'w')
                    speakers_list = [ (k, speakers[k]) for k in speakers]
                    out.write( json.dumps( speakers_list, indent=4 ) )
                    if verbosity > 1:
                        self.stdout.write("Saved speakers list to %s\n" % dump_users)
            else:
                self.stdout.write("No .%s files found in directory" % self.document_extension)
        else:
            self.stdout.write(self.help)

    def document_list(self, options):
        dir = os.path.expanduser(options['dir'])
        start_date = options['start_date']

        valid = lambda f: f >= start_date if start_date else lambda _: True

        return [
            os.path.join(root, filename)
            for (root, _, files) in os.walk(dir)
            for filename in files
            if filename[-4:] == '.%s' % self.document_extension and valid(filename)]

    def document_valid(self, path):
        return os.path.isfile(path)

    def import_document(self, path, **options):
        verbosity = int(options['verbosity'])

        if not self.document_valid(path):
            raise CommandError("No document found")

        if verbosity > 1:
            self.stdout.write("Starting import: %s\n" % path)

        if self.importer_class == None:
            raise CommandError("No importer_class specified!")

        importer = self.importer_class(**options)

        try:
            section = importer.import_document(path)
        except Exception as e:
            self.stderr.write(str(e))
            return (None, {})

        return (section, importer.speakers)
Python
0.000019
@@ -1,12 +1,27 @@ +import logging%0A import os%0Afr @@ -158,16 +158,55 @@ stance%0A%0A +logger = logging.getLogger(__name__)%0A%0A%0A class Im @@ -1628,33 +1628,27 @@ -self.stdout.write +logger.info (%22Import @@ -2018,33 +2018,27 @@ -self.stdout.write +logger.info (%22Import @@ -2598,33 +2598,27 @@ -self.stdout.write +logger.info (%22Saved @@ -2688,33 +2688,27 @@ -self.stdout.write +logger.info (%22No .%25s @@ -2787,33 +2787,27 @@ -self.stdout.write +logger.info (self.he @@ -3583,25 +3583,19 @@ -self.stdout.write +logger.info (%22St @@ -3892,25 +3892,20 @@ -self.stderr.write +logger.error (str
2a0e03efaca02a72a11086f7c49cbd7ba1887a60
fix import
uniqueids/admin.py
uniqueids/admin.py
import csv
import codecs
import io

from django.contrib import admin

from hellomama_registration import utils
from .models import Record, State, Facility, Community, PersonnelUpload
from .tasks import send_personnel_code


class RecordAdmin(admin.ModelAdmin):
    list_display = [
        "id", "identity", "write_to", "created_at", "updated_at"]
    list_filter = ["write_to", "created_at"]
    search_fields = ["identity", "write_to"]
    actions = ["resend_personnel_code"]

    def resend_personnel_code(self, request, queryset):
        created = 0
        for record in queryset.filter(write_to="personnel_code").iterator():
            send_personnel_code.apply_async(kwargs={
                "identity": str(record.identity),
                "personnel_code": record.id})
            created += 1
        if created == 1:
            created_text = "%s Record was" % created
        else:
            created_text = "%s Records were" % created
        self.message_user(request, "%s resent." % created_text)

    resend_personnel_code.short_description = (
        "Send code by SMS (personnel code only)")


class StateAdmin(admin.ModelAdmin):
    list_display = ["name", "created_at", "updated_at"]
    search_fields = ["name"]


class FacilityAdmin(admin.ModelAdmin):
    list_display = ["name", "created_at", "updated_at"]
    search_fields = ["name"]


class CommunityAdmin(admin.ModelAdmin):
    list_display = ["name", "created_at", "updated_at"]
    search_fields = ["name"]


class PersonnelUploadAdmin(admin.ModelAdmin):
    list_display_links = None
    list_display = ["import_type", "created_at", "valid", "error"]
    exclude = ["valid", "error"]

    def validate_keys(self, record, import_type):
        required_keys = [
            "address_type", "address", "preferred_language",
            "receiver_role", "uniqueid_field_name", "uniqueid_field_length",
            "name", "surname"]
        required_keys_type = {
            "corps": ["community"],
            "personnel": ["role", "facility_name", "state"]
        }

        missing = []
        for key in required_keys + required_keys_type[import_type]:
            if key not in record.keys():
                missing.append(key)

        return missing

    def save_model(self, request, obj, form, change):
        csvfile = io.StringIO(request.FILES['csv_file'].read().decode())
        reader = csv.DictReader(csvfile, delimiter=',')

        obj.valid = True
        obj.error = ''

        states = []
        facilities = []
        communities = []
        if obj.import_type == 'personnel':
            states = State.objects.values_list('name', flat=True)
            facilities = Facility.objects.values_list('name', flat=True)
        elif obj.import_type == 'corps':
            communities = Community.objects.values_list('name', flat=True)

        missing_states = []
        missing_facilities = []
        missing_communities = []
        missing_fields = []
        errors = []

        rows = list(reader)
        if not rows:
            errors.append("No Rows")
            obj.valid = False
        else:
            for line in rows:
                missing_keys = self.validate_keys(line, obj.import_type)
                if missing_keys:
                    for key in missing_keys:
                        if key not in missing_fields:
                            missing_fields.append(key)
                    obj.valid = False

                if obj.import_type == 'personnel':
                    state = line.get('state')
                    if state and state not in states:
                        if state not in missing_states:
                            missing_states.append(state)
                        obj.valid = False

                    facility = line.get('facility_name')
                    if facility and facility not in facilities:
                        if facility not in missing_facilities:
                            missing_facilities.append(facility)
                        obj.valid = False
                elif obj.import_type == 'corps':
                    community = line.get('community')
                    if community and community not in communities:
                        if community not in missing_communities:
                            missing_communities.append(community)
                        obj.valid = False

        if missing_fields:
            errors.append("Missing fields: {}".format(', '.join(
                missing_fields)))
        if missing_states:
            errors.append("Invalid States: {}".format(', '.join(
                missing_states)))
        if missing_facilities:
            errors.append("Invalid Facilities: {}".format(', '.join(
                missing_facilities)))
        if missing_communities:
            errors.append("Invalid Communities: {}".format(', '.join(
                missing_communities)))

        if obj.valid:
            for line in rows:
                utils.create_identity(line)
        else:
            obj.error = ', '.join(errors)

        obj.save()


admin.site.register(PersonnelUpload, PersonnelUploadAdmin)
admin.site.register(Record, RecordAdmin)
admin.site.register(State, StateAdmin)
admin.site.register(Facility, FacilityAdmin)
admin.site.register(Community, CommunityAdmin)
Python
0.000001
@@ -8,22 +8,8 @@ csv%0A -import codecs%0A impo
cfaa0df91ee072c8bd207d1d1cdb8b9a54160069
remove pdb imports
spotseeker_server/org_forms/uw_spot.py
spotseeker_server/org_forms/uw_spot.py
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0

""" Changes
=================================================================
sbutler1@illinois.edu: adapted to the new form framework.
"""

from django import forms
from django.dispatch import receiver
from spotseeker_server.default_forms.spot import DefaultSpotForm
from spotseeker_server.default_forms.spot import DefaultSpotExtendedInfoForm
from spotseeker_server.models import Spot, SpotExtendedInfo
from spotseeker_server.dispatch import spot_post_build
import simplejson as json
import re
import phonenumbers

# dict of all of the uw extended info with values that must be validated
# and what all of the possible validated values are, or validated types
validated_ei = {
    "app_type": ["food", "tech"],
    "auto_labstats_available": "int",
    "auto_labstats_total": "int",
    "campus": ["seattle", "tacoma", "bothell", "south_lake_union"],
    "food_nearby": ["space", "building", "neighboring"],
    "has_computers": ["true"],
    "has_displays": ["true"],
    "has_labstats": ["true"],
    "has_natural_light": ["true"],
    "has_outlets": ["true"],
    "has_printing": ["true"],
    "has_projector": ["true"],
    "has_scanner": ["true"],
    "has_whiteboards": ["true"],
    "is_hidden": ["true"],
    "labstats_id": "int",
    "location_description": "str",
    "noise_level": ["silent", "quiet", "moderate", "variable"],
    "num_computers": "int",
    "rating": "int",
    "reservable": ["true", "reservations"],
    "review_count": "int",
    "s_cuisine_american": ["true"],
    "s_cuisine_bbq": ["true"],
    "s_cuisine_chinese": ["true"],
    "s_cuisine_hawaiian": ["true"],
    "s_cuisine_indian": ["true"],
    "s_cuisine_italian": ["true"],
    "s_cuisine_korean": ["true"],
    "s_cuisine_mexican": ["true"],
    "s_cuisine_vietnamese": ["true"],
    "s_food_breakfast": ["true"],
    "s_food_burgers": ["true"],
    "s_food_curry": ["true"],
    "s_food_desserts": ["true"],
    "s_food_entrees": ["true"],
    "s_food_espresso": ["true"],
    "s_food_frozen_yogurt": ["true"],
    "s_food_groceries": ["true"],
    "s_food_pasta": ["true"],
    "s_food_pastries": ["true"],
    "s_food_pho": ["true"],
    "s_food_pizza": ["true"],
    "s_food_salads": ["true"],
    "s_food_sandwiches": ["true"],
    "s_food_smoothies": ["true"],
    "s_food_sushi_packaged": ["true"],
    "s_food_tacos": ["true"],
    "s_has_reservation": ["true"],
    "s_pay_cash": ["true"],
    "s_pay_dining": ["true"],
    "s_pay_husky": ["true"],
    "s_pay_mastercard": ["true"],
    "s_pay_visa": ["true"],
}


def uw_validate(value, key, choices):
    """Check to see if the value is one of the choices or
    if it is an int or str, else it throws a validation error
    """
    if choices == "int":
        try:
            int(value)
        except ValueError:
            raise forms.ValidationError("Value must be an int")
    elif choices == "str":
        # if whitespace, django cleans data to None
        if value is None:
            raise forms.ValidationError("Location description cannot be all whitespace")
        elif value.isdecimal():
            raise forms.ValidationError("Location description must be a string")
    elif value not in choices:
        raise forms.ValidationError(
            "Value for %s was %s, must be one of: %s"
            % (key, repr(value), "; ".join((repr(c) for c in choices)))
        )


class UWSpotExtendedInfoForm(DefaultSpotExtendedInfoForm):
    def clean(self):
        # import pdb;pdb.set_trace()
        cleaned_data = super(UWSpotExtendedInfoForm, self).clean()

        # Have to check value here since we look at multiple items
        key = self.cleaned_data["key"]
        try:
            value = self.cleaned_data["value"]
        except KeyError as e:
            value = self.cleaned_data.get("value")
        print(key in validated_ei)
        if key == "s_phone":
            p = re.compile("[A-Za-z]")
            if p.search(value):
                raise forms.ValidationError(
                    "Phone number cannot contain " "letters"
                )
            try:
                number = phonenumbers.parse(value, "US")
                if not phonenumbers.is_valid_number(
                    number
                ) or not phonenumbers.is_possible_number(number):
                    raise forms.ValidationError("")
                value = phonenumbers.format_number(
                    number, phonenumbers.PhoneNumberFormat.E164
                )
                cleaned_data["value"] = value[2:]
            except Exception as ex:
                raise forms.ValidationError("s_phone must be a phone number")
        elif key in validated_ei:
            uw_validate(value, key, validated_ei[key])

        return cleaned_data


class UWSpotForm(DefaultSpotForm):
    validated_extended_info = validated_ei


@receiver(spot_post_build, sender=UWSpotForm)
def uw_validate_has_extended_info(sender, **kwargs):
    """
    After a spot REST request has been processed, validate that it
    contained some extended info.
    """
    spot = kwargs["spot"]
    if spot.spotextendedinfo_set.count() <= 0:
        raise forms.ValidationError("UWSpot must have extended info")
Python
0.000001
@@ -3523,45 +3523,8 @@ f):%0A - # import pdb;pdb.set_trace()%0A
fe56573f318578b6359d06d0454af6992f344c20
load optimizer from config
hypergan/trainers/simultaneous_trainer.py
hypergan/trainers/simultaneous_trainer.py
import numpy as np
import torch
import hyperchamber as hc
import inspect

from hypergan.trainers.base_trainer import BaseTrainer
from hypergan.optimizers.adamirror import Adamirror

TINY = 1e-12


class SimultaneousTrainer(BaseTrainer):
    """ Steps G and D simultaneously """

    def _create(self):
        self.optimizer = torch.optim.Adam(self.gan.parameters(), lr=self.config.optimizer["learn_rate"], betas=(0.0,.999))
        #self.optimizer = Adamirror(self.gan.parameters(), lr=self.config.optimizer["learn_rate"], betas=(0.0,.999))
        #self.adamirror = Adamirror(self.gan.parameters(), lr=self.config.optimizer["learn_rate"], betas=(0.9074537537537538,.997))
        #self.adamirror2 = Adamirror(self.gan.parameters(), lr=self.config.optimizer["learn_rate"]*3, betas=(0.9074537537537538,.997))
        #self.optimizer = self.adamirror
        self.gan.add_component("optimizer", self.optimizer)
        #self.gan.add_component("optimizer", self.adamirror2)
        #self.gan.add_component("optimizer", self.adamirror)

    def required(self):
        return "".split()

    def _step(self, feed_dict):
        gan = self.gan
        config = self.config
        loss = gan.loss
        metrics = gan.metrics()

        self.before_step(self.current_step, feed_dict)

        d_grads, g_grads = self.calculate_gradients()
        for hook in self.train_hooks:
            d_grads, g_grads = hook.gradients(d_grads, g_grads)
        for p, np in zip(self.gan.d_parameters(), d_grads):
            p.grad = np
        for p, np in zip(self.gan.g_parameters(), g_grads):
            p.grad = np
        self.optimizer.step()

        if self.current_step % 10 == 0:
            self.print_metrics(self.current_step)

    def calculate_gradients(self):
        self.optimizer.zero_grad()
        d_loss, g_loss = self.gan.forward_loss()
        self.d_loss = d_loss
        self.g_loss = g_loss
        for hook in self.train_hooks:
            loss = hook.forward()
            if loss[0] is not None:
                d_loss += loss[0]
            if loss[1] is not None:
                g_loss += loss[1]

        for p in self.gan.g_parameters():
            p.requires_grad = True
        for p in self.gan.d_parameters():
            p.requires_grad = False
        g_loss = g_loss.mean()
        g_loss.backward(retain_graph=True)

        for p in self.gan.d_parameters():
            p.requires_grad = True
        for p in self.gan.g_parameters():
            p.requires_grad = False
        d_loss = d_loss.mean()
        d_loss.backward(retain_graph=True)

        for p in self.gan.g_parameters():
            p.requires_grad = True

        d_grads = [p.grad for p in self.gan.d_parameters()]
        g_grads = [p.grad for p in self.gan.g_parameters()]
        return d_grads, g_grads

    def print_metrics(self, step):
        metrics = self.gan.metrics()
        metric_values = self.output_variables(metrics)
        print(str(self.output_string(metrics) % tuple([step] + metric_values)))
Python
0
@@ -67,16 +67,85 @@ nspect%0A%0A +from hypergan.gan_component import ValidationException, GANComponent%0A from hyp @@ -191,16 +191,16 @@ Trainer%0A - from hyp @@ -362,32 +362,33 @@ (self):%0A +# self.optimizer = @@ -915,24 +915,25 @@ ror%0A +# self.gan.add @@ -1034,16 +1034,16 @@ irror2)%0A - @@ -1094,16 +1094,267 @@ amirror) +%0A defn = self.config.optimizer%0A klass = GANComponent.lookup_function(None, defn%5B'class'%5D)%0A del defn%5B%22class%22%5D%0A self.optimizer = klass(self.gan.parameters(), **defn)%0A self.gan.add_component(%22optimizer%22, self.optimizer) %0A%0A de
30db4b3ae377669b3b598c9d4d22b5fbff2082ab
Fix typo on model
app/backend/aquifers/serializers.py
app/backend/aquifers/serializers.py
""" Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from rest_framework import serializers from aquifers.models import Aquifer class AquiferSerializer(serializers.ModelSerializer): """Serialize a aquifer list""" demand_description = serializers.SlugRelatedField(source='demand', read_only=True, slug_field='description') material_description = serializers.SlugRelatedField(source='material', read_only=True, slug_field='description') productivity_description = serializers.SlugRelatedField(source='productivity', read_only=True, slug_field='description') subtype_description = serializers.SlugRelatedField(source='subtype', read_only=True, slug_field='description') vulnerability_description = serializers.SlugRelatedField(source='vulnerability', read_only=True, slug_field='description') quality_concern_description = serializers.SlugRelatedField(source='quality_concert', read_only=True, slug_field='description') class Meta: model = Aquifer fields = ( 'aquifer_id', 'aquifer_name', 'area', 'demand_description', 'demand', 'litho_stratographic_unit', 'location_description', 'mapping_year', 'material_description', 'material', 'productivity_description', 'productivity', 'quality_concern_description', 'quality_concern', 'subtype_description', 'subtype', 'vulnerability_description', 'vulnerability' )
Python
0.000585
@@ -1410,17 +1410,17 @@ y_concer -t +n ', read_
306f597faad120ad5b5327544b40537fd0724f96
stop url render errors failing the plugin
flexget/plugins/output/prowl.py
flexget/plugins/output/prowl.py
from __future__ import unicode_literals, division, absolute_import
from builtins import *  # pylint: disable=unused-import, redefined-builtin

import logging

from requests import RequestException

from flexget import plugin
from flexget.event import event
from flexget.utils.template import RenderError

log = logging.getLogger('prowl')


class OutputProwl(object):
    """
    Send prowl notifications

    Example::

      prowl:
        apikey: xxxxxxx
        [application: application name, default FlexGet]
        [event: event title, default New Release]
        [priority: -2 - 2 (2 = highest), default 0]
        [description: notification to send]

    Configuration parameters are also supported from entries (eg. through set).
    """
    schema = {
        'type': 'object',
        'properties': {
            'apikey': {'type': 'string'},
            'application': {'type': 'string', 'default': 'FlexGet'},
            'event': {'type': 'string', 'default': 'New Release'},
            'priority': {'type': 'integer', 'default': 0},
            'description': {'type': 'string'},
            'url': {'type': 'string'}
        },
        'required': ['apikey'],
        'additionalProperties': False
    }

    # Run last to make sure other outputs are successful before sending notification
    @plugin.priority(0)
    def on_task_output(self, task, config):
        for entry in task.accepted:
            # get the parameters
            apikey = entry.get('apikey', config['apikey'])
            application = entry.get('application', config['application'])
            event = entry.get('event', config['event'])
            priority = entry.get('priority', config['priority'])
            description = config.get('description', entry['title'])
            message_url = config.get('url', '')

            # If event has jinja template, render it
            try:
                event = entry.render(event)
            except RenderError as e:
                log.error('Error rendering jinja event: %s' % e)

            # If description has jinja template, render it
            try:
                description = entry.render(description)
            except RenderError as e:
                description = entry['title']
                log.error('Error rendering jinja description: %s' % e)

            # If url has jinja template, render it
            try:
                message_url = entry.render(message_url)
            except RenderError as e:
                log.error('Error rendering jinja url: %s' % e)

            url = 'https://api.prowlapp.com/publicapi/add'
            data = {'priority': priority, 'application': application,
                    'apikey': apikey, 'event': event.encode('utf-8'),
                    'description': description, 'url': message_url}

            if task.options.test:
                log.info('Would send prowl message about: %s', entry['title'])
                log.debug('options: %s' % data)
                continue

            try:
                response = task.requests.post(url, data=data, raise_status=False)
            except RequestException as e:
                log.error('Error with request: %s' % e)
                continue

            # Check if it succeeded
            request_status = response.status_code

            # error codes and messages from http://prowl.weks.net/api.php
            if request_status == 200:
                log.debug("Prowl message sent")
            elif request_status == 400:
                log.error("Bad request, the parameters you provided did not validate")
            elif request_status == 401:
                log.error("Not authorized, the API key given is not valid, and does not correspond to a user.")
            elif request_status == 406:
                log.error("Not acceptable, your IP address has exceeded the API limit.")
            elif request_status == 409:
                log.error("Not approved, the user has yet to approve your retrieve request.")
            elif request_status == 500:
                log.error("Internal server error, something failed to execute properly on the Prowl side.")
            else:
                log.error("Unknown error when sending Prowl message")


@event('plugin.register')
def register_plugin():
    plugin.register(OutputProwl, 'prowl', api_ver=2)
Python
0.000001
@@ -2467,32 +2467,65 @@ nderError as e:%0A + message_url = ''%0A
2954d63dbd4f48eb6141fdb1298290c2adaf5814
Fix installed-unit builder.
flocker/provision/_rackspace.py
flocker/provision/_rackspace.py
# Copyright Hybrid Logic Ltd. See LICENSE file for details.

"""
Rackspace provisioner.
"""

from libcloud.compute.providers import get_driver, Provider
from characteristic import attributes, Attribute

from ._libcloud import get_size, get_image
from ._install import provision


# _node isn't immutable, since libcloud provides new instances
# with updated data.
@attributes([Attribute('_node'), 'address', 'distribution'])
class RackspaceNode(object):
    def destroy(self):
        self._node.destroy()

    def provision(self, package_source):
        """
        Provision flocker on this node.
        """
        provision(
            self.address,
            username="root",
            package_source=package_source,
            distribution=self.distribution,
        )
        return self.address


IMAGE_NAMES = {
    'fedora-20': u'Fedora 20 (Heisenbug) (PVHVM)',
}


@attributes([Attribute('_keyname')], apply_immutable=True)
class Rackspace(object):
    def __init__(self, username, key, region):
        self._driver = get_driver(Provider.RACKSPACE)(
            key=username,
            secret=key,
            region=region)

    def create_node(self, name, distribution,
                    userdata=None,
                    size="performance1-2", disk_size=8,
                    keyname=None, metadata={}):
        """
        :param str name: The name of the node.
        :param str base_ami: The name of the ami to use.
        :param bytes userdata: User data to pass to the instance.
        :param bytes size: The name of the size to use.
        :param int disk_size: The size of disk to allocate.
        :param dict metadata: Metadata to associate with the node.
        """
        if keyname is None:
            keyname = self._keyname

        image_name = IMAGE_NAMES[distribution]

        node = self._driver.create_node(
            name=name,
            image=get_image(self._driver, image_name),
            size=get_size(self._driver, size),
            ex_keyname=keyname,
            ex_userdata=userdata,
            ex_config_drive="true",
            ex_metadata=metadata,
        )

        node, addresses = self._driver.wait_until_running([node])[0]

        public_address = addresses[0]

        return RackspaceNode(node=node, address=public_address,
                             distribution=distribution)
Python
0
@@ -91,68 +91,8 @@ %22%22%0A%0A -from libcloud.compute.providers import get_driver, Provider%0A from @@ -937,16 +937,180 @@ egion):%0A + # Import these here, so that this can be imported without%0A # installng libcloud.%0A from libcloud.compute.providers import get_driver, Provider%0A
e303da18866a0e9afbc3cfd0402b966e1a019dd3
comment out sentry
everyclass/server/__init__.py
everyclass/server/__init__.py
import copy
import gc
import sys

import logbook
from flask import Flask, g, render_template, session
from flask_cdn import CDN
from htmlmin import minify
from raven.contrib.flask import Sentry
from raven.handlers.logbook import SentryHandler

logger = logbook.Logger(__name__)
sentry = Sentry()
__app = None

try:
    import uwsgidecorators

    """
    below are functions that will be executed in **each** process after fork().
    these functions will be executed in the same order of definition here.
    """

    @uwsgidecorators.postfork
    def enable_gc():
        """enable garbage collection"""
        gc.set_threshold(700)

    @uwsgidecorators.postfork
    def init_db():
        """init database connection"""
        from everyclass.server.db.mysql import init_pool
        global __app
        init_pool(__app)

    @uwsgidecorators.postfork
    def init_log_handlers():
        """init log handlers"""
        from everyclass.server.utils.log import LogstashHandler
        from elasticapm.contrib.flask import ElasticAPM
        from everyclass.server.utils import monkey_patch

        ElasticAPM.request_finished = monkey_patch.ElasticAPM.request_finished(ElasticAPM.request_finished)

        global __app
        current_app = __app

        if current_app.config['CONFIG_NAME'] in ["production", "staging", "testing"]:  # ignore dev in container
            # Sentry
            sentry.init_app(app=current_app)
            sentry_handler = SentryHandler(sentry.client, level='WARNING')  # Sentry 只处理 WARNING 以上的
            logger.handlers.append(sentry_handler)

            # Elastic APM
            ElasticAPM(current_app)

            # Log to Logstash
            logstash_handler = LogstashHandler(host=current_app.config['LOGSTASH']['HOST'],
                                               port=current_app.config['LOGSTASH']['PORT'],
                                               release=current_app.config['GIT_DESCRIBE'],
                                               bubble=True,
                                               logger=logger,
                                               filter=lambda r, h: r.level >= 11)  # do not send DEBUG
            logger.handlers.append(logstash_handler)

        # print current configuration
        import uwsgi
        if uwsgi.worker_id() == 1:
            # set to warning level because we want to monitor restarts
            logger.info('App (re)started in `{0}` environment'.format(current_app.config['CONFIG_NAME']), stack=False)
            logger.info('Below are configurations we are using:')
            logger.info('================================================================')
            for key, value in __app.config.items():
                if key not in ('SECRET_KEY',):
                    value = copy.copy(value)

                    # 敏感内容抹去
                    if key == 'SENTRY_CONFIG':
                        value['dsn'] = '[secret]'
                    if key == 'MYSQL_CONFIG':
                        value['password'] = '[secret]'
                    if key == 'ELASTIC_APM':
                        value['SECRET_TOKEN'] = '[secret]'

                    logger.info('{}: {}'.format(key, value))
            logger.info('================================================================')

except ModuleNotFoundError:
    print('ModuleNotFoundError when importing uWSGI-decorators. Ignore this if you are not launched from uWSGI.')


def create_app(outside_container=False) -> Flask:
    """创建 flask app
    @param outside_container: 是否不在容器内运行
    """
    from everyclass.server.db.dao import new_user_id_sequence
    from everyclass.server.db.mysql import get_connection, init_pool
    from everyclass.server.utils.log import LOG_FORMAT_STRING

    app = Flask(__name__,
                static_folder='../../frontend/dist',
                static_url_path='',
                template_folder="../../frontend/templates")

    # load app config
    from everyclass.server.config import get_config
    _config = get_config()
    app.config.from_object(_config)

    """
    每课统一日志机制

    规则如下:
    - WARNING 以下 log 输出到 stdout
    - WARNING 以上输出到 stderr
    - DEBUG 以上日志以 json 形式通过 TCP 输出到 Logstash,然后发送到日志中心
    - WARNING 以上级别的输出到 Sentry

    日志等级:
    critical – for errors that lead to termination
    error – for errors that occur, but are handled
    warning – for exceptional circumstances that might not be errors
    notice – for non-error messages you usually want to see
    info – for messages you usually don’t want to see
    debug – for debug messages

    Sentry:
    https://docs.sentry.io/clients/python/api/#raven.Client.captureMessage
    - stack 默认是 False
    """
    stdout_handler = logbook.StreamHandler(stream=sys.stdout, bubble=True, filter=lambda r, h: r.level < 13)
    stdout_handler.format_string = LOG_FORMAT_STRING
    logger.handlers.append(stdout_handler)

    stderr_handler = logbook.StreamHandler(stream=sys.stderr, bubble=True, level='WARNING')
    stderr_handler.format_string = LOG_FORMAT_STRING
    logger.handlers.append(stderr_handler)

    # CDN
    CDN(app)

    # 容器外运行(无 uWSGI)时初始化数据库
    if outside_container and (app.config['CONFIG_NAME'] == "development"):
        init_pool(app)

    # 导入并注册 blueprints
    from everyclass.server.calendar.views import cal_blueprint
    from everyclass.server.query import query_blueprint
    from everyclass.server.views import main_blueprint as main_blueprint
    from everyclass.server.api import api_v1 as api_blueprint
    app.register_blueprint(cal_blueprint)
    app.register_blueprint(query_blueprint)
    app.register_blueprint(main_blueprint)
    app.register_blueprint(api_blueprint, url_prefix='/api/v1')

    @app.before_request
    def set_user_id():
        """在请求之前设置 session uid,方便 Elastic APM 记录用户请求"""
        if not session.get('user_id', None):
            session['user_id'] = new_user_id_sequence()

    @app.after_request
    def response_minify(response):
        """用 htmlmin 压缩 HTML,减轻带宽压力"""
        if app.config['HTML_MINIFY'] and response.content_type == u'text/html; charset=utf-8':
            response.set_data(minify(response.get_data(as_text=True)))
        return response

    @app.template_filter('versioned')
    def version_filter(filename):
        """
        模板过滤器。如果 STATIC_VERSIONED,返回类似 'style-v1-c012dr.css' 的文件,而不是 'style-v1.css'

        :param filename: 文件名
        :return: 新的文件名
        """
        if app.config['STATIC_VERSIONED']:
            if filename[:4] == 'css/':
                new_filename = app.config['STATIC_MANIFEST'][filename[4:]]
                return 'css/' + new_filename
            elif filename[:3] == 'js/':
                new_filename = app.config['STATIC_MANIFEST'][filename[3:]]
                return new_filename
            else:
                return app.config['STATIC_MANIFEST'][filename]
        return filename

    @app.errorhandler(500)
    def internal_server_error(error):
        return render_template('500.html',
                               event_id=g.sentry_event_id,
                               public_dsn=sentry.client.get_public_dsn('https'))

    global __app
    __app = app

    return app
Python
0
@@ -190,57 +190,8 @@ ntry -%0Afrom raven.handlers.logbook import SentryHandler %0A%0Alo @@ -1342,24 +1342,26 @@ %0A + # sentry.init @@ -1393,16 +1393,18 @@ + # sentry_ @@ -1488,32 +1488,34 @@ %E4%BB%A5%E4%B8%8A%E7%9A%84%0A + # logger.handlers @@ -2370,20 +2370,23 @@ logger. -info +warning ('App (r
96bc8bdec2736a1f86528d55331e935ecf13a529
np nan validator
pycqed/instrument_drivers/pq_parameters.py
pycqed/instrument_drivers/pq_parameters.py
from qcodes.instrument.parameter import ManualParameter
from qcodes.utils.validators import Validator, Strings
import numpy as np


class NP_NANs(Validator):
    is_numeric = True

    def __init__(self):
        pass

    def __repr__(self):
        return '<nan>'

    def validate(self, value, context=''):
        try:
            if not np.isnan(value):
                raise ValueError('{} is not nan; {}'.format(
                    repr(value), context))
        except:
            raise ValueError('{} is not nan; {}'.format(
                repr(value), context))


class InstrumentParameter(ManualParameter):
    """
    Args:
        name (string): the name of the instrument that one wants to add.

        instrument (Optional[Instrument]): the "parent" instrument this
            parameter is attached to, if any.

        initial_value (Optional[string]): starting value, the
            only invalid value allowed, and None is only allowed as an
            initial value, it cannot be set later

        **kwargs: Passed to Parameter parent class
    """

    def get_instr(self):
        """
        Returns the instance of the instrument with the name equal to the
        value of this parameter.
        """
        instrument_name = self.get()
        # note that _instrument refers to the instrument this parameter belongs
        # to, while the instrument_name is the instrument that is the value
        # of this parameter.
        return self._instrument.find_instrument(instrument_name)

    def set_validator(self, vals):
        """
        Set a validator `vals` for this parameter.

        Args:
            vals (Validator): validator to set
        """
        if vals is None:
            self.vals = Strings()
        elif isinstance(vals, Validator):
            self.vals = vals
        else:
            raise TypeError('vals must be a Validator')


class ConfigParameter(ManualParameter):
    # TODO: move this to qcodes as a pull request
    """
    Define one parameter that reflects a manual configuration setting.

    Args:
        name (string): the local name of this parameter

        instrument (Optional[Instrument]): the instrument this applies to,
            if any.

        initial_value (Optional[string]): starting value, the
            only invalid value allowed, and None is only allowed as an
            initial value, it cannot be set later

        **kwargs: Passed to Parameter parent class
    """

    def __init__(self, name, instrument=None, initial_value=None, **kwargs):
        super().__init__(name=name, **kwargs)
        self._instrument = instrument
        # if the instrument does not have _config_changed attribute creates it
        if not hasattr(self._instrument, '_config_changed'):
            self._instrument._config_changed = True
        self._meta_attrs.extend(['instrument', 'initial_value'])
        if initial_value is not None:
            self.validate(initial_value)
            self._save_val(initial_value)

    def set(self, value):
        """
        Validate and saves value. If the value is different from the latest
        value it sets the

        Args:
            value (any): value to validate and save
        """
        self.validate(value)
        if value != self.get_latest():
            self._instrument._config_changed = True
        self._save_val(value)

    def get(self):
        """ Return latest value"""
        return self.get_latest()
Python
0.999504
@@ -210,12 +210,37 @@ -pass +self._valid_values = %5Bnp.nan%5D %0A%0A
8995d7314bddcf4418a08cb39b2fabbc8704706e
Use conservative defaults for local facebook settings.
pykeg/src/pykeg/contrib/facebook/models.py
pykeg/src/pykeg/contrib/facebook/models.py
import datetime from django.db import models from django.db.models.signals import post_save from socialregistration import models as sr_models PRIVACY_CHOICES = ( ('EVERYONE', 'Everyone'), ('ALL_FRIENDS', 'Friends'), ('FRIENDS_OF_FRIENDS', 'Friends of Friends'), ('NETWORK_FRIENDS', 'Networks and Friends'), #('CUSTOM', 'Custom permissions'), ) class FacebookSession(models.Model): """Stores the session id for a user.""" profile = models.ForeignKey(sr_models.FacebookProfile, unique=True, related_name='session') session_id = models.CharField(max_length=255, blank=False, null=False) updated = models.DateTimeField(default=datetime.datetime.now) @classmethod def get_session(cls, request): if not hasattr(request, 'facebook'): raise ValueError, "no facebook" return None fb = request.facebook if not fb.uid: raise ValueError, "no uid" return None profile = sr_models.FacebookProfile.objects.get(uid=fb.uid) if not profile: raise ValueError, "no profile" return None session, new = FacebookSession.objects.get_or_create(profile=profile) if new or session.session_id != fb.session_key: session.session_id = fb.session_key session.save() def add_permission(self, perm): qs = self.profile.permission_set.filter(permission=perm) if not qs.count(): perm = FacebookPermission(profile=self.profile, permission=perm) perm.save() def rm_permission(self, perm): qs = self.profile.permission_set.filter(permission=perm) if qs.count(): qs.delete() def profile_post_save(sender, instance, **kwargs): """Create default settings on new profile.""" settings, new = FacebookSettings.objects.get_or_create( profile=instance) post_save.connect(profile_post_save, sender=sr_models.FacebookProfile) class FacebookPermission(models.Model): """Records a granted permission.""" profile = models.ForeignKey(sr_models.FacebookProfile, unique=True, related_name='permission_set') permission = models.CharField(max_length=255, blank=False, null=False, unique=True) class FacebookSettings(models.Model): profile = models.ForeignKey(sr_models.FacebookProfile, unique=True, related_name='settings') # stream.publish stuff # http://wiki.developers.facebook.com/index.php/Stream.publish publish_events = models.BooleanField(default=True, help_text='Post each drink to your wall.') include_link = models.BooleanField(default=True, help_text='Add a link to this kegbot when publishing to wall.') publish_status = models.BooleanField(default=False, help_text='Update status on start of a new drinking session.') privacy = models.CharField(max_length=64, choices=PRIVACY_CHOICES, default='ALL_FRIENDS', help_text='Privacy setting for drink posts.')
Python
0
@@ -2379,35 +2379,36 @@ anField(default= -Tru +Fals e,%0A help_te @@ -2488,19 +2488,20 @@ default= -Tru +Fals e,%0A