Dataset columns:
    commit        string, length 40
    subject       string, length 1 to 3.25k
    old_file      string, length 4 to 311
    new_file      string, length 4 to 311
    old_contents  string, length 0 to 26.3k
    lang          string, 3 distinct values
    proba         float64, 0 to 1
    diff          string, length 0 to 7.82k

commit: 86d4b4a241887bfcd990180a6486cb8054bf514c
subject: Add 'TODO' for YAML editor.
old_file: core/io/pyslvs_yaml.py
new_file: core/io/pyslvs_yaml.py
old_contents:
# -*- coding: utf-8 -*- """YAML format processing function.""" __author__ = "Yuan Chang" __copyright__ = "Copyright (C) 2016-2018" __license__ = "AGPL" __email__ = "pyslvs@gmail.com" import yaml from core.QtModules import QObject from core import main_window as mn class YamlEditor(QObject): """YAML reader and writer.""" def __init__(self, parent: 'mn.MainWindow'): super(YamlEditor, self).__init__(parent) # Check file changed function. self.__check_file_changed = parent.checkFileChanged # Check workbook saved function. self.__workbook_saved = parent.workbookSaved # Call to get point expressions. self.__point_expr_func = parent.EntitiesPoint.expression # Call to get link data. self.__link_expr_func = parent.EntitiesLink.colors # Call to get storage data. self.__storage_data_func = parent.getStorage # Call to get collections data. self.__collect_data_func = parent.CollectionTabPage.collect_data # Call to get triangle data. self.__triangle_data_func = parent.CollectionTabPage.triangle_data # Call to get inputs variables data. self.__inputs_data_func = parent.InputsWidget.inputPairs # Call to get algorithm data. self.__algorithm_data_func = parent.DimensionalSynthesis.mechanism_data # Call to get path data. self.__path_data_func = parent.InputsWidget.pathData # Call to load collections data. self.__load_collect_func = parent.CollectionTabPage.StructureWidget.addCollections # Call to load triangle data. self.__load_triangle_func = parent.CollectionTabPage.TriangularIterationWidget.addCollections # Call to load inputs variables data. self.__load_inputs_func = parent.InputsWidget.addInputsVariables # Call after loaded algorithm results. self.__load_algorithm_func = parent.DimensionalSynthesis.loadResults # Call after loaded paths. self.__load_path_func = parent.InputsWidget.loadPaths # Add empty links function. self.__add_links_func = parent.addEmptyLinks # Parse function. self.__parse_func = parent.parseExpression # Clear function for main window. self.__clear_func = parent.clear # Add storage function. self.__add_storage_func = parent.addMultipleStorage self.file_name = "" def reset(self): """Reset some settings.""" self.file_name = "" def save(self): """Save YAML file.""" def save_as(self, file_name: str): """Save to a new YAML file.""" def load(self, file_name: str): """Load YAML file.""" self.file_name = file_name
lang: Python
proba: 0
diff:
@@ -179,16 +179,45 @@ l.com%22%0A%0A +from typing import Dict, Any%0A import y @@ -2592,32 +2592,223 @@ ve YAML file.%22%22%22 +%0A data = %7B%7D%0A # TODO: Data structure.%0A yaml_script = yaml.dump(data, default_flow_style=True)%0A with open(self.file_name, 'w') as f:%0A f.write(yaml_script) %0A%0A def save_a @@ -2866,24 +2866,79 @@ AML file.%22%22%22 +%0A self.file_name = file_name%0A self.save() %0A%0A def lo @@ -2985,32 +2985,32 @@ d YAML file.%22%22%22%0A - self.fil @@ -3008,28 +3008,188 @@ self.file_name = file_name%0A + with open(self.file_name) as f:%0A yaml_script = f.read()%0A data: Dict%5Bstr, Any%5D = yaml.load(yaml_script)%0A # TODO: Load function.%0A

commit: fe82dce52884661297ecf640cd3ffd18c76ffc25
subject: change graph access
old_file: scdown/neo.py
new_file: scdown/neo.py
old_contents:
from py2neo import Graph, Relationship import logging import os try: import json except ImportError: import simplejson as json ID_PROP = "id" NODE_USER = "User" NODE_TRACK = "Track" NODE_COMMENT = "Comment" NODE_PROFILE = "Profile" UNIQUES = [(x, ID_PROP) for x in [NODE_USER, NODE_TRACK, NODE_COMMENT, NODE_PROFILE]] # relationships: # (a:User)-[:FOLLOWS]->(b:User) # timestamp # (a:User)-[:UPLOADED]->(b:Track) # (a:User)-[:FAVORITED]->(b:Track) # timestamp # (a:User)-[:HAS_PROFILE]->(b:Profile) # timestamp # (a:User)-[:WROTE]->(b:Comment) # (a:Comment)-[:REFERS_TO]->(b:Track) REL_FOLLOWS = "FOLLOWS" REL_UPLOADED = "UPLOADED" REL_FAVORITED = "FAVORITED" REL_HAS_PROFILE = "HAS_PROFILE" REL_WROTE = "WROTE" REL_REFERS_TO = "REFERS_TO" class Neo(object): _graph = None _extra_label = None logger = None def __init__(self, graph=None, logger=logging.getLogger("")): if graph is None: graph = Graph(os.getenv("GRAPHENEDB_URL")) self._graph = graph self.logger = logger for (l, p) in UNIQUES: self.mk_unique(l, p) def mk_unique(self, label, property_key): schema = self._graph.schema if len(schema.get_uniqueness_constraints(label)) == 0: schema.create_uniqueness_constraint(label, property_key) def deflate(self, d): """Turn arbitrary dict into something Neo4j serializable""" new_d = {} for k, v in d.iteritems(): if hasattr(v, "__iter__"): new_d["__json_" + k] = True new_d[k] = json.dumps(v) else: new_d[k] = v assert not any((isinstance(x, dict) or isinstance(x, list) for x in new_d)) return new_d def inflate(self, properties): """Turn Neo4j serialized dict into normal""" new_d = {} json_keys = [x for x in properties.keys() if x.startswith("__json_")] for k in json_keys: orig_key = k[7:] new_d[orig_key] = json.loads(properties[orig_key]) for k, v in properties.iteritems(): if not k.startswith("__json_"): if k not in new_d: new_d[k] = v return new_d def get(self, node_id): return self._graph.node(node_id) def create_or_update_node(self, item_type, item_properties): props = self.deflate(item_properties) item_id = props[ID_PROP] node = self._graph.merge_one(item_type, ID_PROP, item_id) if self._extra_label is not None: if self._extra_label not in node.labels: node.labels.add(self._extra_label) node.labels.push() if dict(node.properties) != props: node.properties.update(props) node.push() return node def check_relation(self, node1, relationship, node2): """Check that the relation is being made between objects of the right type.""" if relationship == REL_FOLLOWS: assert (NODE_USER in node1.labels and NODE_USER in node2.labels) elif relationship == REL_UPLOADED: assert (NODE_USER in node1.labels and NODE_TRACK in node2.labels) elif relationship == REL_FAVORITED: assert (NODE_USER in node1.labels and NODE_TRACK in node2.labels) elif relationship == REL_HAS_PROFILE: assert (NODE_USER in node1.labels and NODE_PROFILE in node2.labels) elif relationship == REL_WROTE: assert (NODE_USER in node1.labels and NODE_COMMENT in node2.labels) elif relationship == REL_REFERS_TO: assert (NODE_COMMENT in node1.labels and NODE_TRACK in node2.labels) def mk_relation(self, node1, relationship, node2, props={}): self.check_relation(node1, relationship, node2) return Relationship(node1, relationship, node2, **props) def create_all(self, relations): self._graph.create_unique(*relations)
lang: Python
proba: 0
diff:
@@ -12,21 +12,27 @@ import -Graph +ServiceRoot , Relati @@ -961,45 +961,176 @@ raph - = Graph(os.getenv(%22GRAPHENEDB_URL%22)) +enedb_url = os.environ.get(%22GRAPHENEDB_URL%22,%0A %22http://localhost:7474/%22)%0A graph = ServiceRoot(graphenedb_url).graph %0A

commit: 33d55c74f2c0928c4052c5ae8be6754202616d9a
subject: Add some additional exceptions to images.py.
old_file: common/imagenet_server/images.py
new_file: common/imagenet_server/images.py
old_contents:
""" Deals with image acquiring and manipulation. """ import httplib import logging import os import re import socket import ssl import urllib2 import urlparse import time import cv2 import numpy as np logger = logging.getLogger(__name__) BAD_IMAGES_DIR = "error_images" # How close an image has to be to a known bad image to throw it out. ERROR_IMAGE_THRESH = 1.0 # Minimum number of bytes we need to consistently read every second before we # give up. MIN_DOWNLOAD_RATE = 512 def _load_error_images(): """ Loads error images from the disk. Returns: The list of error images. """ file_names = os.listdir(BAD_IMAGES_DIR) error_images = [] for image_name in file_names: image_path = os.path.join(BAD_IMAGES_DIR, image_name) image = cv2.imread(image_path, cv2.IMREAD_UNCHANGED) if image is None: raise RuntimeError("Loading bad image %s failed." % (image_path)) error_images.append(image) return error_images def _url_encode_non_ascii(b): """ Encodes non-ascii characters so they can be used in URLs """ return re.sub('[\x80-\xFF]', lambda c: '%%%02x' % ord(c.group(0)), b) def _iri_to_uri(iri): """ Convert an IRI to a URI. """ parts= urlparse.urlparse(iri) encoded = [] for parti, part in enumerate(parts): if parti == 1: encoded.append(part.encode("idna")) else: encoded.append(_url_encode_non_ascii(part.encode("utf-8"))) return urlparse.urlunparse(encoded) def _check_bad_image(image): """ Checks an image against a known set of bad images in order to decide whether this image is good or not. Returns: True if it thinks the image is bad, False otherwise. """ # Check if it matches any of the error images. for error_image in _error_images: distance = abs(np.mean(image.astype("int8") - error_image.astype("int8"))) if distance < ERROR_IMAGE_THRESH: return True return False def download_image(url, keep_color=False): """ Downloads the image from the specified url. Args: url: The URL to download from. keep_color: If False, images will be saved in grayscale. Returns: The image data that was downloaded. """ logger.info("Downloading new image: %s", url) try: url = _iri_to_uri(url) except UnicodeDecodeError as e: logger.warning("Error decoding URL: %s" % (e)) return None try: response = urllib2.urlopen(url, timeout=10) except (urllib2.HTTPError, urllib2.URLError, httplib.BadStatusLine, socket.timeout, socket.error, ssl.CertificateError) as e: # Generally, this is because the image was not found. logger.warning("Image download failed with '%s'." % (e)) return None if "photo_unavailable" in response.geturl(): # Flickr has this wonderful failure mode where it just redirects to this # picture instead of throwing a 404 error. Actually, lots of websites do # this, but since Flickr is the most common one, it saves time to handle # that issue here. logger.warning("Got Flickr 'photo unavailable' error.") return None raw_data = "" slow_cycles = 0 while True: start_time = time.time() try: new_data = response.read(512) except (socket.timeout, ssl.SSLError, socket.error) as e: logger.warning("Image download failed with '%s'." % (e)) return None elapsed = time.time() - start_time if not new_data: # We're done reading the response. 
break raw_data += new_data if 512.0 / elapsed < MIN_DOWNLOAD_RATE: logger.debug("Downloading image too slowly.") slow_cycles += 1 if slow_cycles >= 3: logger.warning("Aborting download due to slowness: %s" % (url)) return None else: slow_cycles = 0 image = np.asarray(bytearray(raw_data), dtype="uint8") if keep_color: flags = cv2.IMREAD_COLOR else: flags = cv2.IMREAD_GRAYSCALE image = cv2.imdecode(image, flags) if image is None: return image # Reshape the image. image = reshape_image(image) # Check for other bad images besides Flickr's. if _check_bad_image(image): logging.warning("Got bad image: %s." % (url)) return None return image def download_words(wnid): """ Downloads the words associated with a synset. Args: wnid: The wnid of the synset. Returns: The word data that was downloaded. """ base_url = \ "http://www.image-net.org/api/text/wordnet.synset.getwords?wnid=%s" response = urllib2.urlopen(base_url % (wnid)) words = response.read().split("\n")[:-1] logger.info("Got words for synset: %s", words) return words def reshape_image(image): """ Reshapes a stored image so that it is a consistent shape and size. Args: image: The image to reshape. Returns: The reshaped image. """ # Crop the image to just the center square. if len(image.shape) == 3: # It may have multiple color channels. height, width, _ = image.shape else: height, width = image.shape logger.debug("Original image shape: (%d, %d)" % (width, height)) if width != height: if width > height: # Landscape length = height crop_left = (width - height) / 2 crop_top = 0 elif height > width: # Portrait. length = width crop_top = (height - width) / 2 crop_left = 0 image = image[crop_top:(length + crop_top), crop_left:(length + crop_left)] # Set a proper size. At this point, we'll do 256x256, which should be enough # resolution for simple classification. image = cv2.resize(image, (256, 256)) return image socket.setdefaulttimeout(10) # Pre-load error images. if not os.path.exists(BAD_IMAGES_DIR): raise RuntimeError("Could not find bad images directory '%s'." % \ (BAD_IMAGES_DIR)) else: _error_images = _load_error_images()
lang: Python
proba: 0
diff:
@@ -2503,24 +2503,81 @@ ificateError +,%0A httplib.HTTPException, httplib.IncompleteRead ) as e:%0A

commit: cc9743cabcc53be0e97ff76eca8ef14bdc911e11
subject: fix indentation in plot script
old_file: cvpr15_eval/plot_score_vs_ocsvm.py
new_file: cvpr15_eval/plot_score_vs_ocsvm.py
old_contents:
#!/usr/bin/env python import os.path import sys sys.path.append('.') sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../toolbox/.') import numpy as np import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import argparse import math import structsvm import trackingfeatures if __name__ == "__main__": parser = argparse.ArgumentParser( description='Show quality of reranker compared to one class SVM') # file paths parser.add_argument('--lineage', type=str, required=True, dest='lineage_filename', help='Lineage tree dump') parser.add_argument('--precisions', type=str, required=True, dest='precisions_filename', help='file containing the precision against the ground truth of each lineage tree') parser.add_argument('--reranker-weights', type=str, dest='reranker_weights', help='file containing the learned reranker weights') parser.add_argument('-o', required=True, type=str, dest='out_file', help='Name of the file the plot is saved to') options = parser.parse_args() # load precisions = np.loadtxt(options.precisions_filename) tracks, divisions, lineage_trees = trackingfeatures.load_lineage_dump(options.lineage_filename) print("Found {} tracks, {} divisions and {} lineage trees".format(len(tracks), len(divisions), len(lineage_trees))) weights = np.loadtxt(options.reranker_weights) means = np.loadtxt(os.path.splitext(options.reranker_weights)[0] + '_means.txt') variances = np.loadtxt(os.path.splitext(options.reranker_weights)[0] + '_variances.txt') # compute scores scores = [] for lt in lineage_trees: feat_vec = np.expand_dims(lt.get_expanded_feature_vector([-1, 2]), axis=1) structsvm.utils.apply_feature_normalization(feat_vec, means, variances) score = np.dot(weights, feat_vec[:, 0]) scores.append(score) filename, extension = os.path.splitext(options.out_file) prec_score_pairs = zip(list(precisions), scores) prec_score_pairs.sort(key=lambda x: x[1], reverse=True) # sort by score sorted_precs, sorted_scores = zip(*prec_score_pairs) threshold = 0.9 def filter_precs(threshold, precs): precs = list(precs) i = 0 num_below_thresh = sum(np.array(precs) < threshold) for c in range(len(precs)): if precs[c] < threshold: i+=1 precs[c] = 1.0 - float(i) / num_below_thresh return precs sorted_precs = filter_precs(threshold, sorted_precs) # plot only outlier svm score (averaged over lineage) vs precision outlier_svm_scores = [] track_outlier_feature_idx = trackingfeatures.LineagePart.feature_to_weight_idx('track_outlier_svm_score') div_outlier_feature_idx = trackingfeatures.LineagePart.feature_to_weight_idx('div_outlier_svm_score') for lt in lineage_trees: fv = lt.get_feature_vector() outlier_svm_scores.append(fv[track_outlier_feature_idx] + fv[div_outlier_feature_idx]) prec_outlier_pairs = zip(list(precisions), outlier_svm_scores) prec_outlier_pairs.sort(key=lambda x: x[1], reverse=True) # sort by outlier_svm_score o_sorted_precs, sorted_outliers = zip(*prec_outlier_pairs) o_sorted_precs = filter_precs(threshold, o_sorted_precs) plt.figure() plt.hold(True) plt.plot(sorted_precs,label='ordered by score') plt.plot(o_sorted_precs,label='ordered by outlier-svm') plt.xlabel("ordered paths") plt.ylabel("Precision") #plt.savefig(filename + "_outlier_score" + extension) plt.legend() plt.savefig(options.out_file) # # scatter plot # plt.figure() # plt.hold(True) # plt.scatter(precisions, scores) # plt.xlabel("Precision") # plt.ylabel("Score") # plt.savefig(options.out_file) # # length histogram # plt.figure() # plt.hist(lengths, 100) # plt.xlabel("Length") # plt.ylabel("Frequency") # 
plt.savefig(filename + "_length_histo" + extension) # # sort according to precision and plot again # # log_scores = map(math.log, scores) # prec_score_pairs = zip(list(precisions), scores, num_divs, num_tracks, lengths) # prec_score_pairs.sort(key=lambda x: x[1]) # sort by score # plt.figure() # plt.plot(range(len(prec_score_pairs)), zip(*prec_score_pairs)[0]) # plt.ylabel("Precision") # plt.xlabel("Num Tracks, sorted by score") # plt.savefig(filename + "_sorted_num_tracks" + extension) # plt.figure() # plt.hold(True) # plt.plot(zip(*prec_score_pairs)[1], zip(*prec_score_pairs)[0]) # plt.ylabel("Precision") # plt.xlabel("Score") # plt.savefig(filename + "_sorted" + extension) # plt.figure() # plt.hold(True) # plt.scatter(zip(*prec_score_pairs)[2], zip(*prec_score_pairs)[1], c='b', label='Num divisions') # plt.scatter(zip(*prec_score_pairs)[3], zip(*prec_score_pairs)[1], c='r', label='Num tracks') # # plt.plot(zip(*prec_score_pairs)[4], zip(*prec_score_pairs)[1], c='g', label='overall lengths') # plt.xlabel("Length") # plt.ylabel("Score") # plt.legend() # plt.savefig(filename + "_length_score" + extension) # plt.figure() # plt.hold(True) # plt.scatter(zip(*prec_score_pairs)[2], zip(*prec_score_pairs)[0], c='b', label='Num divisions') # plt.scatter(zip(*prec_score_pairs)[3], zip(*prec_score_pairs)[0], c='r', label='Num tracks') # # plt.scatter(zip(*prec_score_pairs)[4], zip(*prec_score_pairs)[0], c='g', label='overall lengths') # plt.xlabel("Length") # plt.ylabel("Precision") # plt.legend() # plt.savefig(filename + "_length_precision" + extension)
lang: Python
proba: 0.000013
diff:
@@ -2374,16 +2374,20 @@ d = 0.9%0A + def filt @@ -2414,16 +2414,20 @@ precs):%0A + prec @@ -2450,14 +2450,22 @@ + + i = 0%0A + @@ -2516,24 +2516,28 @@ eshold)%0A + + for c in ran @@ -2560,16 +2560,20 @@ + if precs @@ -2605,13 +2605,21 @@ + + i+=1%0A + @@ -2667,16 +2667,20 @@ _thresh%0A + retu @@ -3814,32 +3814,78 @@ ptions.out_file) +%0A print(%22Saved figure %22, options.out_file) %0A%0A # # scatte @@ -5894,20 +5894,21 @@ cision%22 + extension) +%0A

commit: 3747f72e81a3c143145dcbbdcfbfc13b292f19e1
subject: add filter plot test
old_file: neurodsp/tests/test_plts_filt.py
new_file: neurodsp/tests/test_plts_filt.py
old_contents:
""" test_burst.py Test burst detection functions """ import os import numpy as np import neurodsp from .util import _load_example_data def test_detect_bursts_dual_threshold(): """ Confirm consistency in burst detection results on a generated neural signal """ # Load data and ground-truth filtered signal sig = _load_example_data(data_idx=1) fs = 1000 f_range = (13, 30) # Load past burst findings bursting_true = np.load(os.path.dirname(neurodsp.__file__) + '/tests/data/sample_data_1_burst_deviation.npy') # Detect bursts with different algorithms bursting = neurodsp.detect_bursts_dual_threshold(sig, fs, f_range, (0.9, 2)) assert np.isclose(np.sum(bursting - bursting_true), 0)
lang: Python
proba: 0
diff:
@@ -2,20 +2,24 @@ %22%22%0Atest_ -burs +plts_fil t.py%0ATes @@ -24,49 +24,29 @@ est -burst detection function +filtering plot s%0A%22%22%22%0A%0A -import os%0A impo @@ -60,22 +60,20 @@ y as np%0A -import +from neurods @@ -77,27 +77,66 @@ odsp -%0Afrom .ut +.filt import filter_signal%0Afrom neurodsp.plts.f il +t import _loa @@ -135,157 +135,132 @@ ort -_load_example_data%0A%0A%0Adef test_detect_bursts_dual_threshold():%0A %22%22%22%0A Confirm consistency in burst detection results on a generated neural signal +plot_frequency_response%0A%0A%0Adef test_plot_frequency_response():%0A %22%22%22%0A Confirm frequency response plotting function works %0A @@ -268,56 +268,56 @@ %22%22%22%0A +%0A # -Load data and ground-tru +Test plotting through th +e filter -ed signal + function %0A @@ -327,434 +327,284 @@ g = -_load_example_data(data_idx=1)%0A fs = 1000%0A f_range = (13, 30)%0A%0A # Load past burst findings%0A bursting_true = np.load(os.path.dirname(neurodsp.__file__) +%0A '/tests/data/sample_data_1_burst_deviation.npy')%0A%0A # Detect bursts with different algorithms%0A bursting = neurodsp.detect_bursts_dual_threshold(sig, fs, f_range, (0.9, 2))%0A%0A assert np.isclose(np.sum(bursting - bursting_true), 0) +np.random.randn(2000)%0A fs = 1000%0A sig_filt, kernel = filter_signal(sig, fs, 'bandpass', (8, 12),%0A plot_freq_response=True, return_kernel=True, verbose=False)%0A%0A # Test calling frequency response plot directly%0A plot_frequency_response(fs, kernel)%0A assert True %0A

commit: 3e8bfa86026c9c99d697049acbbfac640bca8af8
subject: Fix test so as to also pass in debug mode
old_file: Lib/test/test_cmd_line.py
new_file: Lib/test/test_cmd_line.py
old_contents:
# Tests invocation of the interpreter with various command line arguments # All tests are executed with environment variables ignored # See test_cmd_line_script.py for testing of script execution import test.support, unittest import os import sys import subprocess def _spawn_python(*args): cmd_line = [sys.executable] # When testing -S, we need PYTHONPATH to work (see test_site_flag()) if '-S' not in args: cmd_line.append('-E') cmd_line.extend(args) return subprocess.Popen(cmd_line, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) def _kill_python(p): return _kill_python_and_exit_code(p)[0] def _kill_python_and_exit_code(p): p.stdin.close() data = p.stdout.read() p.stdout.close() # try to cleanup the child so we don't appear to leak when running # with regrtest -R. This should be a no-op on Windows. subprocess._cleanup() returncode = p.wait() return data, returncode class CmdLineTest(unittest.TestCase): def start_python(self, *args): return self.start_python_and_exit_code(*args)[0] def start_python_and_exit_code(self, *args): p = _spawn_python(*args) return _kill_python_and_exit_code(p) def exit_code(self, *args): cmd_line = [sys.executable, '-E'] cmd_line.extend(args) return subprocess.call(cmd_line, stdout=subprocess.PIPE, stderr=subprocess.PIPE) def test_directories(self): self.assertNotEqual(self.exit_code('.'), 0) self.assertNotEqual(self.exit_code('< .'), 0) def verify_valid_flag(self, cmd_line): data = self.start_python(cmd_line) self.assertTrue(data == b'' or data.endswith(b'\n')) self.assertTrue(b'Traceback' not in data) def test_optimize(self): self.verify_valid_flag('-O') self.verify_valid_flag('-OO') def test_q(self): self.verify_valid_flag('-Qold') self.verify_valid_flag('-Qnew') self.verify_valid_flag('-Qwarn') self.verify_valid_flag('-Qwarnall') def test_site_flag(self): if os.name == 'posix': # Workaround bug #586680 by adding the extension dir to PYTHONPATH from distutils.util import get_platform s = "./build/lib.%s-%.3s" % (get_platform(), sys.version) if hasattr(sys, 'gettotalrefcount'): s += '-pydebug' p = os.environ.get('PYTHONPATH', '') if p: p += ':' os.environ['PYTHONPATH'] = p + s self.verify_valid_flag('-S') def test_usage(self): self.assertTrue(b'usage' in self.start_python('-h')) def test_version(self): version = ('Python %d.%d' % sys.version_info[:2]).encode("ascii") self.assertTrue(self.start_python('-V').startswith(version)) def test_verbose(self): # -v causes imports to write to stderr. If the write to # stderr itself causes an import to happen (for the output # codec), a recursion loop can occur. 
data, rc = self.start_python_and_exit_code('-v') self.assertEqual(rc, 0) self.assertTrue(b'stack overflow' not in data) data, rc = self.start_python_and_exit_code('-vv') self.assertEqual(rc, 0) self.assertTrue(b'stack overflow' not in data) def test_run_module(self): # Test expected operation of the '-m' switch # Switch needs an argument self.assertNotEqual(self.exit_code('-m'), 0) # Check we get an error for a nonexistent module self.assertNotEqual( self.exit_code('-m', 'fnord43520xyz'), 0) # Check the runpy module also gives an error for # a nonexistent module self.assertNotEqual( self.exit_code('-m', 'runpy', 'fnord43520xyz'), 0) # All good if module is located and run successfully self.assertEqual( self.exit_code('-m', 'timeit', '-n', '1'), 0) def test_run_module_bug1764407(self): # -m and -i need to play well together # Runs the timeit module and checks the __main__ # namespace has been populated appropriately p = _spawn_python('-i', '-m', 'timeit', '-n', '1') p.stdin.write(b'Timer\n') p.stdin.write(b'exit()\n') data = _kill_python(p) self.assertTrue(data.find(b'1 loop') != -1) self.assertTrue(data.find(b'__main__.Timer') != -1) def test_run_code(self): # Test expected operation of the '-c' switch # Switch needs an argument self.assertNotEqual(self.exit_code('-c'), 0) # Check we get an error for an uncaught exception self.assertNotEqual( self.exit_code('-c', 'raise Exception'), 0) # All good if execution is successful self.assertEqual( self.exit_code('-c', 'pass'), 0) # Test handling of non-ascii data if sys.getfilesystemencoding() != 'ascii': command = "assert(ord('\xe9') == 0xe9)" self.assertEqual( self.exit_code('-c', command), 0) def test_unbuffered_output(self): # Test expected operation of the '-u' switch for stream in ('stdout', 'stderr'): # Binary is unbuffered code = ("import os, sys; sys.%s.buffer.write(b'x'); os._exit(0)" % stream) data, rc = self.start_python_and_exit_code('-u', '-c', code) self.assertEqual(rc, 0) self.assertEqual(data, b'x', "binary %s not unbuffered" % stream) # Text is line-buffered code = ("import os, sys; sys.%s.write('x\\n'); os._exit(0)" % stream) data, rc = self.start_python_and_exit_code('-u', '-c', code) self.assertEqual(rc, 0) self.assertEqual(data.strip(), b'x', "text %s not line-buffered" % stream) def test_unbuffered_input(self): # sys.stdin still works with '-u' code = ("import sys; sys.stdout.write(sys.stdin.read(1))") p = _spawn_python('-u', '-c', code) p.stdin.write(b'x') p.stdin.flush() data, rc = _kill_python_and_exit_code(p) self.assertEqual(rc, 0) self.assertEqual(data.strip(), b'x') def test_main(): test.support.run_unittest(CmdLineTest) test.support.reap_children() if __name__ == "__main__": test_main()
lang: Python
proba: 0
diff:
@@ -6378,37 +6378,33 @@ self.assert -Equal +_ (data.st rip(), b'x') @@ -6391,27 +6391,36 @@ (data.st -rip(), b'x' +artswith(b'x'), data )%0A%0A%0Adef

commit: b3e04b7d4c395fdc2ebe1c0e140516d2c1f57a3a
subject: Fix migration to work with Django 1.10
old_file: casepro/statistics/migrations/0010_existing_case_timings_count.py
new_file: casepro/statistics/migrations/0010_existing_case_timings_count.py
old_contents:
# -*- coding: utf-8 -*- from __future__ import unicode_literals from math import ceil from django.db import migrations, models def calculate_totals_for_cases(apps, schema_editor): from casepro.statistics.models import datetime_to_date Case = apps.get_model('cases', 'Case') CaseAction = apps.get_model('cases', 'CaseAction') Outgoing = apps.get_model('msgs', 'Outgoing') DailySecondTotalCount = apps.get_model('statistics', 'DailySecondTotalCount') qs = Case.objects.all().order_by('id') for case in qs: partner = case.assignee if case.closed_on is not None: # we only consider the first time a case was closed, not any subsequent reopenings close_action = case.actions.filter(action='C').earliest('created_on') org = close_action.case.org user = close_action.created_by partner = close_action.case.assignee case = close_action.case day = datetime_to_date(close_action.created_on, close_action.case.org) # count the time to close on an org level td = close_action.created_on - case.opened_on seconds_since_open = ceil(td.total_seconds()) DailySecondTotalCount.objects.create(day=day, item_type='C', scope='org:%d' % org.pk, count=1, seconds=seconds_since_open) # count the time since case was last assigned to this partner till it was closed if user.partners.filter(id=partner.id).exists(): # count the time since this case was (re)assigned to this partner try: action = case.actions.filter(action='A', assignee=partner).latest('created_on') start_date = action.created_on except CaseAction.DoesNotExist: start_date = case.opened_on td = close_action.created_on - start_date seconds_since_open = ceil(td.total_seconds()) DailySecondTotalCount.objects.create(day=day, item_type='C', scope='partner:%d' % partner.pk, count=1, seconds=seconds_since_open) # check if responded to if case.outgoing_messages.exists(): # count the first reponse at an org level first_response = case.outgoing_messages.earliest('created_on') day = datetime_to_date(first_response.created_on, case.org) td = first_response.created_on - case.opened_on seconds_since_open = ceil(td.total_seconds()) DailySecondTotalCount.objects.create(day=day, item_type='A', scope='org:%d' % case.org.pk, count=1, seconds=seconds_since_open) try: first_response = case.outgoing_messages.filter(partner=partner).earliest('created_on') except Outgoing.DoesNotExist: continue day = datetime_to_date(first_response.created_on, case.org) # count the first response by this partner author_action = case.actions.filter(action='O').order_by('created_on').first() reassign_action = case.actions.filter(action='A', assignee=partner).order_by('created_on').first() if author_action and author_action.created_by.get_partner(org) != partner: # only count the time since this case was (re)assigned to this partner # or cases that were assigned during creation by another partner if reassign_action: start_date = reassign_action.created_on else: start_date = author_action.created_on td = first_response.created_on - start_date seconds_since_open = ceil(td.total_seconds()) DailySecondTotalCount.objects.create(day=day, item_type='A', scope='partner:%d' % partner.pk, count=1, seconds=seconds_since_open) def remove_totals_for_cases(apps, schema_editor): DailySecondTotalCount = apps.get_model('statistics', 'DailySecondTotalCount') db_alias = schema_editor.connection.alias DailySecondTotalCount.objects.using(db_alias).filter(item_type='A').delete() DailySecondTotalCount.objects.using(db_alias).filter(item_type='C').delete() class Migration(migrations.Migration): dependencies = [ ('statistics', 
'0009_dailysecondtotalcount'), ('cases', '0042_auto_20160805_1003'), ] operations = [ # migrations.RunPython(calculate_totals_for_cases, remove_totals_for_cases), # the reverse migration is commented out because it could remove data created after this migration was run, # so it should only be used when you know it will do what you want it to do migrations.RunPython(calculate_totals_for_cases), ]
lang: Python
proba: 0.000001
diff:
@@ -123,16 +123,111 @@ odels%0A%0A%0A +def get_partner(org, user):%0A return user.partners.filter(org=org, is_active=True).first()%0A%0A%0A def calc @@ -3442,16 +3442,33 @@ tion and + get_partner(org, author_ @@ -3488,24 +3488,8 @@ d_by -.get_partner(org ) !=

commit: d3a57fafbbee1959d5369311f53e8fa8166b78ae
subject: Fix typos
old_file: rnacentral_pipeline/databases/data/databases.py
new_file: rnacentral_pipeline/databases/data/databases.py
old_contents:
# -*- coding: utf-8 -*- from __future__ import annotations """ Copyright [2009-2018] EMBL-European Bioinformatics Institute Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import enum import typing as ty class DatabaseValue(ty.NamedTuple): id: int pretty: str @enum.unique class Database(enum.Enum): """ This is an enum that is used to represent the databases that RNAcentral knows about. """ crw = DatabaseValue(0, "CRW") dictybase = DatabaseValue(1, "DictyBase") ena = DatabaseValue(2, "ENA") ensembl = DatabaseValue(3, "Ensembl") ensembl_fungi = DatabaseValue(4, "Ensmbl Fungi") ensembl_metazoa = DatabaseValue(5, "Ensmbl Metazoa") ensembl_plants = DatabaseValue(6, "Ensembl Plants") ensembl_protists = DatabaseValue(7, "Ensembl Protists") five_srrnadb = DatabaseValue(8, "5SrRNAdb") flybase = DatabaseValue(9, "FlyBase") gencode = DatabaseValue(10, "Ensembl/GENCODE") genecards = DatabaseValue(11, "GeneCards") greengenes = DatabaseValue(12, "GtRNAdb") gtrnadb = DatabaseValue(13, "GeneCards") hgnc = DatabaseValue(14, "HGNC") intact = DatabaseValue(15, "IntAct") lncbase = DatabaseValue(16, "LncBase") lncbook = DatabaseValue(17, "LncBook") lncipedia = DatabaseValue(18, "LNCipedia") lncrnadb = DatabaseValue(19, "lncRNAdb") malacards = DatabaseValue(20, "MalaCards") mgi = DatabaseValue(21, "MGI") mirbase = DatabaseValue(22, "miRBase") mirgenedb = DatabaseValue(23, "MirGeneDB") modomics = DatabaseValue(24, "Modomics") noncode = DatabaseValue(25, "NONCODE") pdbe = DatabaseValue(26, "PDBe") pirbase = DatabaseValue(27, "PirBase") pombase = DatabaseValue(28, "PomBase") rdp = DatabaseValue(29, "RDP") refseq = DatabaseValue(30, "RefSeq") rfam = DatabaseValue(31, "Rfam") rgd = DatabaseValue(32, "RGD") sgd = DatabaseValue(33, "SGD") silva = DatabaseValue(34, "SILVA") snodb = DatabaseValue(35, "snoDB") snopy = DatabaseValue(36, "snOPY") snorna_database = DatabaseValue(37, "snoRNA Database") srpdb = DatabaseValue(38, "SRPDB") tair = DatabaseValue(39, "TAIR") tarbase = DatabaseValue(40, "TarBase") tmrna_website = DatabaseValue(41, "tmRNA Website") vega = DatabaseValue(42, "VEGA") wormbase = DatabaseValue(43, "WormBase") zfin = DatabaseValue(44, "Zfin") zwd = DatabaseValue(45, "ZWD") @classmethod def build(cls, name: str) -> Database: if isinstance(name, cls): return name attribute = name.lower().replace(" ", "_") if hasattr(cls, attribute): return getattr(cls, attribute) if name == "pdb": return cls.pdbe if name == "tmrna-website" or attribute == "tmrna_web": return cls.tmrna_website if name == "snopydb": return cls.snopy if attribute == "ensembl/gencode" or attribute == "ensembl_gencode": return cls.gencode if attribute == "5srrnadb": return cls.five_srrnadb if attribute == "snornadb": return cls.snorna_database raise ValueError("Unknown database name %s" % name) def normalized(self) -> str: if self is Database.gencode: return "ENSEMBL_GENCODE" return self.name.upper().replace(" ", "_") def index(self) -> int: return self.value.id def pretty(self) -> str: return self.value.pretty
lang: Python
proba: 0.999999
diff:
@@ -1085,24 +1085,25 @@ alue(4, %22Ens +e mbl Fungi%22)%0A @@ -1145,16 +1145,17 @@ (5, %22Ens +e mbl Meta

commit: d57c3ad63b737fda4632f5896c8049329bcd4fe2
subject: Make this test work under Windows as well.
old_file: Lib/test/test_fpformat.py
new_file: Lib/test/test_fpformat.py
old_contents:
''' Tests for fpformat module Nick Mathewson ''' from test_support import run_unittest import unittest from fpformat import fix, sci, NotANumber StringType = type('') # Test the old and obsolescent fpformat module. # # (It's obsolescent because fix(n,d) == "%.*f"%(d,n) and # sci(n,d) == "%.*e"%(d,n) # for all reasonable numeric n and d, except that sci gives 3 exponent # digits instead of 2. # # Differences only occur for unreasonable n and d. <.2 wink>) class FpformatTest(unittest.TestCase): def checkFix(self, n, digits): result = fix(n, digits) if isinstance(n, StringType): n = repr(n) expected = "%.*f" % (digits, float(n)) self.assertEquals(result, expected) def checkSci(self, n, digits): result = sci(n, digits) if isinstance(n, StringType): n = repr(n) expected = "%.*e" % (digits, float(n)) # add the extra 0 expected = expected[:-2]+'0'+expected[-2:] self.assertEquals(result, expected) def test_basic_cases(self): self.assertEquals(fix(100.0/3, 3), '33.333') self.assertEquals(sci(100.0/3, 3), '3.333e+001') def test_reasonable_values(self): for d in range(7): for val in (1000.0/3, 1000, 1000.0, .002, 1.0/3, 1e10): for realVal in (val, 1.0/val, -val, -1.0/val): self.checkFix(realVal, d) self.checkSci(realVal, d) def test_failing_values(self): # Now for 'unreasonable n and d' self.assertEquals(fix(1.0, 1000), '1.'+('0'*1000)) self.assertEquals(sci("1"+('0'*1000), 0), '1e+1000') # This behavior is inconsistent. sci raises an exception; fix doesn't. yacht = "Throatwobbler Mangrove" self.assertEquals(fix(yacht, 10), yacht) try: sci(yacht, 10) except NotANumber: pass else: self.fail("No exception on non-numeric sci") run_unittest(FpformatTest)
lang: Python
proba: 0.000001
diff:
@@ -959,59 +959,163 @@ ra 0 -%0A expected = expected%5B:-2%5D+'0'+expected%5B-2:%5D + if needed%0A num, exp = expected.split(%22e%22)%0A if len(exp) %3C 4:%0A exp = exp%5B0%5D + %220%22 + exp%5B1:%5D%0A expected = %22%25se%25s%22 %25 (num, exp) %0A%0A

commit: 728012090d3f24411e460b99f68f9b5754d38480
subject: Handle character substitution in html formatter
old_file: npc/formatters/html.py
new_file: npc/formatters/html.py
old_contents:
""" Markdown formatter for creating a page of characters. Has a single entry point `dump`. """ import html import markdown import tempfile from .. import util from mako.template import Template def dump(characters, outstream, *, include_metadata=None, metadata=None, prefs=None): """ Create a markdown character listing Args: characters (list): Character info dicts to show outstream (stream): Output stream include_metadata (string|None): Whether to include metadata, and what format to use.What kind of metadata to include, if any. Accepts values of 'mmd', 'yaml', or 'yfm'. Metadata will always include a title and creation date. metadata (dict): Additional metadata to insert. Ignored unless include_metadata is set. The keys 'title', and 'created' will overwrite the generated values for those keys. prefs (Settings): Settings object. Used to get the location of template files. Returns: A util.Result object. Openable will not be set. """ if not metadata: metadata = {} if include_metadata: # load and render template header_file = prefs.get("templates.listing.header.{}".format(include_metadata)) if not header_file: return util.Result( False, errmsg="Unrecognized metadata format option '{}'".format(include_metadata), errcode=6) header_template = Template(filename=header_file) outstream.write(header_template.render(metadata=metadata)) else: outstream.write("<!DOCTYPE html>\n<html>\n<head></head>\n<body>\n") with tempfile.TemporaryDirectory() as tempdir: for char in characters: body_file = prefs.get("templates.listing.character.html.{}".format(char.get_type_key())) if not body_file: body_file = prefs.get("templates.listing.character.html.default") body_template = Template(filename=body_file, module_directory=tempdir) outstream.write( markdown.markdown( body_template.render( character=char.copy_and_alter(html.escape)), ['markdown.extensions.extra'] )) outstream.write("</body>\n</html>\n") return util.Result(True)
lang: Python
proba: 0.000007
diff:
@@ -90,16 +90,30 @@ %60.%0A%22%22%22%0A%0A +import codecs%0A import h @@ -1141,16 +1141,256 @@ a = %7B%7D%0A%0A + # encode as ascii unless our stream has an opinion%0A try:%0A encoding = outstream.encoding%0A except AttributeError:%0A encoding = 'ascii'%0A%0A modstream = codecs.getwriter(encoding)(outstream, errors='xmlcharrefreplace')%0A%0A if i @@ -1789,35 +1789,35 @@ r_file)%0A -out +mod stream.write(hea @@ -1866,35 +1866,35 @@ else:%0A -out +mod stream.write(%22%3C! @@ -2330,27 +2330,27 @@ -out +mod stream.write @@ -2566,27 +2566,27 @@ ))%0A -out +mod stream.write

commit: 5f20029756f76380662684969a0235935cfb5f73
subject: Add customer to filter fields list.
old_file: nodeconductor/openstack/views.py
new_file: nodeconductor/openstack/views.py
old_contents:
import django_filters from rest_framework import viewsets from nodeconductor.core import filters as core_filters from nodeconductor.structure import views as structure_views from nodeconductor.openstack import models, serializers class OpenStackServiceFilter(django_filters.FilterSet): name = django_filters.CharFilter( name='settings__name' ) customer = django_filters.CharFilter( name='settings__customer__uuid' ) customer_url = core_filters.URLFilter( viewset=structure_views.CustomerViewSet, name='settings__customer__uuid' ) class Meta(object): model = models.OpenStackService fields = [ 'name', 'customer_url', ] class OpenStackServiceViewSet(structure_views.BaseServiceViewSet): queryset = models.OpenStackService.objects.all() serializer_class = serializers.ServiceSerializer import_serializer_class = serializers.InstanceImportSerializer filter_class = OpenStackServiceFilter class OpenStackServiceProjectLinkFilter(django_filters.FilterSet): service = django_filters.CharFilter( name='service__uuid' ) service_url = core_filters.URLFilter( viewset=OpenStackServiceViewSet, name='service__uuid' ) project = django_filters.CharFilter( name='project__uuid' ) project_url = core_filters.URLFilter( viewset=structure_views.ProjectViewSet, name='project__uuid' ) class Meta(object): model = models.OpenStackServiceProjectLink fields = [ 'service', 'service_url', 'project', 'project_url' ] class OpenStackServiceProjectLinkViewSet(structure_views.BaseServiceProjectLinkViewSet): queryset = models.OpenStackServiceProjectLink.objects.all() serializer_class = serializers.ServiceProjectLinkSerializer filter_class = OpenStackServiceProjectLinkFilter class FlavorViewSet(viewsets.ReadOnlyModelViewSet): queryset = models.Flavor.objects.all() serializer_class = serializers.FlavorSerializer lookup_field = 'uuid' class ImageViewSet(viewsets.ReadOnlyModelViewSet): queryset = models.Image.objects.all() serializer_class = serializers.ImageSerializer lookup_field = 'uuid' class InstanceFilter(django_filters.FilterSet): project = django_filters.CharFilter( name='service_project_link__project__uuid', lookup_type='icontains', distinct=True) project_name = django_filters.CharFilter( name='service_project_link__project__name', lookup_type='icontains', distinct=True) project_group_name = django_filters.CharFilter( name='service_project_link__project__project_groups__name', lookup_type='icontains', distinct=True) project_group = django_filters.CharFilter( name='service_project_link__project__project_groups__uuid', distinct=True) customer = django_filters.CharFilter( name='service_project_link__project__customer__uuid', distinct=True) customer_name = django_filters.CharFilter( name='service_project_link__project__customer__name', lookup_type='icontains', distinct=True) customer_native_name = django_filters.CharFilter( name='service_project_link__project__customer__native_name', lookup_type='icontains', distinct=True) customer_abbreviation = django_filters.CharFilter( name='service_project_link__project__customer__abbreviation', lookup_type='icontains', distinct=True) name = django_filters.CharFilter(lookup_type='icontains') description = django_filters.CharFilter(lookup_type='icontains') state = django_filters.NumberFilter() # In order to return results when an invalid value is specified strict = False class Meta(object): model = models.Instance fields = [ 'name', 'description', 'customer', 'customer_name', 'customer_native_name', 'customer_abbreviation', 'project', 'project_name', 'project_group_name', 'project_group', 'state', 
'start_time', 'created', 'ram', 'cores', 'system_volume_size', 'data_volume_size', ] order_by = [ 'name', '-name', 'state', '-state', 'service_project_link__project__customer__name', '-service_project_link__project__customer__name', 'service_project_link__project__customer__native_name', '-service_project_link__project__customer__native_name', 'service_project_link__project__customer__abbreviation', '-service_project_link__project__customer__abbreviation', 'service_project_link__project__name', '-service_project_link__project__name', 'service_project_link__project__project_groups__name', '-service_project_link__project__project_groups__name', 'created', '-created', 'ram', '-ram', 'cores', '-cores', 'system_volume_size', '-system_volume_size', 'data_volume_size', '-data_volume_size', ] order_by_mapping = { # Proper field naming 'customer_name': 'service_project_link__project__customer__name', 'customer_native_name': 'service_project_link__project__customer__native_name', 'customer_abbreviation': 'service_project_link__project__customer__abbreviation', 'project_name': 'service_project_link__project__name', 'project_group_name': 'service_project_link__project__project_groups__name', # Backwards compatibility 'project__customer__name': 'service_project_link__project__customer__name', 'project__name': 'service_project_link__project__name', 'project__project_groups__name': 'service_project_link__project__project_groups__name', } class InstanceViewSet(structure_views.BaseResourceViewSet): queryset = models.Instance.objects.all() serializer_class = serializers.InstanceSerializer filter_class = InstanceFilter def perform_provision(self, serializer): resource = serializer.save() backend = resource.get_backend() backend.provision( resource, flavor=serializer.validated_data['flavor'], image=serializer.validated_data['image'], ssh_key=serializer.validated_data.get('ssh_public_key'))
lang: Python
proba: 0.000001
diff:
@@ -679,32 +679,55 @@ 'name',%0A + 'customer'%0A 'cus

commit: d3c7f5de6a4c1d15ab3ffe19da18faaecd466fb6
subject: replace mysteriously missing haystack settings from staging
old_file: tndata_backend/tndata_backend/settings/staging.py
new_file: tndata_backend/tndata_backend/settings/staging.py
old_contents:
from .base import * DEBUG = False #DEBUG = True STAGING = True # Site's FQDN and URL. For building links in email. SITE_DOMAIN = "staging.tndata.org" SITE_URL = "https://{0}".format(SITE_DOMAIN) INSTALLED_APPS = INSTALLED_APPS + ( 'debug_toolbar', 'querycount', ) # Just like production, but without the cached template loader TEMPLATES[0]['OPTIONS']['debug'] = DEBUG TEMPLATES[0]['OPTIONS']['loaders'] = [ 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ] # django-cors-headers: https://github.com/ottoyiu/django-cors-headers/ CORS_ORIGIN_ALLOW_ALL = True # EMAIL via Mailgun. Production server details, below (staging.tndata.org) EMAIL_SUBJECT_PREFIX = "[Staging TNData] " EMAIL_HOST = 'smtp.mailgun.org' EMAIL_HOST_USER = 'postmaster@sandbox4dc4d62d8cf24785914c55630ab480e6.mailgun.org' EMAIL_HOST_PASSWORD = 'ac2a70a9988127ff7fa217f559c2d59a' EMAIL_PORT = '587' EMAIL_USE_TLS = True EMAIL_USE_SSL = False # Caching # Redis notes: redis_max_clients: 10000, edis_max_memory: 512mb REDIS_PASSWORD = 'VPoDYBZgeyktxArddu4EHrNMdFsUzf7TtFKTP' REDIS_HOST = 'worker.tndata.org' REDIS_CACHE_DB = 2 REDIS_CACHE_URL = 'redis://:{password}@{host}:{port}/{db}'.format( password=REDIS_PASSWORD, host=REDIS_HOST, port=REDIS_PORT, db=REDIS_CACHE_DB ) CACHES['default']['LOCATION'] = REDIS_CACHE_URL CACHES['default']['OPTIONS']['IGNORE_EXCEPTIONS'] = True # django-cacheops CACHEOPS_REDIS = { 'host': REDIS_HOST, 'port': REDIS_PORT, 'db': REDIS_CACHE_DB, 'socket_timeout': 5, 'password': REDIS_PASSWORD, } # Explicit setting for debug_toolbar DEBUG_TOOLBAR_PATCH_SETTINGS = False MIDDLEWARE_CLASSES = ( 'querycount.middleware.QueryCountMiddleware', 'debug_toolbar.middleware.DebugToolbarMiddleware', ) + MIDDLEWARE_CLASSES INTERNAL_IPS = ( '159.203.68.206', '127.0.0.1', '::1', )
lang: Python
proba: 0.000001
diff:
@@ -1424,16 +1424,197 @@ = True%0A%0A +# django-haystack settings for staging%0AHAYSTACK_CONNECTIONS%5B'default'%5D%5B'URL'%5D = 'http://worker.tndata.org:9200/'%0AHAYSTACK_CONNECTIONS%5B'default'%5D%5B'INDEX_NAME'%5D = 'haystack_staging'%0A%0A # django

commit: cba82ad3bc1a726402e4193aec8a49a85f9999f0
subject: Add an 'if 0''d block of code to numpy.distutils.log to ignore some log messages. Especially useful to turn on if you're developing by using eggs.
old_file: numpy/distutils/log.py
new_file: numpy/distutils/log.py
old_contents:
# Colored log, requires Python 2.3 or up. import sys from distutils.log import * from distutils.log import Log as old_Log from distutils.log import _global_log from misc_util import red_text, yellow_text, cyan_text, green_text, is_sequence, is_string def _fix_args(args,flag=1): if is_string(args): return args.replace('%','%%') if flag and is_sequence(args): return tuple([_fix_args(a,flag=0) for a in args]) return args class Log(old_Log): def _log(self, level, msg, args): if level >= self.threshold: if args: print _global_color_map[level](msg % _fix_args(args)) else: print _global_color_map[level](msg) sys.stdout.flush() def good(self, msg, *args): """If we'd log WARN messages, log this message as a 'nice' anti-warn message. """ if WARN >= self.threshold: if args: print green_text(msg % _fix_args(args)) else: print green_text(msg) sys.stdout.flush() _global_log.__class__ = Log good = _global_log.good def set_threshold(level, force=False): prev_level = _global_log.threshold if prev_level > DEBUG or force: # If we're running at DEBUG, don't change the threshold, as there's # likely a good reason why we're running at this level. _global_log.threshold = level if level <= DEBUG: info('set_threshold: setting thershold to DEBUG level, it can be changed only with force argument') else: info('set_threshold: not changing thershold from DEBUG level %s to %s' % (prev_level,level)) return prev_level def set_verbosity(v, force=False): prev_level = _global_log.threshold if v < 0: set_threshold(ERROR, force) elif v == 0: set_threshold(WARN, force) elif v == 1: set_threshold(INFO, force) elif v >= 2: set_threshold(DEBUG, force) return {FATAL:-2,ERROR:-1,WARN:0,INFO:1,DEBUG:2}.get(prev_level,1) _global_color_map = { DEBUG:cyan_text, INFO:yellow_text, WARN:red_text, ERROR:red_text, FATAL:red_text } # don't use INFO,.. flags in set_verbosity, these flags are for set_threshold. set_verbosity(0, force=True)
lang: Python
proba: 0
diff:
@@ -582,84 +582,236 @@ -print _global_color_map%5Blevel%5D(msg %25 _fix_args(args))%0A else:%0A +msg = msg %25 _fix_args(args)%0A if 0:%0A if msg.startswith('copying ') and msg.find(' -%3E ') != -1:%0A return%0A if msg.startswith('byte-compiling '):%0A return%0A

commit: 779bfca2e9fb39d29f644940a1ef5351ec4ced2b
subject: handle fundamental parsing errors in emails
old_file: ingestors/email/msg.py
new_file: ingestors/email/msg.py
old_contents:
from __future__ import unicode_literals import six import rfc822 import logging from time import mktime from datetime import datetime from collections import defaultdict from normality import safe_filename from flanker import mime from flanker.addresslib import address from flanker.mime.message.errors import DecodingError from ingestors.base import Ingestor from ingestors.support.temp import TempFileSupport from ingestors.support.plain import PlainTextSupport from ingestors.support.html import HTMLSupport from ingestors.util import join_path log = logging.getLogger(__name__) class RFC822Ingestor(Ingestor, TempFileSupport, HTMLSupport, PlainTextSupport): MIME_TYPES = ['multipart/mixed'] EXTENSIONS = ['eml', 'rfc822', 'email', 'msg'] SCORE = 6 def write_temp(self, part, temp_dir, file_name): file_name = safe_filename(file_name, default='attachment') out_path = join_path(temp_dir, file_name) with open(out_path, 'wb') as fh: if part.body is not None: body = part.body if isinstance(body, six.text_type): body = body.encode('utf-8') fh.write(body) return out_path def parse_headers(self, msg): self.result.title = msg.subject if msg.message_id and self.result.id is None: self.result.id = six.text_type(msg.message_id) if msg.headers.get('From'): addr = address.parse(msg.headers.get('From')) if addr is not None: if addr.display_name and addr.display_name != addr.address: self.result.author = addr.display_name self.result.entities.append(addr.display_name) self.result.emails.append(addr.address) for hdr in ['To', 'CC', 'BCC']: if msg.headers.get(hdr): for addr in address.parse_list(msg.headers.get(hdr)): if addr.display_name and addr.display_name != addr.address: self.result.entities.append(addr.display_name) self.result.emails.append(addr.address) date = msg.headers.get('Date') date = rfc822.parsedate(date) if date is not None: self.result.timestamp = datetime.fromtimestamp(mktime(date)) self.result.headers = dict([(k, unicode(v)) for k, v in msg.headers.items()]) def ingest(self, file_path): with self.create_temp_dir() as temp_dir: with open(file_path, 'rb') as fh: self.ingest_message(fh.read(), temp_dir) def ingest_message(self, data, temp_dir): msg = mime.from_string(data) self.parse_headers(msg) self.extract_plain_text_content(None) bodies = defaultdict(list) for part in msg.walk(with_self=True): try: if part.body is None: continue except DecodingError: log.error("Cannot decode part: [%s]", self.result) continue file_name = part.detected_file_name mime_type = six.text_type(part.detected_content_type) mime_type = mime_type.lower().strip() if part.is_attachment(): out_path = self.write_temp(part, temp_dir, file_name) child_id = join_path(self.result.id, file_name) self.manager.handle_child(self.result, out_path, id=child_id, file_name=file_name, mime_type=mime_type) if part.is_body(): bodies[mime_type].append(part.body) if 'text/html' in bodies: self.extract_html_content('\n\n'.join(bodies['text/html'])) if 'text/plain' in bodies: self.extract_plain_text_content('\n\n'.join(bodies['text/plain']))
lang: Python
proba: 0.000004
diff:
@@ -498,32 +498,78 @@ ort HTMLSupport%0A +from ingestors.exc import ProcessingException%0A from ingestors.u @@ -2712,24 +2712,41 @@ temp_dir):%0A + try:%0A msg @@ -2770,16 +2770,125 @@ g(data)%0A + except DecodingError as derr:%0A raise ProcessingException('Cannot parse email: %25s' %25 derr)%0A

commit: fadfc73f101915a9d8a9f703e2bfab59bc16f631
subject: Change language code.
old_file: src/oauau/settings.py
new_file: src/oauau/settings.py
old_contents:
""" Django settings for oauau project. Generated by 'django-admin startproject' using Django 1.8.4. For more information on this file, see https://docs.djangoproject.com/en/1.8/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.8/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'l0o2cer+vv8ufj5%swi2qumos7xhr&)1)!e!*qk1d=+vg_h*-j' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'oauau.newsletter', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'django.middleware.security.SecurityMiddleware', ) ROOT_URLCONF = 'oauau.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(BASE_DIR, 'templates')], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'oauau.wsgi.application' # Database # https://docs.djangoproject.com/en/1.8/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Internationalization # https://docs.djangoproject.com/en/1.8/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.8/howto/static-files/ STATIC_URL = '/static/' STATICFILES_DIRS = ( os.path.join(BASE_DIR, "static"), os.path.join(BASE_DIR, "media"), ) MAILCHIMP_API_KEY = '' MAILCHIMP_WORKBOOK_LIST_ID = 'b595af5d99' MAILCHIMP_WORKBOOK_LIST_NAME = 'O Au Au - Livro De Atividades 1' MAILCHIMP_LAUNCH_LIST_ID = 'a7fdd160e3' MAILCHIMP_LAUNCH_LIST_NAME = 'O Au Au - Lançamento' MADMIMI_USER = '' MADMIMI_API_KEY = '' MADMIMI_NEWSLETTER_LIST_ID = 'O Au Au' MADMIMI_NEWSLETTER_LIST_NAME = 'O Au Au' MADMIMI = 'madmimi' MAILCHIMP = 'mailchimp' CURRENT_EMAIL_MARKETING_PROVIDER = MAILCHIMP EMAIL_HOST = "" EMAIL_HOST_PASSWORD = "" EMAIL_HOST_USER = "" DEFAULT_FROM_EMAIL = "(O au au) " \ "<noreply@oauau.com.br>" try: from .developmentsettings import * except ImportError: from .productionsettings import *
lang: Python
proba: 0.000003
diff:
@@ -2488,13 +2488,13 @@ = ' -en-us +pt-br '%0A%0AT

commit: ec4c9a07dc5ca2fab6b341932f65d0cfbd6a332b
subject: Bump version to 1.1
old_file: molly/__init__.py
new_file: molly/__init__.py
old_contents:
""" Molly Project http://mollyproject.org A framework for creating Mobile Web applications for HE/FE institutions. """ __version__ = '1.0'
lang: Python
proba: 0
diff:
@@ -134,6 +134,6 @@ '1. -0 +1 '

commit: 75e61ecf5efebe78676512d714fc7551f3dfac4c
subject: Fix test
old_file: src/program/lwaftr/tests/subcommands/generate_binding_table_test.py
new_file: src/program/lwaftr/tests/subcommands/generate_binding_table_test.py
old_contents:
""" Test uses "snabb lwaftr generate-binding-table" subcommand. Does not need NICs as it doesn't use any network functionality. The command is just to produce a binding table config result. """ from test_env import ENC, SNABB_CMD, BaseTestCase NUM_SOFTWIRES = 10 class TestGenerateBindingTable(BaseTestCase): generation_args = ( str(SNABB_CMD), 'lwaftr', 'generate-binding-table', '193.5.1.100', str(NUM_SOFTWIRES), 'fc00::100', 'fc00:1:2:3:4:5:0:7e', '1') def test_binding_table_generation(self): """ This runs the generate-binding-table subcommand and verifies that it gets back the number of softwires it expects. Usage can be found in the README; however, it's: <ipv4> <num_ipv4s> <br_address> <b4> <psid_len> <shift> """ # Get generate-binding-table command output. output = self.run_cmd(self.generation_args) # Split it into lines. config = str(output, ENC).split('\n')[:-1] # The output should be "binding-table {" followed by NUM_SOFTWIRES # softwires, then "}". self.assertIn('binding-table {', config[0], 'Start line: %s' % config[0]) for idx, softwire in enumerate(config[1:-1]): line_msg = 'Line #%d: %s' % (idx + 2, softwire) self.assertTrue(softwire.startswith(' softwire {'), line_msg) self.assertTrue(softwire.endswith('}'), line_msg) self.assertIn(config[-1], '}', 'End line: %s' % config[0]) # Check that the number of lines is the number of softwires # plus the start and end lines. self.assertEqual(len(config), NUM_SOFTWIRES + 2, len(config))
Python
0.000004
@@ -30,29 +30,29 @@ enerate- -binding-table +configuration %22 subcom @@ -375,29 +375,29 @@ enerate- -binding-table +configuration ', '193. @@ -564,29 +564,29 @@ enerate- -binding-table +configuration subcomm @@ -619,55 +619,49 @@ -it gets back the number of softwires it expects +the output contains a valid binding-table .%0A%0A @@ -816,29 +816,29 @@ enerate- -binding-table +configuration command @@ -996,198 +996,248 @@ # -The output should be %22binding-table %7B%22 followed by NUM_SOFTWIRES%0A # softwires, then %22%7D%22.%0A self.assertIn('binding-table %7B', config%5B0%5D,%0A 'Start line: %25s' %25 +Check out that output is softwire-config plus a binding-table.%0A self.assertIn('softwire-config %7B', config%5B0%5D.strip())%0A self.assertIn('binding-table %7B', config%5B1%5D.strip())%0A%0A lineno = 2%0A while lineno %3C len( config -%5B0%5D)%0A +): %0A @@ -1245,113 +1245,113 @@ -for idx, softwire in enumerate(config%5B1:-1%5D):%0A line_msg = 'Line #%25d: %25s' %25 (idx + 2, softwire) + line = config%5Blineno%5D.strip()%0A if not line.startswith('softwire %7B'):%0A break %0A @@ -1375,23 +1375,19 @@ ertTrue( -softwir +lin e.starts @@ -1392,18 +1392,16 @@ tswith(' - softwire @@ -1404,26 +1404,16 @@ wire %7B') -, line_msg )%0A @@ -1434,23 +1434,19 @@ ertTrue( -softwir +lin e.endswi @@ -1456,275 +1456,140 @@ '%7D') -, line_msg)%0A%0A self.assertIn(config%5B-1%5D, '%7D',%0A 'End line: %25s' %25 config%5B0%5D)%0A%0A # Check that the number of lines is the number of softwires%0A # plus the start and end lines.%0A self.assertEqual(len(config), NUM_SOFTWIRES + 2, len(config) +)%0A lineno = lineno + 1%0A%0A self.assertTrue(lineno %3C len(config))%0A self.assertTrue(config%5Blineno%5D.strip() == '%7D' )%0A
f3de5cae870f9df2435ae8587cc7d17d059728b1
Add test, fixed #1073.
playhouse/tests/test_speedups.py
playhouse/tests/test_speedups.py
import datetime import unittest from peewee import * from playhouse import _speedups as speedups from playhouse.tests.base import database_initializer from playhouse.tests.base import ModelTestCase db = database_initializer.get_in_memory_database(use_speedups=True) class BaseModel(Model): class Meta: database = db class Note(BaseModel): content = TextField() timestamp = DateTimeField(default=datetime.datetime.now) class TestResultWrappers(ModelTestCase): requires = [Note] def setUp(self): super(TestResultWrappers, self).setUp() for i in range(10): Note.create(content='note-%s' % i) def test_dirty_fields(self): note = Note.create(content='huey') self.assertFalse(note.is_dirty()) self.assertEqual(note.dirty_fields, []) ndb = Note.get(Note.content == 'huey') self.assertFalse(ndb.is_dirty()) self.assertEqual(ndb.dirty_fields, []) ndb.content = 'x' self.assertTrue(ndb.is_dirty()) self.assertEqual(ndb.dirty_fields, ['content']) def test_tuple_results(self): query = Note.select().order_by(Note.id).tuples() qr = query.execute() self.assertTrue(isinstance(qr, speedups._TuplesQueryResultWrapper)) results = list(qr) self.assertEqual(len(results), 10) first, last = results[0], results[-1] self.assertEqual(first[:2], (1, 'note-0')) self.assertEqual(last[:2], (10, 'note-9')) self.assertTrue(isinstance(first[2], datetime.datetime)) def test_dict_results(self): query = Note.select().order_by(Note.id).dicts() qr = query.execute() self.assertTrue(isinstance(qr, speedups._DictQueryResultWrapper)) results = list(qr) self.assertEqual(len(results), 10) first, last = results[0], results[-1] self.assertEqual(sorted(first.keys()), ['content', 'id', 'timestamp']) self.assertEqual(first['id'], 1) self.assertEqual(first['content'], 'note-0') self.assertTrue(isinstance(first['timestamp'], datetime.datetime)) self.assertEqual(last['id'], 10) self.assertEqual(last['content'], 'note-9') def test_model_results(self): query = Note.select().order_by(Note.id) qr = query.execute() self.assertTrue(isinstance(qr, speedups._ModelQueryResultWrapper)) results = list(qr) self.assertEqual(len(results), 10) first, last = results[0], results[-1] self.assertTrue(isinstance(first, Note)) self.assertEqual(first.id, 1) self.assertEqual(first.content, 'note-0') self.assertTrue(isinstance(first.timestamp, datetime.datetime)) self.assertEqual(last.id, 10) self.assertEqual(last.content, 'note-9') def test_aliases(self): query = (Note .select( Note.id, Note.content.alias('ct'), Note.timestamp.alias('ts')) .order_by(Note.id)) rows = list(query.tuples()) self.assertEqual(len(rows), 10) self.assertEqual(rows[0][:2], (1, 'note-0')) self.assertTrue(isinstance(rows[0][2], datetime.datetime)) rows = list(query.dicts()) first = rows[0] self.assertEqual(sorted(first.keys()), ['ct', 'id', 'ts']) self.assertEqual(first['id'], 1) self.assertEqual(first['ct'], 'note-0') self.assertTrue(isinstance(first['ts'], datetime.datetime)) rows = list(query) first = rows[0] self.assertTrue(isinstance(first, Note)) self.assertEqual(first.id, 1) self.assertEqual(first.ct, 'note-0') self.assertIsNone(first.content) self.assertTrue(isinstance(first.ts, datetime.datetime)) def test_fill_cache(self): with self.assertQueryCount(1): query = Note.select().order_by(Note.id) qr = query.execute() qr.fill_cache(3) self.assertEqual(qr._ct, 3) self.assertEqual(len(qr._result_cache), 3) # No changes to result wrapper. 
notes = query[:3] self.assertEqual([n.id for n in notes], [1, 2, 3]) self.assertEqual(qr._ct, 4) self.assertEqual(len(qr._result_cache), 4) self.assertFalse(qr._populated) qr.fill_cache(5) notes = query[:5] self.assertEqual([n.id for n in notes], [1, 2, 3, 4, 5]) self.assertEqual(qr._ct, 6) self.assertEqual(len(qr._result_cache), 6) notes = query[:7] self.assertEqual([n.id for n in notes], [1, 2, 3, 4, 5, 6, 7]) self.assertEqual(qr._ct, 8) self.assertFalse(qr._populated) qr.fill_cache() self.assertEqual( [n.id for n in query], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) self.assertEqual(qr._ct, 10) self.assertTrue(qr._populated)
Python
0
@@ -1069,24 +1069,377 @@ content'%5D)%0A%0A + def test_gh_regression_1073_func_coerce(self):%0A func = fn.GROUP_CONCAT(Note.id).alias('note_ids')%0A query = Note.select(func)%0A self.assertRaises(ValueError, query.get)%0A%0A query = Note.select(func.coerce(False))%0A result = query.get().note_ids%0A self.assertEqual(result, ','.join(str(i) for i in range(1, 11)))%0A%0A def test
72e0b19383ed00b83d2a3897d61e5130aea4d736
Change out of the temp directory before deleting
monty/tempfile.py
monty/tempfile.py
""" Temporary directory and file creation utilities. """ from __future__ import absolute_import import os import tempfile import shutil try: from pathlib import Path except ImportError: try: from pathlib2 import Path except ImportError: Path = None from monty.shutil import copy_r __author__ = "Shyue Ping Ong" __copyright__ = "Copyright 2012, The Materials Project" __version__ = "0.1" __maintainer__ = "Shyue Ping Ong" __email__ = "ongsp@ucsd.edu" __date__ = "3/6/14" class ScratchDir(object): """ .. note:: With effect from Python 3.2, tempfile.TemporaryDirectory already implements much of the functionality of ScratchDir. However, it does not provide options for copying of files to and from (though it is possible to do this with other methods provided by shutil). Creates a "with" context manager that automatically handles creation of temporary directories (utilizing Python's build in temp directory functions) and cleanup when done. This improves on Python's built in functions by allowing for truly temporary workspace that are deleted when it is done. The way it works is as follows: 1. Create a temp dir in specified root path. 2. Optionally copy input files from current directory to temp dir. 3. Change to temp dir. 4. User performs specified operations. 5. Optionally copy generated output files back to original directory. 6. Change back to original directory. 7. Delete temp dir. """ SCR_LINK = "scratch_link" def __init__(self, rootpath, create_symbolic_link=False, copy_from_current_on_enter=False, copy_to_current_on_exit=False): """ Initializes scratch directory given a **root** path. There is no need to try to create unique directory names. The code will generate a temporary sub directory in the rootpath. The way to use this is using a with context manager. Example:: with ScratchDir("/scratch"): do_something() If the root path does not exist or is None, this will function as a simple pass through, i.e., nothing happens. Args: rootpath (str/Path): The path in which to create temp subdirectories. If this is None, no temp directories will be created and this will just be a simple pass through. create_symbolic_link (bool): Whether to create a symbolic link in the current working directory to the scratch directory created. copy_from_current_on_enter (bool): Whether to copy all files from the current directory (recursively) to the temp dir at the start, e.g., if input files are needed for performing some actions. Defaults to False. copy_to_current_on_exit (bool): Whether to copy files from the scratch to the current directory (recursively) at the end. E .g., if output files are generated during the operation. Defaults to False. 
""" if Path is not None and isinstance(rootpath, Path): rootpath = str(rootpath) self.rootpath = os.path.abspath(rootpath) if rootpath is not None \ else None self.cwd = os.getcwd() self.create_symbolic_link = create_symbolic_link self.start_copy = copy_from_current_on_enter self.end_copy = copy_to_current_on_exit def __enter__(self): tempdir = self.cwd if self.rootpath is not None and os.path.exists(self.rootpath): tempdir = tempfile.mkdtemp(dir=self.rootpath) self.tempdir = os.path.abspath(tempdir) if self.start_copy: copy_r(".", tempdir) if self.create_symbolic_link: os.symlink(tempdir, ScratchDir.SCR_LINK) os.chdir(tempdir) return tempdir def __exit__(self, exc_type, exc_val, exc_tb): if self.rootpath is not None and os.path.exists(self.rootpath): if self.end_copy: tempdir = tempfile.mkdtemp(dir=self.cwd) copy_r(self.cwd, tempdir) for f in os.listdir(self.cwd): fpath = os.path.join(self.cwd, f) try: if f != os.path.basename(tempdir): if os.path.isfile(fpath): os.remove(fpath) else: shutil.rmtree(fpath) except: # Ignore file not found. pass copy_r(".", self.cwd) shutil.rmtree(tempdir) shutil.rmtree(self.tempdir) os.chdir(self.cwd) if self.create_symbolic_link: os.remove(ScratchDir.SCR_LINK)
Python
0
@@ -4791,32 +4791,80 @@ rmtree(tempdir)%0A + %0A os.chdir(self.cwd)%0A shut @@ -4899,34 +4899,16 @@ -os.chdir(self.cwd) %0A
224522e88347d4eafd68202222bb83c2d596524b
Modify SCons tools
conda/python-dev/boost_python.py
conda/python-dev/boost_python.py
from types import MethodType import itertools def generate(env): """Add Builders and construction variables to the Environment.""" if not 'boost_python' in env['TOOLS'][:-1]: env.Tool('system') env.AppendUnique(LIBS = ['boost_python']) env.AppendUnique(CPPDEFINES = ['BOOST_PYTHON_DYNAMIC_LIB', 'BOOST_ALL_NO_LIB']) def BuildBoostPython(env, target, sources): # Code to build "target" from "source" target = env.File(target).srcnode() targets = list(itertools.chain(*[env.SharedObject(None, source) for source in sources if source.suffix in ['.cpp', '.cxx', '.c++']])) print sources sources = [source for source in sources if source.suffix == '.h'] print sources SYSTEM = env['SYSTEM'] print SYSTEM if SYSTEM == 'linux' and len(sources) == 1: cmd = env.Command(sources[0].target_from_source('', '.h.gch'), sources[0], '$CXX -o $TARGET -x c++-header -c -fPIC $SHCXXFLAGS $_CCCOMCOM $SOURCE') env.Depends(targets, cmd) env.Depends(target, targets) source = env.File('response_file.rsp') with open(source.abspath, 'w') as filehandler: filehandler.write(' '.join(target.abspath.replace('\\','/') + ' ' for target in targets)) env.Append(LINKFLAGS = '@' + source.abspath) kwargs = dict(SHLIBSUFFIX = '.so', SHLIBPREFIX = '') if SYSTEM == 'osx': return env.LoadableModule(target, [], LDMODULESUFFIX='.so', FRAMEWORKSFLAGS = '-flat_namespace -undefined suppress', **kwargs) else: return env.LoadableModule(target, [], **kwargs) env.BuildBoostPython = MethodType(BuildBoostPython, env) env.Tool('python') def exists(env): return 1
Python
0
@@ -689,34 +689,8 @@ %5D))%0A - print sources%0A @@ -779,81 +779,30 @@ -print sources%0A SYSTEM = env%5B'SYSTEM'%5D%0A print SYSTEM +SYSTEM = env%5B'SYSTEM'%5D %0A
1ca5ba7884d35193f0a035b8e8f6ac4ac6032928
stop cycling after applying the formula
mpexpertadjust.py
mpexpertadjust.py
#!/usr/bin/env python import os, sys, csv import tkinter, tkinter.messagebox STANDARD_FILE='defstd.txt' STANDARD_NAME=0 STANDARD_ELEMENT=1 STANDARD_QTY=2 SAMPLE_NAME=0 SAMPLE_DATE=2 SAMPLE_ELEMENT=4 SAMPLE_QTY=8 OUTPUT_FILE='output.csv' def is_standard(label, element, standards): """Check if a label is a standard""" for s in standards: if label.strip()==s[STANDARD_NAME].strip() and element.strip()==s[STANDARD_ELEMENT].strip(): return (True,s[STANDARD_QTY]) return (False,) def written_lines_reversed(f): """Return all lines written in the output file as a list in reverse order""" f.seek(0) # restart from the beginning of the file res = [] while True: line = f.readline() if line=='': break res.append(line) f.seek(2) # set the pointer at the end of the file return res.reverse() ### MAIN PROGRAM ### window = tkinter.Tk() window.wm_withdraw() if len(sys.argv) == 1: tkinter.messagebox.showinfo(title="MPExpertAdjust Error", message='No input file supplied') sys.exit(1) # READ STANDARD FILE standards = [] try: f = open(STANDARD_FILE,'r') reader = csv.reader(f, delimiter=';') for i in reader: if len(i)==0: continue standards.append([elem.strip() for elem in i]) f.close() except: tkinter.messagebox.showinfo(title="MPExpertAdjust Error", message='Malformed standard file '+STANDARD_FILE) sys.exit(1) # READ SAMPLE FILE SAMPLE_FILE=sys.argv[1] samples = [] try: f = open(SAMPLE_FILE,'r') while True: line = f.readline() if line == '': break if line == '\n': continue if line.startswith('Label,Type'): continue if line[0] == '\x00': continue if line[0] == '\ufeff': continue line_ar = line.split(',') if len(line_ar)>=2 and (line_ar[1]=='STD' or line_ar[1]=='BLK'): continue samples.append([elem.strip() for elem in line_ar]) f.close() except: tkinter.messagebox.showinfo(title="MPExpertAdjust Error", message='Malformed input file '+SAMPLE_FILE) sys.exit(1) #print(standards) #Eprint(samples) with open(OUTPUT_FILE,'w+') as f: standards_present=[] for row in samples: std = is_standard(row[SAMPLE_NAME],row[SAMPLE_ELEMENT],standards) if std[0]==True: f.write(row[SAMPLE_NAME]+','+row[SAMPLE_ELEMENT]+','+row[SAMPLE_QTY]+','+std[1]+','+row[SAMPLE_DATE]+'\n') continue out_rev = written_lines_reversed(f) # now we reverse the already written output file and we parse it # searching for the first standard with the current element for line in out_rev: if row[SAMPLE_ELEMENT]==line[1] and is_standard(line[0],row[1],standards)[0]==True: # standard found! Applying the forumula res = (line[3]*row[SAMPLE_QTY])/line[2] dilution = row[0].split(' ')[-1] res = res*dilution f.write(row[SAMPLE_NAME]+','+row[SAMPLE_ELEMENT]+','+row[SAMPLE_QTY]+','+res+','+row[SAMPLE_DATE]+'\n')
Python
0
@@ -2760,28 +2760,39 @@ +','+row%5BSAMPLE_DATE%5D+'%5Cn')%0A +%09%09%09%09break%0A%0A
fc22465decac6a33543e5232097af7ea847c4029
Bump version to 1.0.1-machtfit-41
src/oscar/__init__.py
src/oscar/__init__.py
import os # Use 'dev', 'beta', or 'final' as the 4th element to indicate release type. VERSION = (1, 0, 1, 'machtfit', 40) def get_short_version(): return '%s.%s' % (VERSION[0], VERSION[1]) def get_version(): return '{}.{}.{}-{}-{}'.format(*VERSION) # Cheeky setting that allows each template to be accessible by two paths. # Eg: the template 'oscar/templates/oscar/base.html' can be accessed via both # 'base.html' and 'oscar/base.html'. This allows Oscar's templates to be # extended by templates with the same filename OSCAR_MAIN_TEMPLATE_DIR = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'templates/oscar') OSCAR_CORE_APPS = [ 'oscar', 'oscar.apps.analytics', 'oscar.apps.checkout', 'oscar.apps.address', 'oscar.apps.shipping', 'oscar.apps.catalogue', 'oscar.apps.partner', 'oscar.apps.basket', 'oscar.apps.payment', 'oscar.apps.offer', 'oscar.apps.order', 'oscar.apps.customer', 'oscar.apps.voucher', 'oscar.apps.dashboard', 'oscar.apps.dashboard.users', 'oscar.apps.dashboard.orders', 'oscar.apps.dashboard.catalogue', 'oscar.apps.dashboard.offers', 'oscar.apps.dashboard.partners', 'oscar.apps.dashboard.ranges', # 3rd-party apps that oscar depends on 'treebeard', 'sorl.thumbnail', 'django_tables2', ] def get_core_apps(overrides=None): """ Return a list of oscar's apps amended with any passed overrides """ if not overrides: return OSCAR_CORE_APPS def get_app_label(app_label, overrides): pattern = app_label.replace('oscar.apps.', '') for override in overrides: if override.endswith(pattern): if 'dashboard' in override and 'dashboard' not in pattern: continue return override return app_label apps = [] for app_label in OSCAR_CORE_APPS: apps.append(get_app_label(app_label, overrides)) return apps
Python
0
@@ -115,17 +115,17 @@ tfit', 4 -0 +1 )%0A%0A%0Adef
14ee6e2e9986c58fdeb8e482f3426b756ab1d2cb
Bump dev version
mtools/version.py
mtools/version.py
#!/usr/bin/env python3 """Mtools version.""" __version__ = '1.6.4'
Python
0
@@ -60,9 +60,13 @@ '1. -6.4 +7.0-dev '%0A
f83ce11dccd7209e4c124e9dadbcbbd86568e320
Comment reason why the example is commented out
numba/tests/compile_with_pycc.py
numba/tests/compile_with_pycc.py
import cmath import numpy as np from numba import exportmany, export from numba.pycc import CC # # New API # cc = CC('pycc_test_simple') @cc.export('multf', 'f4(f4, f4)') @cc.export('multi', 'i4(i4, i4)') def mult(a, b): return a * b _two = 2 # This one can't be compiled by the legacy API as it doesn't execute # the script in a proper module. @cc.export('square', 'i8(i8)') def square(u): return u ** _two # These ones need helperlib cc_helperlib = CC('pycc_test_helperlib') @cc_helperlib.export('power', 'i8(i8, i8)') def power(u, v): return u ** v @cc_helperlib.export('sqrt', 'c16(c16)') def sqrt(u): return cmath.sqrt(u) @cc_helperlib.export('size', 'i8(f8[:])') def sqrt(arr): return arr.size # This one clashes with libc random() unless pycc takes measures # to disambiguate implementation names. @cc_helperlib.export('random', 'f8(i4)') def random_impl(seed): np.random.seed(seed) return np.random.random() # These ones need NRT cc_nrt = CC('pycc_test_nrt') cc_nrt.use_nrt = True @cc_nrt.export('zero_scalar', 'f8(i4)') def zero_scalar(n): arr = np.zeros(n) return arr[-1] #@cc_nrt.export('zeros', 'f8(i4)') #def empty_scalar(n): #arr = np.empty(n) #return arr[-1] # # Legacy API # exportmany(['multf f4(f4,f4)', 'multi i4(i4,i4)'])(mult) # Needs to link to helperlib to due with complex arguments # export('multc c16(c16,c16)')(mult) export('mult f8(f8, f8)')(mult)
Python
0.000008
@@ -1127,16 +1127,56 @@ rr%5B-1%5D%0A%0A +# Fails because it needs an environment%0A #@cc_nrt @@ -1191,24 +1191,27 @@ 'zeros', 'f8 +%5B:%5D (i4)')%0A#def @@ -1214,67 +1214,41 @@ def -empty_scalar(n):%0A #arr = np.empty(n)%0A #return arr%5B-1%5D +zeros(n):%0A #return np.zeros(n) %0A%0A%0A#
2f55f00c17b51f24b5407182516c22baead08879
remove BeautifulSoup for now
plugins/slideshare/slideshare.py
plugins/slideshare/slideshare.py
#!/usr/bin/env python import urllib2 import re import urllib import time import sha import BeautifulSoup from BeautifulSoup import BeautifulStoneSoup from optparse import OptionParser TOTALIMPACT_SLIDESHARE_KEY = "nyHCUoNM" TOTALIMPACT_SLIDESHARE_SECRET = "z7sRiGCG" SLIDESHARE_DOI_URL = "http://www.slideshare.net/api/2/get_slideshow?api_key=nyHCUoNM&detailed=1&ts=%s&hash=%s&slideshow_url=%s" SLIDESHARE_DOWNLOADS_PATTERN = re.compile("<NumDownloads>(?P<stats>\d+)</NumDownloads>", re.DOTALL) SLIDESHARE_VIEWS_PATTERN = re.compile("<NumViews>(?P<stats>\d+)</NumViews>", re.DOTALL) SLIDESHARE_COMMENTS_PATTERN = re.compile("<NumComments>(?P<stats>\d+)</NumComments>", re.DOTALL) SLIDESHARE_FAVORITES_PATTERN = re.compile("<NumFavorites>(?P<stats>\d+)</NumFavorites>", re.DOTALL) def get_page(id): if not id: return(None) ts = time.time() hash_combo = sha.new(TOTALIMPACT_SLIDESHARE_SECRET + str(ts)).hexdigest() url = SLIDESHARE_DOI_URL %(ts, hash_combo, id) #print url try: page = urllib2.urlopen(url).read() except urllib2.HTTPError, err: if err.code == 404: page = None else: raise return(page) def get_stats(page): if not page: return(None) if (False): soup = BeautifulStoneSoup(page) downloads = soup.numdownloads.text views = soup.numviews.text comments = soup.numcomments.text favorites = soup.numfavorites.text matches = SLIDESHARE_DOWNLOADS_PATTERN.search(page) if matches: downloads = matches.group("stats") matches = SLIDESHARE_VIEWS_PATTERN.search(page) if matches: views = matches.group("stats") matches = SLIDESHARE_COMMENTS_PATTERN.search(page) if matches: comments = matches.group("stats") matches = SLIDESHARE_FAVORITES_PATTERN.search(page) if matches: favorites = matches.group("stats") response = {"downloads":downloads, "views":views, "comments":comments, "favorites":favorites} return(response) from optparse import OptionParser def main(): parser = OptionParser(usage="usage: %prog [options] filename", version="%prog 1.0") #parser.add_option("-x", "--xhtml", # action="store_true", # dest="xhtml_flag", # default=False, # help="create a XHTML template instead of HTML") (options, args) = parser.parse_args() if len(args) != 1: parser.error("wrong number of arguments") #print options #print args id = args[0] page = get_page(id) response = get_stats(page) print response return(response) if __name__ == '__main__': main() #example = "http://www.slideshare.net/hpiwowar/7-data-citation-challenges-illustrated-with-data-includes-elephants"
Python
0
@@ -78,16 +78,17 @@ ort sha%0A +# import B @@ -100,16 +100,17 @@ fulSoup%0A +# from Bea
63a3e6e0c65fa17e6abe58da06b4bdfa20c62bfe
Add onchange for set vector in orders
mx_agent/agent.py
mx_agent/agent.py
# -*- coding: utf-8 -*- ############################################################################### # # Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################### import os import sys import logging import openerp import openerp.netsvc as netsvc import openerp.addons.decimal_precision as dp from openerp.osv import fields, osv, expression, orm from datetime import datetime, timedelta from dateutil.relativedelta import relativedelta from openerp import SUPERUSER_ID, api from openerp import tools from openerp.tools.translate import _ from openerp.tools.float_utils import float_round as round from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP, float_compare) _logger = logging.getLogger(__name__) class SaleOrder(orm.Model): """ Model name: Sale Order """ _inherit = 'sale.order' # TODO onchange for setup from partner _columns = { 'mx_agent_id': fields.many2one('res.partner', 'Agent', domain=[('is_agent', '=', True)]), } class AccountInvoice(orm.Model): """ Model name: Account Invoice """ _inherit = 'account.invoice' # TODO onchange for setup from partner _columns = { 'mx_agent_id': fields.many2one('res.partner', 'Agent', domain=[('is_agent', '=', True)]), } class StockPicking(orm.Model): """ Model name: Stock Picking """ _inherit = 'stock.picking' # TODO onchange for setup from partner _columns = { 'mx_agent_id': fields.many2one('res.partner', 'Agent', domain=[('is_agent', '=', True)]), } class StockDdt(orm.Model): """ Model name: Stock DDT """ _inherit = 'stock.ddt' # TODO onchange for setup from partner _columns = { 'mx_agent_id': fields.many2one('res.partner', 'Agent', domain=[('is_agent', '=', True)]), } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
Python
0
@@ -1692,32 +1692,710 @@ up from partner%0A + def on_change_partner_id(self, cr, uid, ids, partner_id, context=None):%0A res = super(SaleOrder, self).on_change_partner_id(%0A cr, uid, ids, partner_id, context=context)%0A %0A %0A # Update agent field for partner form%0A # TODO propagate!!!%0A if 'value' not in res:%0A res%5B'value'%5D = %7B%7D%0A if partner_id:%0A partner_proxy = self.pool.get('res.partner').browse(%0A cr, uid, partner_id, context=context)%0A res%5B'value'%5D%5B%0A 'mx_agent_id'%5D = partner_proxy.agent_id.id%0A else: %0A %0A res%5B'value'%5D%5B'mx_agent_id'%5D = False%0A return res %0A %0A %0A _column
6eedd6e5b96d9ee051e7708c4c127fdfb6c2a92b
modify file : add class Report and Score
NippoKun/report/models.py
NippoKun/report/models.py
from django.db import models # Create your models here.
Python
0
@@ -1,8 +1,52 @@ +from django.contrib.auth.models import User%0A from dja @@ -67,16 +67,17 @@ models%0A%0A +%0A # Create @@ -95,8 +95,727 @@ s here.%0A +%0A%0Aclass Report(models.Model):%0A report_author = models.ForeignKey(User, related_name='report_author')%0A report_title = models.CharField(max_length=50)%0A report_content = models.TextField(max_length=999)%0A created_at = models.DateTimeField(auto_now_add=True)%0A updated_at = models.DateTimeField(auto_now=True)%0A%0A%0Aclass Score(models.Model):%0A report = models.ForeignKey(Report, related_name='score')%0A score_author = models.ForeignKey(User, related_name='score_author')%0A score = models.IntegerField()%0A evaluate_point = models.TextField(max_length=30)%0A comment = models.TextField(max_length=999, blank=True)%0A average_score = models.FloatField()%0A scored_at = models.DateTimeField(auto_now=True)%0A
88d2918606870ef7bdaafda87b37537d21c02036
Extend failed and end with traceback
polyaxon_client/tracking/base.py
polyaxon_client/tracking/base.py
# -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function import atexit import sys import time from polystores.stores.manager import StoreManager from polyaxon_client import PolyaxonClient, settings from polyaxon_client.exceptions import PolyaxonClientException from polyaxon_client.tracking.paths import get_outputs_path from polyaxon_client.tracking.utils.project import get_project_info class BaseTracker(object): def __init__(self, project=None, client=None, track_logs=True, track_code=True, track_env=True, outputs_store=None): if settings.NO_OP: return if not settings.IN_CLUSTER and project is None: raise PolyaxonClientException('Please provide a valid project.') self.last_status = None self.client = client or PolyaxonClient() if settings.IN_CLUSTER: self.user = None else: self.user = (self.client.auth.get_user().username if self.client.api_config.schema_response else self.client.auth.get_user().get('username')) username, project_name = get_project_info(current_user=self.user, project=project) self.track_logs = track_logs self.track_code = track_code self.track_env = track_env self.project = project self.username = username self.project_name = project_name self.outputs_store = outputs_store # Setup the outputs store if outputs_store is None and settings.IN_CLUSTER: self.set_outputs_store(outputs_path=get_outputs_path(), set_env_vars=True) def _set_health_url(self): raise NotImplementedError def log_status(self, status, message=None, traceback=None): raise NotImplementedError def _start(self): if settings.NO_OP: return atexit.register(self._end) self.start() def excepthook(exception, value, tb): self.failed(message='Type: {}, Value: {}'.format(exception, value)) # Resume normal work sys.__excepthook__(exception, value, tb) sys.excepthook = excepthook def _end(self): if settings.NO_OP: return self.succeeded() def start(self): if settings.NO_OP: return self.log_status('running') self.last_status = 'running' def end(self, status, message=None): if settings.NO_OP: return if self.last_status in ['succeeded', 'failed', 'stopped']: return self.log_status(status, message) self.last_status = status time.sleep(0.1) # Just to give the opportunity to the worker to pick the message def succeeded(self): if settings.NO_OP: return self.end('succeeded') def stop(self): if settings.NO_OP: return self.end('stopped') def failed(self, message=None): if settings.NO_OP: return self.end(status='failed', message=message) def set_outputs_store(self, outputs_store=None, outputs_path=None, set_env_vars=False): if settings.NO_OP: return if not any([outputs_store, outputs_path]): raise PolyaxonClientException( 'An Store instance or and outputs path is required.') self.outputs_store = outputs_store or StoreManager(path=outputs_path) if self.outputs_store and set_env_vars: self.outputs_store.set_env_vars() def log_output(self, filename, **kwargs): if settings.NO_OP: return self.outputs_store.upload_file(filename=filename) def log_outputs(self, dirname, **kwargs): if settings.NO_OP: return self.outputs_store.upload_dir(dirname=dirname)
Python
0
@@ -2545,32 +2545,48 @@ us, message=None +, traceback=None ):%0A if se @@ -2731,24 +2731,31 @@ tatus(status +=status , message)%0A @@ -2751,16 +2751,45 @@ message +=message, traceback=traceback )%0A @@ -3135,24 +3135,40 @@ message=None +, traceback=None ):%0A i @@ -3254,16 +3254,37 @@ =message +, traceback=traceback )%0A%0A d
a2eae87fc76ba1e9fbfa8102c3e19c239445a62a
Fix form retrieval in ModelForm
nazs/web/forms.py
nazs/web/forms.py
from achilles.forms import * # noqa from nazs.models import SingletonModel # Override forms template Form.template_name = 'web/form.html' class ModelForm(ModelForm): def get_form(self, form_data=None, *args, **kwargs): # manage SingletonModels if issubclass(self.form_class.Meta.model, SingletonModel): instance = self.form_class.Meta.model.get() return self.form_class(form_data, instance=instance) else: return super(ModelForm, self).get_form(*args, **kwargs)
Python
0.000002
@@ -508,16 +508,27 @@ et_form( +form_data, *args, *
a4ee20e078175c5d75380afca7b02305440ab32f
Add a couple numeric columns to better portray overall performance.
postgresql/test/perf_query_io.py
postgresql/test/perf_query_io.py
#!/usr/bin/env python ## # copyright 2009, James William Pye # http://python.projects.postgresql.org ## # Statement I/O: Mass insert and select performance ## import os import time import sys def insertSamples(count, insert_records): recs = [ (-3, 123, 0xfffffea023, 'some_óäæ_thing', 'varying', 'æ') for x in range(count) ] gen = time.time() insert_records.load(recs) fin = time.time() xacttime = fin - gen ats = count / xacttime sys.stderr.write( "INSERT Summary,\n " \ "inserted tuples: %d\n " \ "total time: %f\n " \ "average tuples per second: %f\n\n" %( count, xacttime, ats, ) ) def timeTupleRead(portal): loops = 0 tuples = 0 genesis = time.time() for x in portal.chunks: loops += 1 tuples += len(x) finalis = time.time() looptime = finalis - genesis ats = tuples / looptime sys.stderr.write( "SELECT Summary,\n " \ "looped: {looped}\n " \ "looptime: {looptime}\n " \ "tuples: {ntuples}\n " \ "average tuples per second: {tps}\n ".format( looped = loops, looptime = looptime, ntuples = tuples, tps = ats ) ) def main(count): execute('CREATE TEMP TABLE samples ' '(i2 int2, i4 int4, i8 int8, t text, v varchar, c char)') insert_records = prepare( "INSERT INTO samples VALUES ($1, $2, $3, $4, $5, $6)" ) select_records = prepare("SELECT * FROM samples") try: insertSamples(count, insert_records) timeTupleRead(select_records()) finally: execute("DROP TABLE samples") def command(args): main(int((args + [25000])[1])) if __name__ == '__main__': command(sys.argv)
Python
0
@@ -184,16 +184,31 @@ port sys +%0Aimport decimal %0A%0Adef in @@ -278,16 +278,84 @@ ffea023, + decimal.Decimal(%2290900023123.40031%22), decimal.Decimal(%22432.40031%22), 'some_%C3%B3 @@ -1244,16 +1244,39 @@ i8 int8, + n numeric, n2 numeric, t text, @@ -1377,16 +1377,24 @@ , $5, $6 +, $7, $8 )%22%0A%09)%0A%09s
475b4dfa711d475d57b1f6712b6286d8310a6945
Update help text
src/penn_chime/cli.py
src/penn_chime/cli.py
"""Command line interface.""" from argparse import ( Action, ArgumentParser, ) from datetime import datetime from pandas import DataFrame from .constants import CHANGE_DATE from .parameters import Parameters, RateDays from .models import SimSirModel as Model class FromFile(Action): """From File.""" def __call__(self, parser, namespace, values, option_string=None): with values as f: parser.parse_args(f.read().split(), namespace) def cast_date(string): return datetime.strptime(string, '%Y-%m-%d').date() def validator(arg, cast, min_value, max_value, required=True): """Validator.""" def validate(string): """Validate.""" if string == '' and cast != str: if required: raise AssertionError('%s is required.') return None value = cast(string) if min_value is not None: assert value >= min_value if max_value is not None: assert value <= max_value return value return validate def parse_args(): """Parse args.""" parser = ArgumentParser(description=f"penn_chime: {CHANGE_DATE}") parser.add_argument("--file", type=open, action=FromFile) for arg, cast, min_value, max_value, help, required in ( ( "--current-hospitalized", int, 0, None, "Currently Hospitalized COVID-19 Patients (>= 0)", True, ), ( "--date-first-hospitalized", cast_date, None, None, "Current date", False, ), ( "--doubling-time", float, 0.0, None, "Doubling time before social distancing (days)", True, ), ("--hospitalized-days", int, 0, None, "Hospital Length of Stay (days)", True), ( "--hospitalized-rate", float, 0.00001, 1.0, "Hospitalized Rate: 0.00001 - 1.0", True, ), ("--icu-days", int, 0, None, "Days in ICU", True), ("--icu-rate", float, 0.0, 1.0, "ICU Rate: 0.0 - 1.0", True), ( "--market_share", float, 0.00001, 1.0, "Hospital Market Share (0.00001 - 1.0)", True, ), ("--infectious-days", float, 0.0, None, "Infectious days", True), ("--n-days", int, 0, None, "Number of days to project >= 0", True), ( "--relative-contact-rate", float, 0.0, 1.0, "Social Distancing Reduction Rate: 0.0 - 1.0", True, ), ("--population", int, 1, None, "Regional Population >= 1", True), ("--ventilated-days", int, 0, None, "Days on Ventilator", True), ("--ventilated-rate", float, 0.0, 1.0, "Ventilated Rate: 0.0 - 1.0", True), ): parser.add_argument(arg, type=validator(arg, cast, min_value, max_value, required)) return parser.parse_args() def main(): """Main.""" a = parse_args() p = Parameters( current_hospitalized=a.current_hospitalized, date_first_hospitalized=a.date_first_hospitalized, doubling_time=a.doubling_time, infectious_days=a.infectious_days, market_share=a.market_share, n_days=a.n_days, relative_contact_rate=a.relative_contact_rate, population=a.population, hospitalized=RateDays(a.hospitalized_rate, a.hospitalized_days), icu=RateDays(a.icu_rate, a.icu_days), ventilated=RateDays(a.ventilated_rate, a.ventilated_days), ) m = Model(p) for df, name in ( (m.sim_sir_w_date_df, "sim_sir_w_date"), (m.admits_df, "projected_admits"), (m.census_df, "projected_census"), ): df.to_csv(f"{p.current_date}_{name}.csv") if __name__ == "__main__": main()
Python
0
@@ -1872,16 +1872,24 @@ None, %22 +Average Hospital @@ -2133,24 +2133,32 @@ , 0, None, %22 +Average Days in ICU%22 @@ -2874,16 +2874,24 @@ None, %22 +Average Days on
b6dff8fcd7dec56703006f2a7bcf1c8c72d0c21b
FIX price sec. related field as readonly
price_security/models/invoice.py
price_security/models/invoice.py
# -*- coding: utf-8 -*- ############################################################################## # For copyright and license notices, see __openerp__.py file in module root # directory ############################################################################## from openerp import fields, models, api class account_invoice_line(models.Model): _inherit = 'account.invoice.line' # we add this fields instead of making original readonly because we need # on change to change values, we make readonly in view because sometimes # we want them to be writeable invoice_line_tax_id_readonly = fields.Many2many( related='invoice_line_tax_id', ) price_unit_readonly = fields.Float( related='price_unit', ) product_can_modify_prices = fields.Boolean( related='product_id.can_modify_prices', string='Product Can modify prices') @api.one @api.constrains( 'discount', 'product_can_modify_prices') def check_discount(self): if ( self.user_has_groups( 'price_security.group_restrict_prices') and not self.product_can_modify_prices and self.invoice_id ): self.env.user.check_discount( self.discount, self.invoice_id.partner_id.property_product_pricelist.id)
Python
0.000003
@@ -845,16 +845,39 @@ rices',%0A + readonly=True,%0A
fb142d3324ca974c9308cb8ab18dd9db2c2aae0b
Use monospace font
editor.py
editor.py
#!/usr/bin/env python import sys import sip sip.setapi('QString', 2) from PyQt4.QtGui import QApplication, QPlainTextEdit, QSyntaxHighlighter, \ QTextCharFormat, QTextBlockUserData from qutepart.SyntaxHighlighter import SyntaxHighlighter from qutepart.syntax_manager import SyntaxManager def main(): if len(sys.argv) != 2: print 'Usage:\n\t%s FILE' % sys.argv[0] filePath = sys.argv[1] try: syntax = SyntaxManager().getSyntaxBySourceFileName(filePath) except KeyError: print 'No syntax for', filePath return print 'Using syntax', syntax.name with open(filePath) as file: text = file.read() app = QApplication(sys.argv) pte = QPlainTextEdit() pte.setPlainText(text) pte.setWindowTitle(filePath) hl = SyntaxHighlighter(syntax, pte.document()) pte.show() return app.exec_() if __name__ == '__main__': main()
Python
0.000001
@@ -102,16 +102,23 @@ ication, + QFont, QPlainT @@ -798,24 +798,60 @@ e(filePath)%0A + pte.setFont(QFont(%22Monospace%22))%0A %0A hl
a098efa1b69d2de3b1e2437a056b0c6937cbf998
add documentation
src/bat/images.py
src/bat/images.py
#!/usr/bin/python ## Binary Analysis Tool ## Copyright 2012 Armijn Hemel for Tjaldur Software Governance Solutions ## Licensed under Apache 2.0, see LICENSE file for details ''' This is a plugin for the Binary Analysis Tool. It generates images of files, both full files and thumbnails. The files can be used for informational purposes, such as detecting roughly where offsets can be found, if data is compressed or encrypted, etc. This should be run as a postrun scan ''' import os, os.path, sys, subprocess, array from PIL import Image def generateImages(filename, unpackreport, leafscans, envvars={}): if not unpackreport.has_key('sha256'): return scanenv = os.environ if envvars != None: for en in envvars.split(':'): try: (envname, envvalue) = en.split('=') scanenv[envname] = envvalue except Exception, e: pass ## TODO: check if BAT_IMAGEDIR exists imagedir = scanenv.get('BAT_IMAGEDIR', '.') fwfile = open(filename) ## this is very inefficient for large files, but we *really* need all the data :-( fwdata = fwfile.read() fwfile.close() fwlen = len(fwdata) if fwlen > 1024: height = 1024 else: height = fwlen width = fwlen/height ## we might need to add some bytes so we can create a valid picture if fwlen%height > 0: width = width + 1 for i in range(0, height - (fwlen%height)): fwdata = fwdata + chr(0) imgbuffer = buffer(bytearray(fwdata)) im = Image.frombuffer("L", (height, width), imgbuffer, "raw", "L", 0, 1) im.save("%s/%s.png" % (imagedir, unpackreport['sha256'])) if width > 100: imthumb = im.thumbnail((height/4, width/4)) im.save("%s/%s-thumbnail.png" % (imagedir, unpackreport['sha256'])) ''' p = subprocess.Popen(['python', '/home/armijn/gpltool/trunk/bat-extratools/bat-visualisation/bat-generate-histogram.py', '-i', filename, '-o', '%s/%s-histogram.png' % (imagedir, unpackreport['sha256'])], stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True) (stanout, stanerr) = p.communicate() if p.returncode != 0: print >>sys.stderr, stanerr '''
Python
0
@@ -428,16 +428,330 @@ ,%0Aetc.%0A%0A +It also generates histograms, which show how different byte values are distributed.%0AThis can provide another visual clue about how files are constructed. Binaries from%0Athe same type (like ELF binaries) are actually quite similar, so binaries that%0Asignificantly deviate from this could mean something interesting.%0A%0A This sho @@ -1992,16 +1992,39 @@ )%0A%0A%09'''%0A +%09## generate histogram%0A %09p = sub
7a60bd74b3af40223553c64dafed07c46c5db639
add a --jit commandline option
prolog/targetprologstandalone.py
prolog/targetprologstandalone.py
""" A simple standalone target for the prolog interpreter. """ import sys from prolog.interpreter.translatedmain import repl, execute # __________ Entry point __________ from prolog.interpreter.continuation import Engine from prolog.interpreter import term from prolog.interpreter import arithmetic # for side effects from prolog import builtin # for side effects e = Engine(load_system=True) term.DEBUG = False def entry_point(argv): e.clocks.startup() if len(argv) == 2: execute(e, argv[1]) try: repl(e) except SystemExit: return 1 return 0 # _____ Define and setup target ___ def target(driver, args): driver.exe_name = 'pyrolog-%(backend)s' return entry_point, None def portal(driver): from prolog.interpreter.portal import get_portal return get_portal(driver) def jitpolicy(self): from pypy.jit.codewriter.policy import JitPolicy return JitPolicy() if __name__ == '__main__': entry_point(sys.argv)
Python
0.000002
@@ -218,16 +218,27 @@ t Engine +, jitdriver %0Afrom pr @@ -476,54 +476,463 @@ -if len(argv) == 2:%0A execute(e, argv%5B1%5D) +# XXX crappy argument handling%0A for i in range(len(argv)):%0A if argv%5Bi%5D == %22--jit%22:%0A if len(argv) == i + 1:%0A print %22missing argument after --jit%22%0A return 2%0A jitarg = argv%5Bi + 1%5D%0A del argv%5Bi:i+2%5D%0A jitdriver.set_user_param(jitarg)%0A break%0A%0A if len(argv) == 2:%0A execute(e, argv%5B1%5D)%0A if len(argv) %3E 2:%0A print %22too many arguments%22%0A return 2 %0A
21eba24ffedf90046540bd9d400dce60b84cfac6
Change where kwargs go
src/pybel/io/lines.py
src/pybel/io/lines.py
# -*- coding: utf-8 -*- """This module contains IO functions for BEL scripts""" import codecs import logging import os from .line_utils import parse_lines from ..struct import BELGraph from ..utils import download __all__ = [ 'from_lines', 'from_path', 'from_url' ] log = logging.getLogger(__name__) def from_lines(lines, manager=None, allow_naked_names=False, allow_nested=False, allow_unqualified_translocations=False, citation_clearing=True, no_identifier_validation=False, **kwargs): """Loads a BEL graph from an iterable over the lines of a BEL script :param iter[str] lines: An iterable of strings (the lines in a BEL script) :param manager: database connection string to cache, pre-built CacheManager, or None to use default cache :type manager: None or str or :class:`pybel.manager.CacheManager` :param bool allow_naked_names: if true, turn off naked namespace failures :param bool allow_nested: if true, turn off nested statement failures :param bool allow_unqualified_translocations: If true, allow translocations without TO and FROM clauses. :param bool citation_clearing: Should :code:`SET Citation` statements clear evidence and all annotations? Delegated to :class:`pybel.parser.ControlParser` :param dict kwargs: keyword arguments to pass to :class:`networkx.MultiDiGraph` :return: A BEL graph :rtype: BELGraph """ graph = BELGraph(**kwargs) parse_lines( graph=graph, lines=lines, manager=manager, allow_naked_names=allow_naked_names, allow_nested=allow_nested, allow_unqualified_translocations=allow_unqualified_translocations, citation_clearing=citation_clearing, no_identifier_validation=no_identifier_validation, ) return graph def from_path(path, manager=None, allow_naked_names=False, allow_nested=False, citation_clearing=True, no_identifier_validation=False, encoding='utf-8', **kwargs): """Loads a BEL graph from a file resource. This function is a thin wrapper around :func:`from_lines`. :param str path: A file path :param manager: database connection string to cache, pre-built CacheManager, or None to use default cache :type manager: None or str or :class:`pybel.manager.CacheManager` :param bool allow_naked_names: if true, turn off naked namespace failures :param bool allow_nested: if true, turn off nested statement failures :param bool citation_clearing: Should :code:`SET Citation` statements clear evidence and all annotations? Delegated to :class:`pybel.parser.ControlParser` :param str encoding: the encoding to use when reading this file. Is passed to :code:`codecs.open`. See the python `docs <https://docs.python.org/3/library/codecs.html#standard-encodings>`_ for a list of standard encodings. For example, files starting with a UTF-8 BOM should use :code:`utf_8_sig` :param dict kwargs: Keyword arguments to pass to :class:`networkx.MultiDiGraph` :return: A BEL graph :rtype: BELGraph """ log.info('Loading from path: %s', path) with codecs.open(os.path.expanduser(path), encoding=encoding) as file: return from_lines( lines=file, manager=manager, allow_naked_names=allow_naked_names, allow_nested=allow_nested, citation_clearing=citation_clearing, no_identifier_validation=no_identifier_validation, **kwargs ) def from_url(url, manager=None, allow_naked_names=False, allow_nested=False, citation_clearing=True, **kwargs): """Loads a BEL graph from a URL resource. This function is a thin wrapper around :func:`from_lines`. 
:param str url: A valid URL pointing to a BEL resource :param manager: database connection string to cache, pre-built CacheManager, or None to use default cache :type manager: None or str or :class:`pybel.manager.CacheManager` :param bool allow_naked_names: if true, turn off naked namespace failures :param bool allow_nested: if true, turn off nested statement failures :param bool citation_clearing: Should :code:`SET Citation` statements clear evidence and all annotations? Delegated to :class:`pybel.parser.ControlParser` :param dict kwargs: Keyword arguments to pass to :class:`networkx.MultiDiGraph` :return: A BEL graph :rtype: BELGraph """ log.info('Loading from url: %s', url) res = download(url) lines = (line.decode('utf-8') for line in res.iter_lines()) return from_lines( lines=lines, manager=manager, allow_naked_names=allow_naked_names, allow_nested=allow_nested, citation_clearing=citation_clearing, **kwargs )
Python
0.000015
@@ -1349,45 +1349,46 @@ to -pass to :class:%60networkx.MultiDiGraph +:func:%60pybel.io.line_utils.parse_lines %60%0A @@ -1460,24 +1460,16 @@ ELGraph( -**kwargs )%0A pa @@ -1801,24 +1801,41 @@ validation,%0A + **kwargs%0A )%0A re
5087e961e45ed9c8cdb168da3ba480ee65a43644
Allow setting ES config, and convert results to utf-8 if needed
nlpipe/backend.py
nlpipe/backend.py
""" Useful functions for communication with elastic NLPipe assumes that raw texts are stored in an elastic index (see esconfig). If multiple fields are specified, and/or a field contains multiple results, they are joined with empty lines in between (e.g. "\n\n".join) Results are stored in a separate document type per module (version), and are assumed to have the form: {id: <id>, pipeline: [{module: <module>, version: <version>, input_type: <doctype>, input_fields: [<fields>] (raw input only), begin_time: <time>, end_time: <time>}] result: <result>} """ import datetime from elasticsearch import Elasticsearch from elasticsearch.helpers import scan from . import esconfig from .document import Document _es = Elasticsearch([{"host": esconfig.ES_HOST, "port": esconfig.ES_PORT}]) _CHECKED_MAPPINGS = set() def _check_mapping(doc_type): if doc_type not in _CHECKED_MAPPINGS: index = esconfig.ES_RESULT_INDEX if not _es.indices.exists_type(index=index, doc_type=doc_type): mapping = {doc_type: esconfig.ES_MAPPING} if not _es.indices.exists(index): _es.indices.create(index) _es.indices.put_mapping(index=index, doc_type=doc_type, body=mapping) _CHECKED_MAPPINGS.add(doc_type) def get_input(id): input_type = esconfig.ES_INPUT_DOCTYPE, input_fields = esconfig.ES_INPUT_FIELDS res = _es.get(index=esconfig.ES_INPUT_INDEX, doc_type=input_type, id=id, fields=input_fields) fields = res['fields'] text = "\n\n".join("\n\n".join(fields[f]) for f in input_fields if f in fields) return Document(id, [], text, input_type, input_fields) def get_input_ids(query, limit=None): """Get the ids of existing input documents that match a query""" docs = scan(_es, index=esconfig.ES_INPUT_INDEX, doc_type=esconfig.ES_INPUT_DOCTYPE, query=query, size=(limit or 1000), fields="") for i, a in enumerate(docs): if limit and i >= limit: return yield a['_id'] def get_cached_documents(ids, doc_type): res = _es.mget(index=esconfig.ES_RESULT_INDEX, doc_type=doc_type, body={"ids": ids}) for doc in res['docs']: if doc['found']: d = Document(doc['_id'], doc['_source']['pipeline'], doc['_source']['result'], doc_type) yield d.id, d def get_document(id, doc_type): res= _es.get(index=esconfig.ES_RESULT_INDEX, doc_type=doc_type, id=id) return Document(id, res['_source']['pipeline'], res['_source']['result'], doc_type) def store_result(doc_type, id, pipeline, result): _check_mapping(doc_type) body = dict(id=id, pipeline=pipeline, result=result) _es.index(index=esconfig.ES_RESULT_INDEX, doc_type=doc_type, body = body, id = id) def exists(doc_type, id): return _es.exists(index=esconfig.ES_RESULT_INDEX, doc_type=doc_type, id=id) def count_cached(ids): body = {'query': {u'filtered': {u'filter': {'ids': {u'values': ids}}}}, 'aggregations': {u'aggregation': {u'terms': {u'field': u'_type'}}}} res = _es.search(index=esconfig.ES_RESULT_INDEX, body=body, size=0) for bucket in res['aggregations']['aggregation']['buckets']: yield bucket['key'], bucket['doc_count'] def get_cached_document_ids(ids, doc_type): """Get the ids of documents that have been parsed with this doc_type""" res = _es.mget(index=esconfig.ES_RESULT_INDEX, doc_type=doc_type, body={"ids": ids}, _source=False) for doc in res['docs']: if doc['found']: yield doc['_id']
Python
0
@@ -838,16 +838,170 @@ set()%0A%0A +def set_esconfig(host, port):%0A %22%22%22%0A Set the (global) es config%0A %22%22%22%0A global _es%0A _es = Elasticsearch(%5B%7B%22host%22: host, %22port%22: int(port)%7D%5D)%0A%0A def _che @@ -2864,24 +2864,142 @@ g(doc_type)%0A + if isinstance(result, bytes):%0A # elastic wants 'text', not 'bytes'%0A result = result.decode(%22utf-8%22)%0A body = d
3f1f86c358efc6d38012191c4b613aa775861805
Fix 'graph3d.py' to read from VTKData directory
Examples/Infovis/Python/graph3d.py
Examples/Infovis/Python/graph3d.py
from vtk import * reader = vtkXGMLReader() reader.SetFileName("fsm.gml") reader.Update() strategy = vtkSpanTreeLayoutStrategy() strategy.DepthFirstSpanningTreeOn() view = vtkGraphLayoutView() view.AddRepresentationFromInputConnection(reader.GetOutputPort()) view.SetVertexLabelArrayName("vertex id") view.SetVertexLabelVisibility(True) view.SetVertexColorArrayName("vertex id") view.SetColorVertices(True) view.SetLayoutStrategy( strategy ) view.SetInteractionModeTo3D() # Left mouse button causes 3D rotate instead of zoom theme = vtkViewTheme.CreateMellowTheme() theme.SetCellColor(.2,.2,.6) theme.SetLineWidth(2) theme.SetPointSize(10) view.ApplyViewTheme(theme) theme.FastDelete() view.GetRenderWindow().SetSize(600, 600) view.ResetCamera() view.Render() #Here's the window with David's original layout methodology # Aside from the theme elements in the view above, the notable # difference between the two views is the angling on the edges. layout = vtkGraphLayout() layout.SetLayoutStrategy(strategy) layout.SetInputConnection(reader.GetOutputPort()) edge_geom = vtkGraphToPolyData() edge_geom.SetInputConnection(layout.GetOutputPort()) vertex_geom = vtkGraphToPoints() vertex_geom.SetInputConnection(layout.GetOutputPort()) # Vertex pipeline - mark each vertex with a cube glyph cube = vtkCubeSource() cube.SetXLength(0.3) cube.SetYLength(0.3) cube.SetZLength(0.3) glyph = vtkGlyph3D() glyph.SetInputConnection(vertex_geom.GetOutputPort()) glyph.SetSourceConnection(0, cube.GetOutputPort()) gmap = vtkPolyDataMapper() gmap.SetInputConnection(glyph.GetOutputPort()) gact = vtkActor() gact.SetMapper(gmap) gact.GetProperty().SetColor(0,0,1) # Edge pipeline - map edges to lines mapper = vtkPolyDataMapper() mapper.SetInputConnection(edge_geom.GetOutputPort()) actor = vtkActor() actor.SetMapper(mapper) actor.GetProperty().SetColor(0.4,0.4,0.6) # Renderer, window, and interaction ren = vtkRenderer() ren.AddActor(actor) ren.AddActor(gact) ren.ResetCamera() renWin = vtkRenderWindow() renWin.AddRenderer(ren) renWin.SetSize(800,550) iren = vtkRenderWindowInteractor() iren.SetRenderWindow(renWin) iren.Initialize() #iren.Start() view.GetInteractor().Start()
Python
0
@@ -10,16 +10,90 @@ import * +%0Afrom vtk.util.misc import vtkGetDataRoot%0AVTK_DATA_ROOT = vtkGetDataRoot() %0A%0Areader @@ -130,17 +130,47 @@ ileName( -%22 +VTK_DATA_ROOT + %22/Data/Infovis/ fsm.gml%22
0cd2af0f20b6b544f0d36140a098ca8e3058d8fa
Update constants
node/constants.py
node/constants.py
######### KADEMLIA CONSTANTS ########### #: Small number Representing the degree of parallelism in network calls alpha = 3 #: Maximum number of contacts stored in a bucket; this should be an even number k = 8 # Delay between iterations of iterative node lookups (for loose parallelism) (in seconds) iterativeLookupDelay = rpcTimeout / 2 #: If a k-bucket has not been used for this amount of time, refresh it (in seconds) refreshTimeout = 3600 # 1 hour #: The interval at which nodes replicate (republish/refresh) data they are holding replicateInterval = refreshTimeout # The time it takes for data to expire in the network; the original publisher of the data # will also republish the data at this time if it is still valid dataExpireTimeout = 86400 # 24 hours ######## IMPLEMENTATION-SPECIFIC CONSTANTS ########### #: The interval in which the node should check its whether any buckets need refreshing, #: or whether any data needs to be republished (in seconds) checkRefreshInterval = refreshTimeout/5
Python
0.000001
@@ -205,16 +205,79 @@ %0Ak = 8%0A%0A +#: Timeout for network operations (in seconds)%0ArpcTimeout = 5%0A%0A # Delay @@ -1068,8 +1068,174 @@ meout/5%0A +%0A#: Max size of a single UDP datagram, in bytes. If a message is larger than this, it will%0A#: be spread accross several UDP packets.%0AudpDatagramMaxSize = 8192 # 8 KB%0A
8765ac953047ba1c63eb2eb2eb087ba92e9213bc
fix switch template
Firefly/core/templates/__init__.py
Firefly/core/templates/__init__.py
# -*- coding: utf-8 -*- # @Author: Zachary Priddy # @Date: 2016-04-12 13:33:30 # @Last Modified by: Zachary Priddy # @Last Modified time: 2016-04-12 13:33:30 class Templates(object): def __init__(self): self._filepath = 'core/templates/' self._switch_template = self.get_template('switch') def get_template(self, template): with open('%s%s.html' % (self._filepath, template)) as template_file: return template_file.read().replace('\n', '') @property def switch(self): """ Builds a switch template from switch.html. Returns: template (str): string of switch template """ return self._switch ffTemplates = Templates()
Python
0.000001
@@ -643,16 +643,25 @@ ._switch +_template %0A%0A%0AffTem
85fe9b8b48b565488406343de41fa77b41357e4a
define skip
ooiservices/tests/test_models.py
ooiservices/tests/test_models.py
#!/usr/bin/env python ''' unit testing for the model classes. ''' __author__ = 'M@Campbell' import unittest from flask import url_for from ooiservices.app import create_app, db from ooiservices.app.models import Array, InstrumentDeployment, PlatformDeployment, Stream, \ StreamParameter, User, OperatorEvent, OperatorEventType, Organization ''' These tests are additional to the normal testing performed by coverage; each of these tests are to validate model logic outside of db management. ''' class ModelTestCase(unittest.TestCase): def setUp(self): self.app = create_app('TESTING_CONFIG') self.app_context = self.app.app_context() self.app_context.push() db.create_all() self.client = self.app.test_client(use_cookies=False) Organization.insert_org() def tearDown(self): db.session.remove() db.drop_all() self.app_context.pop() def test_array(self): #Test the json in the object array = Array() self.assertTrue(array.to_json() == {'id': None, 'array_code': None, \ 'array_name': None, 'description': None, 'display_name': None, \ 'geo_location': None}) def test_platform_deployment(self): #Test the json in the object platform_deployment = PlatformDeployment() self.assertTrue(platform_deployment.to_json() == {'id': None, \ 'array_id': None, 'display_name': None, 'end_date': None, \ 'geo_location': None, 'reference_designator': None, 'start_date': None}) def test_instrument_deployment(self): #Test the json in the object instrument_deployment = InstrumentDeployment() should_be = { 'id' :None, 'depth': None, 'display_name' : None, 'end_date' : None, 'geo_location': None, 'platform_deployment_id' : None, 'reference_designator' : None, 'start_date' : None } self.assertEquals(instrument_deployment.to_json() , should_be) @skipIf(os.getenv('TRAVIS'), 'Skip if testing from Travis CI.') def test_stream(self): #Test the json in the object stream = Stream() self.assertTrue(stream.to_json() == {'id': None, 'description': None, \ 'instrument_id': None, 'stream_name': None}) def test_parameter(self): #Test the json in the object stream_param = StreamParameter() self.assertTrue(stream_param.to_json() == {'id': None, 'data_type': None, \ 'long_name': None, 'parameter_name': None, 'short_name': None, \ 'standard_name': None, 'units': None}) def test_user(self): #Test the json in the object user = User() self.assertEquals(user.to_json(), { 'email': None, 'id': None, 'user_id': None, 'active':None, 'first_name': None, 'last_name' : None, 'organization_id' : None, 'phone_alternate' : None, 'phone_primary' : None, 'scopes' : [], 'role' : None, 'user_name': None, 'email_opt_in': None}) def test_operator_event_type(self): #Test the json in the object operator_event_type = OperatorEventType() self.assertTrue(operator_event_type.to_json() == {'id': None, 'type_name': None, 'type_description': None}) def test_geometry(self): platform_deployment = PlatformDeployment() platform_deployment.reference_designator = 'TEST0000' platform_deployment.geo_location = 'POINT(-70 40)' db.session.add(platform_deployment) db.session.commit() pd = PlatformDeployment.query.filter(PlatformDeployment.reference_designator=='TEST0000').first() self.assertEquals(pd.geojson, {'coordinates': [-70, 40], 'type': 'Point'})
Python
0.000207
@@ -337,16 +337,44 @@ zation%0A%0A +from unittest import skipIf%0A '''%0AThes
b87b63db7e99a867ec163574ce1d13ab13285f36
Remove minor troubleshooting accidently committed
ironic/cmd/status.py
ironic/cmd/status.py
# Copyright (c) 2018 NEC, Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from oslo_config import cfg from oslo_db.sqlalchemy import enginefacade from oslo_db.sqlalchemy import utils from oslo_upgradecheck import common_checks from oslo_upgradecheck import upgradecheck import sqlalchemy from ironic.cmd import dbsync from ironic.common.i18n import _ from ironic.common import policy # noqa importing to load policy config. import ironic.conf CONF = ironic.conf.CONF class Checks(upgradecheck.UpgradeCommands): """Upgrade checks for the ironic-status upgrade check command Upgrade checks should be added as separate methods in this class and added to _upgrade_checks tuple. """ def _check_obj_versions(self): """Check that the DB versions of objects are compatible. Checks that the object versions are compatible with this release of ironic. It does this by comparing the objects' .version field in the database, with the expected versions of these objects. """ try: # NOTE(TheJulia): Seems an exception is raised by sqlalchemy # when a table is missing, so lets catch it, since it is fatal. msg = dbsync.DBCommand().check_obj_versions( ignore_missing_tables=True) except sqlalchemy.exc.NoSuchTableError as e: msg = ('Database table missing. Please ensure you have ' 'updated the database schema. Not Found: %s' % e) return upgradecheck.Result(upgradecheck.Code.FAILURE, details=msg) if not msg: return upgradecheck.Result(upgradecheck.Code.SUCCESS) else: return upgradecheck.Result(upgradecheck.Code.FAILURE, details=msg) def _check_db_indexes(self): """Check if indexes exist on heavily used columns. Checks the database to see if indexes exist on heavily used columns and provide guidance of action that can be taken to improve ironic database performance. """ engine = enginefacade.reader.get_engine() indexes = [ ('nodes', 'reservation_idx'), ('nodes', 'driver_idx'), ('nodes', 'provision_state_idx'), ('nodes', 'conductor_group_idx'), ('nodes', 'resource_class_idx'), ('nodes', 'reservation_idx'), ('nodes', 'owner_idx'), ('nodes', 'lessee_idx'), ] missing_indexes = [] for table, idx in indexes: if not utils.index_exists(engine, table, idx): missing_indexes.append(idx) if missing_indexes: idx_list = ', '.join(missing_indexes) msg = ('Indexes missing for ideal database performance. Please ' 'consult https://docs.openstack.org/ironic/latest/admin/' 'tuning.html for information on indexes. Missing: %s' % idx_list) return upgradecheck.Result(upgradecheck.Code.WARNING, details=msg) else: return upgradecheck.Result(upgradecheck.Code.SUCCESS) def _check_allocations_table(self): msg = None engine = enginefacade.reader.get_engine() if 'mysql' not in str(engine.url): # This test only applies to mysql and database schema # selection. 
return upgradecheck.Result(upgradecheck.Code.SUCCESS) res = engine.execute("show create table allocations") results = str(res.all()).lower() print('####################################################33') print(results) if 'utf8' not in results: msg = ('The Allocations table is is not using UTF8 encoding. ' 'This is corrected in later versions of Ironic, where ' 'the table character set schema is automatically ' 'migrated. Continued use of a non-UTF8 character ' 'set may produce unexpected results.') if 'innodb' not in results: warning = ('The engine used by MySQL for the allocations ' 'table is not the intended engine for the Ironic ' 'database tables to use. This may have been a result ' 'of an error with the table creation schema. This ' 'may require Database Administrator intervention ' 'and downtime to dump, modify the table engine to ' 'utilize InnoDB, and reload the allocations table to ' 'utilize the InnoDB engine.') if msg: msg = msg + ' Additionally: ' + warning else: msg = warning if msg: return upgradecheck.Result(upgradecheck.Code.WARNING, details=msg) else: return upgradecheck.Result(upgradecheck.Code.SUCCESS) # A tuple of check tuples of (<name of check>, <check function>). # The name of the check will be used in the output of this command. # The check function takes no arguments and returns an # oslo_upgradecheck.upgradecheck.Result object with the appropriate # oslo_upgradecheck.upgradecheck.Code and details set. If the # check function hits warnings or failures then those should be stored # in the returned Result's "details" attribute. The # summary will be rolled up at the end of the check() method. _upgrade_checks = ( (_('Object versions'), _check_obj_versions), (_('Database Index Status'), _check_db_indexes), (_('Allocations Name Field Length Check'), _check_allocations_table), # Victoria -> Wallaby migration (_('Policy File JSON to YAML Migration'), (common_checks.check_policy_json, {'conf': CONF})), ) def main(): return upgradecheck.main( cfg.CONF, project='ironic', upgrade_command=Checks()) if __name__ == '__main__': sys.exit(main())
Python
0.00002
@@ -4069,103 +4069,8 @@ r()%0A - print('####################################################33')%0A print(results)%0A
03f9fe8ad1d52f15808e46440b14f9e9ae887448
Install EPEL into non-Fedora containers
container/generate-dockerfile.py
container/generate-dockerfile.py
#!/usr/bin/python import argparse labels = [ ("com.redhat.component", "openscap-docker"), ("name", "openscap"), ("version", "testing"), ("architecture", "x86_64"), ("summary", "OpenSCAP container image that provides security/compliance scanning capabilities for 'atomic scan'"), ("description", "OpenSCAP is an auditing tool that utilizes the Extensible Configuration Checklist Description Format (XCCDF). XCCDF is a standard way of expressing checklist content and defines security checklists."), ("io.k8s.display-name", "OpenSCAP"), ("io.k8s.description", "OpenSCAP is an auditing tool that utilizes the Extensible Configuration Checklist Description Format (XCCDF). XCCDF is a standard way of expressing checklist content and defines security checklists."), ("io.openshift.tags", "security openscap scan"), ("install", "docker run --rm --privileged -v /:/host/ IMAGE sh /root/install.sh"), ("run", "docker run -it --rm -v /:/host/ IMAGE sh /root/run.sh") ] packages = [ "bzip2", "wget" ] files = [ ("install.sh", "/root"), ("run.sh", "/root"), ("openscap", "/root"), ("config.ini", "/root") ] env_variables = [ ("container", "docker") ] install_commands = { "fedora": "dnf", "rhel": "yum" } builddep_packages = { "fedora" : "'dnf-command(builddep)'", "rhel" : "yum-utils" } builddep_commands = { "fedora": "dnf -y builddep", "rhel": "yum-builddep -y" } download_cve_feeds_command = [ "wget --no-verbose -P /var/lib/oscapd/cve_feeds/ " "https://www.redhat.com/security/data/oval/com.redhat.rhsa-RHEL{5,6,7}.xml.bz2", "bzip2 -dk /var/lib/oscapd/cve_feeds/com.redhat.rhsa-RHEL{5,6,7}.xml.bz2", "ln -s /var/lib/oscapd/cve_feeds/ /var/tmp/image-scanner" ] openscap_build_command = [ "git clone -b maint-1.2 https://github.com/OpenSCAP/openscap.git", "pushd /openscap", "./autogen.sh", "./configure --enable-sce --prefix=/usr", "make -j 4 install", "popd" ] ssg_build_command = [ "git clone https://github.com/OpenSCAP/scap-security-guide.git", "pushd /scap-security-guide/build", "cmake -DCMAKE_INSTALL_DATADIR=/usr/share ..", "make -j 4 install", "popd" ] daemon_build_command = [ "git clone https://github.com/OpenSCAP/openscap-daemon.git", "pushd /openscap-daemon", "python setup.py install", "popd" ] delim = " && \\\n " def main(): parser = argparse.ArgumentParser(description="Builds an image with OpenSCAP Daemon") parser.add_argument("--base", type=str, default="fedora", help="Base image name") parser.add_argument("--openscap-from-git", action="store_true", default=False, help="Use OpenSCAP from upstream instead of package") parser.add_argument("--ssg-from-git", action="store_true", default=False, help="Use SCAP Security Guide from upstream instead of package") parser.add_argument("--daemon-from-git", action="store_true", default=False, help="Use OpenSCAP Daemon from upstream instead of package") args = parser.parse_args() f = open("Dockerfile", "w") install_command = install_commands.get(args.base, install_commands["rhel"]) builddep_package = builddep_packages.get(args.base, builddep_packages["rhel"]) builddep_command = builddep_commands.get(args.base, builddep_commands["rhel"]) # write out the Dockerfile f.write("FROM " + args.base + "\n\n") # add labels for name, value in labels: f.write("LABEL " + name + '="' + value + '"\n') f.write("\n") # add environment variables for var, val in env_variables: f.write("ENV " + var + " " + val + "\n") f.write("\n") build_from_source = [] build_commands = [] # OpenSCAP if args.openscap_from_git: packages.extend(["git", "libtool", "automake"]) build_from_source.append("openscap") 
build_commands.append(openscap_build_command) else: packages.append("openscap-utils") # SCAP Security Guide if args.ssg_from_git: packages.append("git") build_from_source.append("scap-security-guide") build_commands.append(ssg_build_command) else: packages.append("scap-security-guide") # OpenSCAP Daemon if args.daemon_from_git: packages.append("git") build_from_source.append("openscap-daemon") build_commands.append(daemon_build_command) else: packages.append("openscap-daemon") # inject files for file, path in files: f.write("ADD " + file + " " + path + "\n") f.write("\n") if build_from_source: packages.append(builddep_package) # add a command to install packages f.write("RUN " + install_command + " -y install " + " ".join(set(packages)) + "\n\n") if build_from_source: # install build dependencies f.write("RUN " + builddep_command + " " + " ".join(build_from_source) + "\n\n") # clean package manager cache f.write("RUN " + install_command + " clean all\n\n") if build_from_source: # add commands for building from custom sources for cmd in build_commands: f.write("RUN " + delim.join(cmd) + "\n\n") # add RUN instruction that will download CVE feeds f.write("RUN " + delim.join(download_cve_feeds_command) + "\n\n") # add CMD instruction to the Dockerfile, including a comment f.write("# It doesn't matter what is in the line below, atomic will change the CMD\n") f.write("# before running it\n") f.write('CMD ["/root/run.sh"]\n') f.close() if __name__ == "__main__": main()
Python
0
@@ -4699,16 +4699,154 @@ ckage)%0A%0A + if args.base != %22fedora%22:%0A f.write(%22RUN rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm%5Cn%5Cn%22)%0A%0A # ad
38de795103748ca757a03a62da8ef3d89b0bf682
Fix bug that prevent commands with no values from being added
GoProController/models.py
GoProController/models.py
from django.db import models


class Camera(models.Model):
    ssid = models.CharField(max_length=255)
    password = models.CharField(max_length=255)
    date_added = models.DateTimeField(auto_now_add=True)
    last_attempt = models.DateTimeField(auto_now=True)
    last_update = models.DateTimeField(null=True, blank=True)
    image_last_update = models.DateTimeField(null=True, blank=True)
    image = models.TextField(blank=True)
    summary = models.TextField(blank=True)
    status = models.TextField(blank=True)
    connection_attempts = models.IntegerField(default=0)
    connection_failures = models.IntegerField(default=0)

    def __unicode__(self):
        return self.ssid


class Command(models.Model):
    camera = models.ForeignKey(Camera)
    command = models.CharField(max_length=255)
    value = models.CharField(max_length=255)
    date_added = models.DateTimeField(auto_now_add=True)
    time_completed = models.DateTimeField(null=True, blank=True)

    def __unicode__(self):
        return self.camera.__unicode__() + ' > ' + self.command
Python
0
@@ -817,32 +817,44 @@ odels.CharField( +blank=True, max_length=255)%0A
e1ad05fb19577aa108b94ea500106e36b29915fc
update indentation
amount_raised_by_candidate.py
amount_raised_by_candidate.py
# Written by Jonathan Saewitz, released May 24th, 2016 for Statisti.ca # Released under the MIT License (https://opensource.org/licenses/MIT) import csv, plotly.plotly as plotly, plotly.graph_objs as go, requests from bs4 import BeautifulSoup candidates=[] with open('presidential_candidates.csv', 'r') as f: reader=csv.reader(f) reader.next() #skip the headers row for row in reader: #loop through the candidates c_id=row[15] #row[15] is the candidate's FEC id html=requests.get('https://beta.fec.gov/data/candidate/' + c_id).text #get the candidate's FEC page b=BeautifulSoup(html, 'html.parser') if len(b.find_all(class_='t-big-data'))==0: #if this class isn't found on the candidate's FEC page, #the candidate raised $0 amt=0.0 else: amt=float(b.find_all(class_="t-big-data")[0].text.strip().replace("$", "").replace(",", "")) #class "t-big-data" contains the money data #the 0th element contains the total receipts #.text gets only the text (i.e. amount raised) #.strip() removes all whitespace #.replace("$", "") removes the dollar sign #.replace(",", "") removes all commas #we should be left with the total amount raised in the form 0.00 name=row[14] #row[14] is the candidate's name candidates.append({'name': name, 'amount': amt}) candidates=sorted(candidates, key=lambda k: k['amount']) #sort the candidates by amount raised trace=go.Bar( x=[candidate['name'] for candidate in candidates], y=[candidate['amount'] for candidate in candidates] ) layout=go.Layout( title="Presidential Candidates by Money Raised", xaxis=dict( title="Candidates", ), yaxis=dict( title="Amount raised ($)", ) ) data=[trace] fig=dict(data=data, layout=layout) plotly.plot(fig)
Python
0.000001
@@ -713,22 +713,20 @@ %0A%09%09%09%09%09%09%09 -%09%09%09%09%09%09 + #the can
0617c9a8bc320e0919f6dbf231b187c1d299525c
Include target variance in table.
analysis/process-movements.py
analysis/process-movements.py
#!/usr/bin/env python import climate import itertools import joblib import lmj.pca import os import pandas as pd import random import database logging = climate.get_logger('compress') MARKERS = [ 'marker00-r-head-back', 'marker01-r-head-front', 'marker02-l-head-front', 'marker03-l-head-back', 'marker06-r-collar', 'marker07-r-shoulder', 'marker08-r-elbow', 'marker09-r-wrist', 'marker11-r-fing-ring', 'marker12-r-fing-middle', 'marker13-r-fing-index', 'marker14-r-mc-outer', 'marker15-r-mc-inner', 'marker16-r-thumb-base', 'marker17-r-thumb-tip', 'marker18-l-collar', 'marker19-l-shoulder', 'marker20-l-elbow', 'marker21-l-wrist', 'marker22-l-fing-pinky', 'marker23-l-fing-ring', 'marker24-l-fing-middle', 'marker25-l-fing-index', 'marker26-l-mc-outer', 'marker27-l-mc-inner', 'marker28-l-thumb-base', 'marker29-l-thumb-tip', 'marker30-abdomen', 'marker31-sternum', 'marker32-t3', 'marker33-t9', 'marker34-l-ilium', 'marker35-r-ilium', 'marker36-r-hip', 'marker37-r-knee', 'marker38-r-shin', 'marker39-r-ankle', 'marker40-r-heel', 'marker41-r-mt-outer', 'marker42-r-mt-inner', 'marker43-l-hip', 'marker44-l-knee', 'marker45-l-shin', 'marker46-l-ankle', 'marker47-l-heel', 'marker48-l-mt-outer', 'marker49-l-mt-inner', ] COLUMNS = ['{}-{}'.format(m, c) for m in MARKERS for c in 'xyz'] def compress(trial, output, variance=0.995): trial.load() trial.mask_fiddly_target_frames() #trial.df.dropna(thresh=len(COLUMNS), inplace=True) init = [c for c in trial.columns if c[:6] in ('source', 'target')] out = pd.DataFrame(trial.df[init], index=trial.df.index) def p(w): return os.path.join(output, 'pca-{}-relative.npz'.format(w)) # encode body-relative data. body = database.Trial(trial.parent, trial.basename) body.df = trial.df.copy() body.make_body_relative() body_pcs = 0 pca = lmj.pca.PCA(filename=p('body')) for i, v in enumerate(pca.encode(body.df[COLUMNS].values, retain=variance).T): out['body-pc{:02d}'.format(i)] = pd.Series(v, index=trial.df.index) body_pcs = i # encode goal-relative data. goal = database.Trial(trial.parent, trial.basename) goal.df = trial.df.copy() goal.make_target_relative() goal_pcs = 0 pca = lmj.pca.PCA(filename=p('goal')) for i, v in enumerate(pca.encode(goal.df[COLUMNS].values, retain=variance).T): out['goal-pc{:02d}'.format(i)] = pd.Series(v, index=trial.df.index) goal_pcs = i # add columns for the jacobian. for bpc in range(body_pcs): db = out['body-pc{:02d}'.format(bpc)].diff() db[db == 0] = float('nan') for gpc in range(goal_pcs): dg = out['goal-pc{:02d}'.format(gpc)].diff() dg[dg == 0] = float('nan') out['jac-fwd-{:02d}/{:02d}'.format(gpc, bpc)] = dg / db out['jac-inv-{:02d}/{:02d}'.format(bpc, gpc)] = db / dg trial.df = out[sorted(out.columns)] trial.save(trial.root.replace(trial.experiment.root, output)) @climate.annotate( root='load data files from this directory tree', output='save encoded data to this directory tree', pattern='process trials matching this pattern', variance=('retain this fraction of variance', 'option', None, float), ) def main(root, output, pattern='*', variance=0.99): trials = list(database.Experiment(root).trials_matching(pattern)) keys = [(t.block.key, t.key) for t in trials] # choose N trials per subject to compute the principal components. 
N = 2 pca_trials = [] for s, ts in itertools.groupby(trials, key=lambda t: t.subject.key): ts = list(ts) idx = list(range(len(ts))) random.shuffle(idx) for i in idx[:N]: pca_trials.append(ts[i]) ts[i].load() body = database.Movement(pd.concat([t.df for t in pca_trials])) body.make_body_relative() pca = lmj.pca.PCA() pca.fit(body.df[COLUMNS]) for v in (0.5, 0.8, 0.9, 0.95, 0.98, 0.99, 0.995, 0.998, 0.999): print('{:.1f}%: {} body components'.format(100 * v, pca.num_components(v))) pca.save(os.path.join(output, 'pca-body-relative.npz')) goal = database.Movement(pd.concat([t.df for t in pca_trials])) goal.make_target_relative() pca = lmj.pca.PCA() pca.fit(goal.df[COLUMNS]) for v in (0.5, 0.8, 0.9, 0.95, 0.98, 0.99, 0.995, 0.998, 0.999): print('{:.1f}%: {} goal components'.format(100 * v, pca.num_components(v))) pca.save(os.path.join(output, 'pca-goal-relative.npz')) joblib.Parallel(-1)(joblib.delayed(compress)(t, output, variance) for t in trials) if __name__ == '__main__': climate.call(main)
Python
0
@@ -3589,16 +3589,94 @@ trials%5D +%0A probes = (variance, 0.5, 0.8, 0.9, 0.95, 0.98, 0.99, 0.995, 0.998, 0.999) %0A%0A # @@ -3748,17 +3748,17 @@ N = -2 +3 %0A pca @@ -4187,62 +4187,14 @@ in -(0.5, 0.8, 0.9, 0.95, 0.98, 0.99, 0.995, 0.998, 0.999) +probes :%0A @@ -4508,62 +4508,14 @@ in -(0.5, 0.8, 0.9, 0.95, 0.98, 0.99, 0.995, 0.998, 0.999) +probes :%0A
e7163abf13e5cec78f3cd894bd3b8393f9cea6d2
Fix counting total samples in cast view.
genome_designer/main/data_util.py
genome_designer/main/data_util.py
""" Common methods for getting data from the backend. These methods are intended to be used by both views.py, which should define only pages, and xhr_handlers.py, which are intended to respond to AJAX requests. This module interacts closely with the ModelViews in model_views.py. """ from collections import defaultdict from django.db import connection from main.model_views import CastVariantView from main.model_views import MeltedVariantView from main.models import ExperimentSample from main.models import Variant from main.models import VariantEvidence from variants.common import dictfetchall from variants.materialized_variant_filter import get_variants_that_pass_filter class LookupVariantsResult(object): """Result of a call to lookup_variants. Attributes: result_list: List of cast or melted Variant objects. num_total_variants: Total number of variants that match query. For pagination. """ def __init__(self, result_list, num_total_variants): self.result_list = result_list self.num_total_variants = num_total_variants def lookup_variants(reference_genome, combined_filter_string, is_melted, pagination_start, pagination_len): """Manages the end-to-end flow of looking up Variants that match the given filter. This function delegates to the variant_filter module to get the list of variants matching the filter. Then, this function takes those results and handles casting them to appropriate view-type objects (e.g. Melted vs Cast). Returns: LookupVariantsResult object. """ # First get the Variants that pass the filter. filter_eval_result = get_variants_that_pass_filter(combined_filter_string, reference_genome) result_list = list(filter_eval_result.variant_set) # If this is a melted view, return results as they are. if is_melted: # TODO: Handle pagination. page_results = result_list[pagination_start : pagination_start + pagination_len] num_total_variants = 1000000 return LookupVariantsResult(page_results, num_total_variants) # Otherwise, we need to Cast the results. page_results = cast_joined_variant_objects(result_list) page_results = page_results[pagination_start : pagination_start + pagination_len] num_total_variants = 1000000 return LookupVariantsResult(page_results, num_total_variants) def cast_joined_variant_objects(melted_variant_list): """Converts the list of melted variants into a cast representation. This means returning one row per variant, compressing other columns into an aggregate representation. For example, the 'experiment_sample_uid' column becomes the 'total_samples'. """ cast_obj_list = [] # First, we build a structure from variant id to list of result rows. variant_id_to_result_row = defaultdict(list) for result in melted_variant_list: variant_id_to_result_row[result['id']].append(result) for variant_id, result_row_list in variant_id_to_result_row.iteritems(): assert len(result_row_list), "Not expected. Debug." position = result_row_list[0]['position'] ref = result_row_list[0]['ref'] uid = result_row_list[0]['uid'] total_samples = len(result_row_list) cast_obj_list.append({ 'id': variant_id, 'uid': uid, 'position': position, 'ref': ref, 'alt': 'TODO', 'total_samples': total_samples }) return cast_obj_list
Python
0
@@ -2636,14 +2636,8 @@ ing -other colu @@ -2690,16 +2690,52 @@ example, + in this initial%0A implementation, the 'ex @@ -2754,20 +2754,16 @@ ple_uid' -%0A column @@ -2770,20 +2770,16 @@ becomes -the 'total_s @@ -3315,52 +3315,182 @@ d'%5D%0A - total_samples = len(result_row_list) +%0A # Count total samples.%0A total_samples = 0%0A for row in result_row_list:%0A if row%5B'experiment_sample_uid'%5D:%0A total_samples += 1%0A %0A
928cb2244de7a167a754f3cf303a913d814afc66
Use C json encoder
openfisca_web_api/wsgihelpers.py
openfisca_web_api/wsgihelpers.py
# -*- coding: utf-8 -*- # OpenFisca -- A versatile microsimulation software # By: OpenFisca Team <contact@openfisca.fr> # # Copyright (C) 2011, 2012, 2013, 2014 OpenFisca Team # https://github.com/openfisca # # This file is part of OpenFisca. # # OpenFisca is free software; you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # OpenFisca is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """Decorators to wrap functions to make them WSGI applications. The main decorator :class:`wsgify` turns a function into a WSGI application. """ import collections import json import webob.dec import webob.exc N_ = lambda message: message errors_title = { 400: N_("Unable to Access"), 401: N_("Access Denied"), 403: N_("Access Denied"), 404: N_("Unable to Access"), } wsgify = webob.dec.wsgify def handle_cross_origin_resource_sharing(ctx): # Cf http://www.w3.org/TR/cors/#resource-processing-model environ = ctx.req.environ headers = [] origin = environ.get('HTTP_ORIGIN') if origin is None: return headers if ctx.req.method == 'OPTIONS': method = environ.get('HTTP_ACCESS_CONTROL_REQUEST_METHOD') if method is None: return headers headers_name = environ.get('HTTP_ACCESS_CONTROL_REQUEST_HEADERS') or '' headers.append(('Access-Control-Allow-Credentials', 'true')) headers.append(('Access-Control-Allow-Origin', origin)) headers.append(('Access-Control-Max-Age', '3628800')) headers.append(('Access-Control-Allow-Methods', method)) headers.append(('Access-Control-Allow-Headers', headers_name)) raise webob.exc.status_map[204](headers = headers) # No Content headers.append(('Access-Control-Allow-Credentials', 'true')) headers.append(('Access-Control-Allow-Origin', origin)) headers.append(('Access-Control-Expose-Headers', 'WWW-Authenticate')) return headers def respond_json(ctx, data, code = None, headers = None, jsonp = None): """Return a JSON response. This function is optimized for JSON following `Google JSON Style Guide <http://google-styleguide.googlecode.com/svn/trunk/jsoncstyleguide.xml>`_, but will handle any JSON except for HTTP errors. """ if isinstance(data, collections.Mapping): # Remove null properties as recommended by Google JSON Style Guide. 
data = type(data)( (name, value) for name, value in data.iteritems() if value is not None ) error = data.get('error') if isinstance(error, collections.Mapping): error = data['error'] = type(error)( (name, value) for name, value in error.iteritems() if value is not None ) else: error = None if headers is None: headers = [] if jsonp: content_type = 'application/javascript; charset=utf-8' else: content_type = 'application/json; charset=utf-8' if error: code = code or error['code'] assert isinstance(code, int) response = webob.exc.status_map[code](headers = headers) response.content_type = content_type if code == 204: # No content return response if error.get('code') is None: error['code'] = code if error.get('message') is None: title = errors_title.get(code) title = ctx._(title) if title is not None else response.status error['message'] = title else: response = ctx.req.response response.content_type = content_type if code is not None: response.status = code response.headers.update(headers) try: text = json.dumps(data, encoding = 'utf-8', ensure_ascii = False, indent = 2) except UnicodeDecodeError: text = json.dumps(data, ensure_ascii = True, indent = 2) text = unicode(text) if jsonp: text = u'{0}({1})'.format(jsonp, text) response.text = text return response
Python
0.000002
@@ -4231,16 +4231,18 @@ ers)%0A + # try:%0A @@ -4238,24 +4238,26 @@ # try:%0A + # text = @@ -4330,16 +4330,18 @@ = 2)%0A + # except @@ -4359,24 +4359,26 @@ deError:%0A + # text = @@ -4423,24 +4423,52 @@ indent = 2)%0A + text = json.dumps(data)%0A text = u
a391da79f8213d26246234e489d0947b8b4b2a82
Update to allo no CSRF when logining in on mobile
OctaHomeCore/authviews.py
OctaHomeCore/authviews.py
from django.contrib.auth import authenticate, login, logout from OctaHomeCore.baseviews import * from OctaHomeCore.models import * class handleLoginView(viewRequestHandler): loginToken = '' def handleRequest(self): if self.Request.user.is_authenticated(): return super(handleLoginView, self).handleRequest() if self.Post.has_key('username') and self.Post.has_key('password'): user = authenticate(username=self.Post['username'], password=self.Post['password']) if user is not None and user.authy_id != "": self.loginToken = user.get_login_token() elif user is not None: login(self.Request, user) elif self.Post.has_key('authytoken') and self.Post.has_key('logintoken'): user = CustomUser().objects.authyCheck(self.Post['username'], self.Post['logintoken'], self.Post['authytoken']) if user is not None: login(self.Request, user) return super(handleLoginView, self).handleRequest() def getTemplate(self): if self.Request.user != None and self.Request.user.is_authenticated(): if self.Post.has_key('next') and self.Post['next'] != '': self.redirect(self.Post['next']) else: self.redirect(reverse('Home')) return '' if self.loginToken: return 'OctaHomeCore/pages/Account/AuthyLogin' else: return 'OctaHomeCore/pages/Account/Login' def getViewParameters(self): parameters = {} if self.Post.has_key('next'): parameters.update({ 'next':self.Post['next'] }) if self.loginToken != '' and self.Post.has_key('username'): parameters.update({ 'username':self.Post['username'], 'logintoken':self.loginToken }) return parameters def getSideBar(self): return [] def getSidebarUrlName(self): return '' def isPageSecured(self): return False class handleDeviceLoginView(viewRequestHandler): def handleRequest(self): if self.Request.user.is_authenticated(): return super(handleLoginView, self).handleRequest() if self.Post.has_key('loginToken'): loginItems = self.Post['loginToken'].split(",") if len(loginItems) == 2: device = DeviceUser.objects.get(pk=loginItems[0]) if device is not None and device.User is not None and device.checkToken(loginItems[1]): login(self.Request, device.User) return super(handleLoginView, self).handleRequest() def getTemplate(self): if self.Request.user != None and self.Request.user.is_authenticated(): if self.Post.has_key('next') and self.Post['next'] != '': self.redirect(self.Post['next']) else: self.redirect(reverse('Home')) return '' return 'OctaHomeCore/pages/Account/Login' def getViewParameters(self): parameters = {} if self.Post.has_key('next'): parameters.update({ 'next':self.Post['next'] }) if self.loginToken != '' and self.Post.has_key('username'): parameters.update({ 'username':self.Post['username'], 'logintoken':self.loginToken }) return parameters def getSideBar(self): return [] def getSidebarUrlName(self): return '' def isPageSecured(self): return False class handleLogOutView(viewRequestHandler): def handleRequest(self): logout(self.Request) return redirect(reverse('Home'))
Python
0
@@ -123,16 +123,69 @@ import * +%0Afrom django.views.decorators.csrf import csrf_exempt %0A%0Aclass @@ -1831,32 +1831,167 @@ equestHandler):%0A +%09@csrf_exempt%0A%09def post(self, request, *args, **kwargs):%0A%09%09return super(handleLoginView, self).post(self, request, *args, **kwargs)%0A%09%09%0A %09def handleReque
caff96633ce29a2139bc61bb5ee333efd69d50ef
Remove default classifier path from default config
processmysteps/default_config.py
processmysteps/default_config.py
""" Base line settings """ CONFIG = { 'input_path': None, 'backup_path': None, 'dest_path': None, 'life_all': None, 'db': { 'host': None, 'port': None, 'name': None, 'user': None, 'pass': None }, # 'preprocess': { # 'max_acc': 30.0 # }, 'smoothing': { 'use': True, 'algorithm': 'inverse', 'noise': 10 }, 'segmentation': { 'use': True, 'epsilon': 1.0, 'min_time': 80 }, 'simplification': { 'max_dist_error': 2.0, 'max_speed_error': 1.0, 'eps': 0.15 }, 'location': { 'max_distance': 20, 'min_samples': 2, 'limit': 5, 'google_key': '' }, 'transportation': { 'remove_stops': False, 'min_time': 60, 'classifier_path': 'classifier.data'# None }, 'trip_learning': { 'epsilon': 0.0, 'classifier_path': None, }, 'trip_name_format': '%Y-%m-%d' }
Python
0.000001
@@ -851,16 +851,21 @@ _path': +None# 'classif
2aaa2c6794176a9867d10d3a32a48ee5b8cef7f5
version 0.17.0
crontabber/__init__.py
crontabber/__init__.py
__version__ = '0.16.1'
Python
0.000001
@@ -15,9 +15,9 @@ '0.1 -6.1 +7.0 '%0A
22f9b4bacbb0662d3c4de67218ff43cea9588f66
Add keyword argument handling to unicode decorator
crypto_enigma/utils.py
crypto_enigma/utils.py
#!/usr/bin/env python # encoding: utf8 # Copyright (C) 2015 by Roy Levien. # This file is part of crypto-enigma, an Enigma Machine simulator. # released under the BSD-3 License (see LICENSE.txt). """ Description .. note:: Any additional note. """ from __future__ import (absolute_import, print_function, division, unicode_literals) import time import sys # TBD - Generalize to other platforms; test? def print_over(s, backup=True, delay=0.2): if backup: print('', end='\r') print("\033[F" * (s.count('\n')+2)) print(s) sys.stdout.flush() time.sleep(delay) def num_A0(c): return ord(c) - ord('A') def chr_A0(n): return chr(n + ord('A')) def ordering(items): return [i[1] for i in sorted(zip(items, range(0, len(items))))] # standard simple-substitution cypher encoding def encode_char(mapping, ch): if ch == ' ': return ' ' else: return mapping[num_A0(ch)] def encode_string(mapping, string): return ''.join([encode_char(mapping, ch) for ch in string]) # scan, because it's missing from Python; implemented to anticipate Python 3 def accumulate(l, f): it = iter(l) total = next(it) yield total for element in it: total = f(total, element) yield total # also missing from Python def chunk_of(it, n): return [it[i:i+n] for i in range(0, len(it), n)] # require unicode strings (see unicode_literal in enigma.py) - http://stackoverflow.com/a/33743668/656912 def require_unicode(*given_arg_names): def check_types(_func_, *args): def modified(*args): arg_names = list(_func_.func_code.co_varnames[:_func_.func_code.co_argcount]) if len(given_arg_names) == 0: unicode_arg_names = arg_names else: unicode_arg_names = given_arg_names for unicode_arg_name in unicode_arg_names: try: arg_index = arg_names.index(unicode_arg_name) except ValueError: raise NameError(unicode_arg_name) arg = args[arg_index] if not isinstance(arg, unicode): raise TypeError("Parameter '{}' should be Unicode".format(unicode_arg_name)) return _func_(*args) return modified return check_types
Python
0.000001
@@ -1435,18 +1435,20 @@ igma.py) +%0A# -- http:// @@ -1483,16 +1483,88 @@ /656912%0A +# http://code.activestate.com/recipes/454322-type-checking-decorator/%0A def requ @@ -1623,24 +1623,35 @@ func_, *args +, **kwargs ):%0A d @@ -1663,24 +1663,34 @@ dified(*args +, **kwargs ):%0A @@ -1850,32 +1850,79 @@ mes = arg_names%0A + #unicode_arg_names = arg_names%0A else @@ -2137,63 +2137,109 @@ -except ValueError:%0A raise NameError( + if len(args) %3E arg_index:%0A arg = args%5Barg_index%5D%0A elif unic @@ -2242,33 +2242,43 @@ unicode_arg_name -) + in kwargs: %0A @@ -2278,38 +2278,187 @@ + arg = +kw args%5B -arg_index +unicode_arg_name %5D%0A + else:%0A # Not given as argument, even though in list%0A continue%0A @@ -2498,16 +2498,20 @@ icode):%0A + @@ -2599,16 +2599,105 @@ _name))%0A + except ValueError:%0A raise NameError(unicode_arg_name)%0A @@ -2719,16 +2719,26 @@ c_(*args +, **kwargs )%0A
d8fc3888f0b40a8b7a476fc3fec0ca3dfe7a2416
make API able to work with single names
gender.py
gender.py
import requests, json

def getGenders(names):
	url = ""
	cnt = 0
	for name in names:
		if url == "":
			url = "name[0]=" + name
		else:
			cnt += 1
			url = url + "&name[" + str(cnt) + "]=" + name

	req = requests.get("http://api.genderize.io?" + url)
	results = json.loads(req.text)

	retrn = []
	for result in results:
		if result["gender"] is not None:
			retrn.append((result["gender"], result["probability"], result["count"]))
		else:
			retrn.append((u'None',u'0.0',0.0))
	return retrn

if __name__ == '__main__':
	print getGenders(["Brian","Apple","Jessica","Zaeem","NotAName"])
Python
0
@@ -58,16 +58,69 @@ cnt = 0%0A +%09if not isinstance(names,list):%0A%09%09names = %5Bnames,%5D%0A%09%0A %09for nam @@ -131,16 +131,16 @@ names:%0A - %09%09if url @@ -333,16 +333,61 @@ q.text)%0A +%09if len(names)==1 :%0A%09%09results = %5B results, %5D%0A %09%0A%09retrn @@ -621,17 +621,17 @@ :%0A%09print - +( getGende @@ -680,9 +680,10 @@ AName%22%5D) +) %0A
1933f6d3f97846a860a1b12ab25003cb807f3b4e
add rankings and alliances to csv backup
controllers/backup_controller.py
controllers/backup_controller.py
import cloudstorage
import csv
import os

from google.appengine.api import taskqueue
from google.appengine.ext import ndb
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template

from models.award import Award
from models.event import Event
from models.match import Match


class TbaCSVBackupEnqueue(webapp.RequestHandler):
    """
    Enqueues CSV backup
    """
    def get(self, year=None):
        if year is None:
            event_keys = Event.query().fetch(None, keys_only=True)
        else:
            event_keys = Event.query(Event.year == int(year)).fetch(None, keys_only=True)

        for event_key in event_keys:
            taskqueue.add(
                url='/tasks/do/csv_backup_event/{}'.format(event_key.id()),
                method='GET')

        template_values = {'event_keys': event_keys}
        path = os.path.join(os.path.dirname(__file__), '../templates/backup/csv_backup_enqueue.html')
        self.response.out.write(template.render(path, template_values))


class TbaCSVBackupEventDo(webapp.RequestHandler):
    """
    Backs up event awards, matches, and team list
    """
    AWARDS_FILENAME_PATTERN = '/tbatv-prod-hrd.appspot.com/tba-data-backup/{}/{}/{}_awards.csv' # % (year, event_key, event_key)
    MATCHES_FILENAME_PATTERN = '/tbatv-prod-hrd.appspot.com/tba-data-backup/{}/{}/{}_matches.csv' # % (year, event_key, event_key)
    TEAMS_FILENAME_PATTERN = '/tbatv-prod-hrd.appspot.com/tba-data-backup/{}/{}/{}_teams.csv' # % (year, event_key, event_key)

    def get(self, event_key):
        event = Event.get_by_id(event_key)
        event.prepAwardsMatchesTeams()

        with cloudstorage.open(self.AWARDS_FILENAME_PATTERN.format(event.year, event_key, event_key), 'w') as awards_file:
            writer = csv.writer(awards_file, delimiter=',')
            for award in event.awards:
                for recipient in award.recipient_list:
                    team = recipient['team_number']
                    if type(team) == int:
                        team = 'frc{}'.format(team)
                    self._writerow_unicode(writer, [award.key.id(), award.name_str, team, recipient['awardee']])

        with cloudstorage.open(self.MATCHES_FILENAME_PATTERN.format(event.year, event_key, event_key), 'w') as matches_file:
            writer = csv.writer(matches_file, delimiter=',')
            for match in event.matches:
                red_score = match.alliances['red']['score']
                blue_score = match.alliances['blue']['score']
                self._writerow_unicode(writer, [match.key.id()] + match.alliances['red']['teams'] + match.alliances['blue']['teams'] + [red_score, blue_score])

        with cloudstorage.open(self.TEAMS_FILENAME_PATTERN.format(event.year, event_key, event_key), 'w') as teams_file:
            writer = csv.writer(teams_file, delimiter=',')
            self._writerow_unicode(writer, [team.key.id() for team in event.teams])

        self.response.out.write("Done backing up {}!".format(event_key))

    def _writerow_unicode(self, writer, row):
        unicode_row = []
        for s in row:
            try:
                unicode_row.append(s.encode("utf-8"))
            except:
                unicode_row.append(s)
        writer.writerow(unicode_row)
Python
0
@@ -1114,12 +1114,8 @@ hes, - and tea @@ -1120,16 +1120,56 @@ eam list +, rankings, and alliance selection order %0A %22%22%22 @@ -1555,24 +1555,294 @@ , event_key) +%0A RANKINGS_FILENAME_PATTERN = '/tbatv-prod-hrd.appspot.com/tba-data-backup/%7B%7D/%7B%7D/%7B%7D_rankings.csv' # %25 (year, event_key, event_key)%0A ALLIANCES_FILENAME_PATTERN = '/tbatv-prod-hrd.appspot.com/tba-data-backup/%7B%7D/%7B%7D/%7B%7D_alliances.csv' # %25 (year, event_key, event_key) %0A%0A def ge @@ -3252,24 +3252,619 @@ nt.teams%5D)%0A%0A + with cloudstorage.open(self.RANKINGS_FILENAME_PATTERN.format(event.year, event_key, event_key), 'w') as rankings_file:%0A writer = csv.writer(rankings_file, delimiter=',')%0A for row in event.rankings:%0A self._writerow_unicode(writer, row)%0A%0A with cloudstorage.open(self.ALLIANCES_FILENAME_PATTERN.format(event.year, event_key, event_key), 'w') as alliances_file:%0A writer = csv.writer(alliances_file, delimiter=',')%0A for alliance in event.alliance_selections:%0A self._writerow_unicode(writer, alliance%5B'picks'%5D)%0A%0A self
1564cd721b9ca4c2eaa98a7ac999a3bd5531a4a8
Update batch.py
tensorflow/contrib/learn/python/learn/dataframe/transforms/batch.py
tensorflow/contrib/learn/python/learn/dataframe/transforms/batch.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Batches `Series` objects. For internal use, not part of the public API.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib.learn.python.learn.dataframe import transform from tensorflow.python.training import input as input_ops class AbstractBatchTransform(transform.TensorFlowTransform): """Abstract parent class for batching Transforms.""" def __init__(self, batch_size, output_names, num_threads=1, queue_capacity=None): super(AbstractBatchTransform, self).__init__() self._batch_size = batch_size self._output_name_list = output_names self._num_threads = num_threads self._queue_capacity = (self.batch_size * 10 if queue_capacity is None else queue_capacity) @transform.parameter def batch_size(self): return self._batch_size @transform.parameter def num_threads(self): return self._num_threads @transform.parameter def queue_capacity(self): return self._queue_capacity @property def input_valency(self): return len(self.output_names) @property def _output_names(self): return self._output_name_list class Batch(AbstractBatchTransform): """Batches Columns to specified size. Note that dimension 0 is assumed to correspond to "example number" so `Batch` does not prepend an additional dimension to incoming `Series`. For example, if a `Tensor` in `transform_input` has shape [x, y], the corresponding output will have shape [batch_size, y]. """ @property def name(self): return "Batch" def _apply_transform(self, transform_input, **kwargs): batched = input_ops.batch(transform_input, batch_size=self.batch_size, num_threads=self.num_threads, capacity=self.queue_capacity, enqueue_many=True) # TODO(jamieas): batch will soon return a list regardless of the number of # enqueued tensors. Remove the following once that change is in place. if not isinstance(batched, (tuple, list)): batched = (batched,) # pylint: disable=not-callable return self.return_type(*batched) class ShuffleBatch(AbstractBatchTransform): """Creates shuffled batches from `Series` containing a single row. Note that dimension 0 is assumed to correspond to "example number" so `ShuffleBatch` does not prepend an additional dimension to incoming `Series`. For example, if a `Tensor` in `transform_input` has shape [x, y], the corresponding output will have shape [batch_size, y]. 
""" @property def name(self): return "ShuffleBatch" def __init__(self, batch_size, output_names, num_threads=1, queue_capacity=None, min_after_dequeue=None, seed=None): super(ShuffleBatch, self).__init__(batch_size, output_names, num_threads, queue_capacity) self._min_after_dequeue = int(self.queue_capacity / 4 if min_after_dequeue is None else min_after_dequeue) self._seed = seed @transform.parameter def min_after_dequeue(self): return self._min_after_dequeue @transform.parameter def seed(self): return self._seed def _apply_transform(self, transform_input, **kwargs): batched = input_ops.shuffle_batch(transform_input, batch_size=self.batch_size, capacity=self.queue_capacity, min_after_dequeue=self.min_after_dequeue, num_threads=self.num_threads, seed=self.seed, enqueue_many=True) # TODO(jamieas): batch will soon return a list regardless of the number of # enqueued tensors. Remove the following once that change is in place. if not isinstance(batched, (tuple, list)): batched = (batched,) # pylint: disable=not-callable return self.return_type(*batched)
Python
0.000001
@@ -12,9 +12,9 @@ 201 -5 +6 The
fc6c6f9ecbf694198c650cf86151423226304c51
put import statement in try
alphatwirl/delphes/load_delphes.py
alphatwirl/delphes/load_delphes.py
# Tai Sakuma <tai.sakuma@cern.ch>
import ROOT

_loaded = False

##__________________________________________________________________||
def load_delphes():

    global _loaded
    if _loaded: return

    # https://root.cern.ch/phpBB3/viewtopic.php?t=21603
    ROOT.gInterpreter.Declare('#include "classes/DelphesClasses.h"')

    # https://cp3.irmp.ucl.ac.be/projects/delphes/ticket/1039
    ROOT.gInterpreter.Declare('#include "external/ExRootAnalysis/ExRootTreeReader.h"')

    ROOT.gSystem.Load("libDelphes.so")

    _loaded = True

##__________________________________________________________________||
Python
0.000001
@@ -27,16 +27,25 @@ ern.ch%3E%0A +try:%0A import R @@ -47,16 +47,45 @@ ort ROOT +%0Aexcept ImportError:%0A pass %0A%0A_loade
1eb648b14c52c9a2e715774ec71b2c8e6228efc4
add vtkNumpy.numpyToImageData() function
src/python/director/vtkNumpy.py
src/python/director/vtkNumpy.py
from director.shallowCopy import shallowCopy
import director.vtkAll as vtk
from vtk.util import numpy_support
import numpy as np


def numpyToPolyData(pts, pointData=None, createVertexCells=True):
    pd = vtk.vtkPolyData()
    pd.SetPoints(getVtkPointsFromNumpy(pts.copy()))

    if pointData is not None:
        for key, value in pointData.iteritems():
            addNumpyToVtk(pd, value.copy(), key)

    if createVertexCells:
        f = vtk.vtkVertexGlyphFilter()
        f.SetInputData(pd)
        f.Update()
        pd = shallowCopy(f.GetOutput())

    return pd


def getNumpyFromVtk(dataObj, arrayName='Points', arrayType='points'):
    assert arrayType in ('points', 'cells')

    if arrayName == 'Points':
        vtkArray = dataObj.GetPoints().GetData()
    elif arrayType == 'points':
        vtkArray = dataObj.GetPointData().GetArray(arrayName)
    else:
        vtkArray = dataObj.GetCellData().GetArray(arrayName)

    if not vtkArray:
        raise KeyError('Array not found')

    return numpy_support.vtk_to_numpy(vtkArray)


def getVtkPointsFromNumpy(numpyArray):
    points = vtk.vtkPoints()
    points.SetData(getVtkFromNumpy(numpyArray))
    return points


def getVtkPolyDataFromNumpyPoints(points):
    return numpyToPolyData(points)


def getVtkFromNumpy(numpyArray):

    def MakeCallback(numpyArray):
        def Closure(caller, event):
            closureArray = numpyArray
        return Closure

    vtkArray = numpy_support.numpy_to_vtk(numpyArray)
    vtkArray.AddObserver('DeleteEvent', MakeCallback(numpyArray))
    return vtkArray


def addNumpyToVtk(dataObj, numpyArray, arrayName, arrayType='points'):
    assert arrayType in ('points', 'cells')

    vtkArray = getVtkFromNumpy(numpyArray)
    vtkArray.SetName(arrayName)

    if arrayType == 'points':
        assert dataObj.GetNumberOfPoints() == numpyArray.shape[0]
        dataObj.GetPointData().AddArray(vtkArray)
    else:
        assert dataObj.GetNumberOfCells() == numpyArray.shape[0]
        dataObj.GetCellData().AddArray(vtkArray)
Python
0.000004
@@ -568,16 +568,422 @@ rn pd%0A%0A%0A +def numpyToImageData(img, flip=True, vtktype=vtk.VTK_UNSIGNED_CHAR):%0A if flip:%0A img = np.flipud(img)%0A height, width, numChannels = img.shape%0A image = vtk.vtkImageData()%0A image.SetDimensions(width, height, 1)%0A image.AllocateScalars(vtktype, numChannels)%0A scalars = getNumpyFromVtk(image, 'ImageScalars')%0A scalars%5B:%5D = img.reshape(width*height, numChannels)%5B:%5D%0A return image%0A%0A%0A def getN
981e9a2348953374cc18669318d1d7e92197e0e1
Update clinical trials
providers/gov/clinicaltrials/normalizer.py
providers/gov/clinicaltrials/normalizer.py
import pendulum from share.normalize import * class Tag(Parser): name = ctx class ThroughTags(Parser): tag = Delegate(Tag, ctx) class AgentIdentifier(Parser): # email address uri = IRI(ctx) class WorkIdentifier(Parser): uri = IRI(ctx) class AffiliatedAgent(Parser): schema = GuessAgentType(ctx, default='organization') name = ctx class IsAffiliatedWith(Parser): related = Delegate(AffiliatedAgent, ctx) class Institution(Parser): name = OneOf(ctx.agency, ctx.facility.name, ctx) location = RunPython('get_location', Try(ctx.facility.address)) class Extra: agency_class = Try(ctx.agency_class) def get_location(self, ctx): location = "" if 'country' in ctx: location += ctx['country'] + ': ' if 'city' in ctx: location += ctx['city'] + ', ' if 'state' in ctx: location += ctx['state'] + ' ' return location class Person(Parser): given_name = Maybe(ctx, 'first_name') family_name = Maybe(ctx, 'last_name') additional_name = Maybe(ctx, 'middle_name') identifiers = Map(Delegate(AgentIdentifier), Try(ctx.email)) related_agents = Map(Delegate(IsAffiliatedWith), Try(ctx.affiliation)) class Contributor(Parser): agent = Delegate(Person, ctx) class CreativeWork(Parser): title = OneOf( ctx.clinical_study.official_title, ctx.clinical_study.brief_title ) description = Maybe(ctx.clinical_study, 'brief_summary')['textblock'] related_agents = Concat( Map(Delegate(Contributor), Maybe(ctx.clinical_study, 'overall_official')), Map(Delegate(Contributor), Maybe(ctx.clinical_study, 'overall_contact')), Map(Delegate(Contributor), Maybe(ctx.clinical_study, 'overall_contact_backup')), Map(Delegate(Institution), Concat(ctx.clinical_study.sponsors.lead_sponsor, Maybe(ctx.clinical_study.sponsors, 'collaborator'), RunPython('get_locations', Concat(Try(ctx.clinical_study.location))))) ) tags = Map(Delegate(ThroughTags), Maybe(ctx.clinical_study, 'keyword')) identifiers = Concat(Map(Delegate(WorkIdentifier), Concat( ctx['clinical_study']['required_header']['url'], RunPython('format_url', ctx.clinical_study.id_info.nct_id, 'http://www.bioportfolio.com/resources/trial/'), RunPython('format_url', Try(ctx.clinical_study.reference.PMID), 'www.ncbi.nlm.nih.gov/pubmed/')))) class Extra: share_harvest_date = ctx.clinical_study.required_header.download_date org_study_id = ctx.clinical_study.id_info.org_study_id status = ctx.clinical_study.overall_status start_date = RunPython('parse_date', Try(ctx.clinical_study.start_date)) completion_date = RunPython('parse_date', Try(ctx.clinical_study.completion_date['#text'])) completion_date_type = Try(ctx.clinical_study.completion_date['@type']) study_type = ctx.clinical_study.study_type conditions = ctx.clinical_study.condition is_fda_regulated = ctx.clinical_study.is_fda_regulated is_section_801 = Try(ctx.clinical_study.is_section_801) citation = Try(ctx.clinical_study.reference.citation) def get_locations(self, locations): results = [] for location in locations: if 'name' in location['facility']: results.append(location) return results def parse_date(self, date): try: return pendulum.from_format(date, '%M %d, %Y').isoformat() except ValueError: return pendulum.from_format(date, '%B %Y').isoformat() def format_url(self, id, base): return base + id
Python
0
@@ -1306,24 +1306,87 @@ son, ctx)%0A%0A%0A +class Funder(Parser):%0A agent = Delegate(Institution, ctx)%0A%0A%0A class Creati @@ -1887,27 +1887,22 @@ elegate( -Institution +Funder ),%0A
f95ab3d2e9a9fc7c92698aded033f4860225c718
Add rate reporting for oadoi importer
backend/oadoi.py
backend/oadoi.py
# -*- encoding: utf-8 -*- import gzip import json from django.db import DataError from papers.models import Paper from papers.models import OaiSource from papers.baremodels import BareOaiRecord from papers.doi import doi_to_crossref_identifier from papers.doi import doi_to_url from papers.doi import to_doi from backend.doiprefixes import free_doi_prefixes from papers.errors import MetadataSourceException class OadoiAPI(object): """ An interface to import an OAdoi dump into dissemin """ def __init__(self): self.oadoi_source, _ = OaiSource.objects.get_or_create( identifier='oadoi_repo', defaults= {'name':'OAdoi', 'oa':True, 'priority':-10, 'default_pubtype':'preprint'}) self.crossref_source = OaiSource.objects.get(identifier='crossref') def load_dump(self, filename, start_doi=None, update_index=False, create_missing_dois=True): """ Reads a dump from the disk and loads it to the db """ with gzip.open(filename, 'r') as f: start_doi_seen = start_doi is None for idx, line in enumerate(f): record = json.loads(line.decode('utf-8')) if not start_doi_seen and record.get('doi') == start_doi: start_doi_seen = True if idx % 10000 == 0: print(idx, record.get('doi')) if start_doi_seen: self.create_oairecord(record, update_index, create_missing_dois) def create_oairecord(self, record, update_index=True, create_missing_dois=True): """ Given one line of the dump (represented as a dict), add it to the corresponding paper (if it exists) """ doi = to_doi(record['doi']) if not doi: return prefix = doi.split('/')[0] if prefix in free_doi_prefixes: return if not record.get('oa_locations'): return paper = Paper.get_by_doi(doi) if not paper: if not create_missing_dois: return try: paper = Paper.create_by_doi(doi) except (MetadataSourceException, ValueError): return if not paper: print('no such paper for doi {doi}'.format(doi=doi)) return print(doi) paper.cache_oairecords() for oa_location in record.get('oa_locations') or []: url = oa_location['url'] # just to speed things up a bit... if paper.pdf_url == url: return identifier='oadoi:'+url source = self.oadoi_source if oa_location['host_type'] == 'publisher': url = doi_to_url(doi) identifier = doi_to_crossref_identifier(doi) source = self.crossref_source record = BareOaiRecord( paper=paper, doi=doi, pubtype=paper.doctype, source=source, identifier=identifier, splash_url=url, pdf_url=oa_location['url']) try: # We disable checks by DOI since we know the paper has been looked up by DOI already. old_pdf_url = paper.pdf_url paper.add_oairecord(record, check_by_doi=False) super(Paper, paper).update_availability() if old_pdf_url != paper.pdf_url: paper.save() if update_index: paper.update_index() except (DataError, ValueError): print('Record does not fit in the DB')
Python
0
@@ -76,16 +76,46 @@ ataError +%0Afrom datetime import datetime %0A%0Afrom p @@ -1049,32 +1049,97 @@ db%0A %22%22%22%0A + last_rate_report = None%0A report_batch_size = 1000%0A with gzi @@ -1166,16 +1166,16 @@ ) as f:%0A - @@ -1459,13 +1459,25 @@ x %25 -10000 +report_batch_size == @@ -1529,16 +1529,333 @@ 'doi'))%0A + if last_rate_report:%0A td = (datetime.utcnow() - last_rate_report).total_seconds()%0A if td:%0A print('importing speed: %7B%7D lines/sec'.format(report_batch_size/float(td)))%0A last_rate_report = datetime.utcnow()%0A%0A %0A
5f522cf58a1566513e874002bdaeb063e8a02497
Update model and add TODO
server/models/checkup.py
server/models/checkup.py
# -*- coding: utf-8 -*-
from datetime import datetime

from app import db


class Checkup(db.Model):
    __tablename__ = 'checkup'

    id = db.Column(db.Integer, primary_key=True)
    created = db.Column(db.DateTime, default=datetime.utcnow)
    repo_name = db.Column(db.String, unique=True) # github-user/repo-name
    criteria = db.relationship('Criterion', backref='criterion', lazy='dynamic')
Python
0
@@ -244,77 +244,144 @@ -repo_name = db.Column(db.String, unique=True) # github-user/repo-name +# TODO: add one unique constraint on the column group of owner and repo%0A owner = db.Column(db.String)%0A repo = db.Column(db.String) %0A
2f61692dd05f2ef529c9d2556c59eb7bc720b1f7
Fixed? reset password
oclubs/access/email.py
oclubs/access/email.py
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
"""
Module to send emails.

This module sends emails with either Postfix or SendGrid.
"""

from __future__ import absolute_import, unicode_literals

import traceback

from envelopes import Envelope, SMTP

from oclubs.access.delay import delayed_func

from_email = ('no-reply@connect.shs.cn', 'Connect')


@delayed_func
def send(to_email, subject, content):
    """
    Send an email.

    :param tuple to_email: email recipient address and name
    :param basestring subject: email subject
    :param basestring content: email content
    """
    try:
        conn = SMTP('127.0.0.1', 25)
        mail = Envelope(
            to_addr=to_email[0],
            from_addr=from_email,
            subject=subject,
            text_body=content
        )
        conn.send(mail)
    except Exception:
        traceback.print_exc()
Python
0.999775
@@ -620,17 +620,22 @@ TP(' -127.0.0.1 +connect.shs.cn ', 2
fb1ddcdd789d1c1be02a9f6d63a21548a8cf584e
Fix undo of PlatformPhysicsOperation after the SceneNode changes
printer/PlatformPhysicsOperation.py
printer/PlatformPhysicsOperation.py
from UM.Operations.Operation import Operation
from UM.Operations.AddSceneNodeOperation import AddSceneNodeOperation
from UM.Operations.TranslateOperation import TranslateOperation
from UM.Operations.GroupedOperation import GroupedOperation

## A specialised operation designed specifically to modify the previous operation.
class PlatformPhysicsOperation(Operation):
    def __init__(self, node, translation):
        super().__init__()
        self._node = node
        self._transform = node.getLocalTransformation()
        self._position = node.getPosition() + translation
        self._always_merge = True

    def undo(self):
        self._node.setLocalTransformation(self._transform)

    def redo(self):
        self._node.setPosition(self._position)

    def mergeWith(self, other):
        group = GroupedOperation()
        group.addOperation(self)
        group.addOperation(other)
        return group

    def __repr__(self):
        return 'PlatformPhysicsOperation(t = {0})'.format(self._position)
Python
0
@@ -475,43 +475,35 @@ lf._ -transform = node.getLocalTransforma +old_position = node.getPosi tion @@ -515,24 +515,28 @@ self._ +new_ position = n @@ -648,43 +648,35 @@ .set -LocalTransformation(self._transform +Position(self._old_position )%0A%0A @@ -727,24 +727,28 @@ ition(self._ +new_ position)%0A%0A
8b3952176203149adc06c2219e29ad72812208b6
Add doc to get_scans_cache for #267
src/scancode/cache.py
src/scancode/cache.py
# # Copyright (c) 2016 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. # ScanCode is a trademark of nexB Inc. # # You may not use this software except in compliance with the License. # You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR # CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. # # When you publish or redistribute any data created with ScanCode or any ScanCode # derivative work, you must accompany this data with the following acknowledgment: # # Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES # OR CONDITIONS OF ANY KIND, either express or implied. No content created from # ScanCode should be considered or used as legal advice. Consult an Attorney # for any legal advice. # ScanCode is a free software code scanning tool from nexB Inc. and others. # Visit https://github.com/nexB/scancode-toolkit/ for support and download. from __future__ import absolute_import, print_function from collections import OrderedDict import os import sys from commoncode import fileutils from commoncode import timeutils from scancode import scans_cache_dir """ Caching scans on disk: A cache of all the scan results. Each scan results for a file or directory is cached on disk. The approach is to use to cache: - the results of a scan, excluding file infos keyed by the hash of a scanned file - the file infos, keyed by the path of a scanned file Once a scan is completed, we iterate the caches to output the scan results using this procedure: iterate the cached file infos and for each lookup the scan details in the cached scan results. This iteration is driving the final streaming of results to the output format (e.g. JSON). Finally once a scan is completed the cache is destroyed to free up disk space. """ # Tracing flags TRACE = False def logger_debug(*args): pass if TRACE: import logging logger = logging.getLogger(__name__) # logging.basicConfig(level=logging.DEBUG, stream=sys.stdout) logging.basicConfig(stream=sys.stdout) logger.setLevel(logging.DEBUG) def logger_debug(*args): return logger.debug(' '.join(isinstance(a, basestring) and a or repr(a) for a in args)) class ScanCache(object): """ A file-based cache for scan results. This is NOT thread-safe, but is multi-process safe. """ def __init__(self, cache_dir): fileutils.create_dir(cache_dir) # create a unique temp directory in cache_dir self.cache_base_dir = fileutils.get_temp_dir(cache_dir, prefix=timeutils.time2tstamp() + '-') # and subdirs for infos and scans caches self.cache_infos_dir = os.path.join(self.cache_base_dir, 'infos') fileutils.create_dir(self.cache_infos_dir) self.cache_scans_dir = os.path.join(self.cache_base_dir, 'scans') fileutils.create_dir(self.cache_scans_dir) # workaround for https://github.com/grantjenks/python-diskcache/issues/32 from diskcache import Disk class DiskWithNoHighPickleProtocol(Disk): "Subclass of diskcache.Disk that always use the lowest pickle protocol." 
def __init__(self, directory, size_threshold, pickle_protocol): super(DiskWithNoHighPickleProtocol, self).__init__(directory, size_threshold, pickle_protocol) self._protocol = 0 # and finially cache instances from diskcache import Cache self.infos = Cache(self.cache_infos_dir, disk=DiskWithNoHighPickleProtocol) self.scans = Cache(self.cache_scans_dir, disk=DiskWithNoHighPickleProtocol) def scan_key(self, path, file_infos): """ Return a scan cache key for a path and file_infos. """ sha1 = file_infos['sha1'] # we may eventually store directories, in which case we use the path as a key return sha1 or path def put_infos(self, path, file_infos): """ Put file_infos for path in the cache and return True if the file referenced in file_infos has already been scanned or False otherwise. """ self.infos.set(path, file_infos) has_cached_details = self.scan_key(path, file_infos) in self.scans if TRACE: logger_debug('put_infos:', 'path:', path, 'has_cached_details:', has_cached_details, 'file_infos:', file_infos, '\n') logger_debug('put_infos:', 'cached_infos:', self.infos[path], '\n') return has_cached_details def put_scan(self, path, file_infos, scan_result): """ Put scan_result in the cache. Also put file_infos in the cache if needed. """ is_cached = self.put_infos(path, file_infos) if not is_cached: scan_key = self.scan_key(path, file_infos) self.scans.add(scan_key, scan_result) if TRACE: logger_debug('put_scan:', 'scan_key:', scan_key, 'file_infos:', file_infos, 'scan_result:', scan_result, '\n') logger_debug('put_scan:', 'cached_infos:', self.infos[path], '\n') logger_debug('put_scan:', 'scan_key:', scan_key, 'cached_scan:', self.scans[scan_key], '\n') def iterate(self, with_infos=True): """ Return an iterator of scan data for all cached scans e.g. the whole cache. """ for path in self.infos: file_infos = self.infos[path] scan_result = OrderedDict(path=path) if with_infos: # infos is always collected but only returnedd if asked: # we flatten these as direct attributes of a file object scan_result.update(file_infos.items()) else: # always report errors scan_result['scan_errors'] = file_infos.get('scan_errors', []) scan_key = self.scan_key(path, file_infos) scan_details = self.scans[scan_key].items() for scan_name, scan_data in scan_details: if scan_name == 'scan_errors': scan_result['scan_errors'].extend(scan_data) else: scan_result[scan_name] = scan_data if TRACE: logger_debug('iterate:', 'scan_details:', scan_details, 'for path:', path, 'scan_key:', scan_key, '\n') yield scan_result def clear(self, *args): """ Purge the cache by deleting the corresponding cached data files. """ self.infos.close() self.scans.close() fileutils.delete(self.cache_base_dir) def get_scans_cache(): return ScanCache(cache_dir=scans_cache_dir)
Python
0
@@ -7005,48 +7005,124 @@ che( -):%0A return ScanCache(cache_dir=scans_ +cache_dir=scans_cache_dir):%0A %22%22%22%0A Return a new unique persistent cache instance.%0A %22%22%22%0A return ScanCache( cach
e89c20e1ecfadb7e63a1fe80d821afafb8860352
add missing import
tfx/experimental/templates/taxi/launcher/stub_component_launcher.py
tfx/experimental/templates/taxi/launcher/stub_component_launcher.py
# Lint as: python3 # Copyright 2020 Google LLC. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Stub component launcher for launching stub executors in KFP.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tfx.experimental.pipeline_testing import stub_component_launcher class StubComponentLauncher(stub_component_launcher.StubComponentLauncher): """Responsible for launching stub executors in KFP Template. This stub component launcher cannot be defined in the kubeflow_dag_runner.py because launcher class is imported by the module path. """ pass # GCS directory where KFP outputs are recorded test_data_dir = "gs://{}/testdata".format(configs.GCS_BUCKET_NAME) # TODO(StubExecutor): customize self.stubbed_component_ids to replace components # with BaseStubExecutor stubbed_component_ids = ['CsvExampleGen', 'StatisticsGen', 'SchemaGen', 'ExampleValidator', 'Trainer', 'Transform', 'Evaluator', 'Pusher'] # TODO(StubExecutor): (Optional) Use stubbed_component_map to insert custom stub # executor class as a value and component id as a key. stubbed_component_map = {} StubComponentLauncher.get_stub_launcher_class( test_data_dir, stubbed_component_ids, stubbed_component_map)
Python
0.000042
@@ -855,16 +855,77 @@ launcher +%0Afrom tfx.experimental.templates.taxi.pipeline import configs %0A%0Aclass
a791f0f6823c48a6f5671c0fc4f676f203354a02
Use absolute paths for static and media.
eggtimer/settings.py
eggtimer/settings.py
# Django settings for eggtimer project. import dateutil.parser import os import dj_database_url from email.utils import formataddr HOME_DIR = os.path.expanduser("~") BASE_DIR = os.path.realpath(os.path.join(os.path.dirname(__file__), os.path.pardir)) ADMINS = ( (os.environ.get('ADMIN_NAME', 'admin'), os.environ.get('ADMIN_EMAIL', 'example@example.com')), ) # Export a secret value in production; for local development, the default is good enough SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'psu&83=i(4wgd@9*go=nps9=1rw#9b_w6psy4mp6yoxqv1i5g') DEBUG = bool(int(os.environ.get('DJANGO_DEBUG', False))) ALLOWED_HOSTS = ['eggtimer.herokuapp.com', 'localhost', '127.0.0.1'] CORS_ORIGIN_WHITELIST = ( 'eggtimer.herokuapp.com' ) SECURE_SSL_REDIRECT = bool(int(os.environ.get('DJANGO_ENABLE_SSL', True))) SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', 'custom_user', 'settings_context_processor', 'gunicorn', 'corsheaders', 'allauth', 'allauth.account', 'allauth.socialaccount', 'allauth.socialaccount.providers.facebook', 'rest_framework', 'rest_framework.authtoken', 'floppyforms', 'bootstrapform', 'timezone_field', 'periods', ] MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'corsheaders.middleware.CorsMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'django.middleware.security.SecurityMiddleware', ) ROOT_URLCONF = 'eggtimer.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ os.path.join(BASE_DIR, 'eggtimer', 'templates'), ], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ "django.contrib.auth.context_processors.auth", "django.core.context_processors.debug", "django.core.context_processors.i18n", "django.core.context_processors.media", "django.core.context_processors.request", "django.core.context_processors.static", "django.core.context_processors.tz", "django.contrib.messages.context_processors.messages", "settings_context_processor.context_processors.settings", ], 'debug': DEBUG, }, }, ] # Python dotted path to the WSGI application used by Django's runserver. WSGI_APPLICATION = 'eggtimer.wsgi.application' # Parse database configuration from $DATABASE_URL DATABASES = { 'default': dj_database_url.config( default="sqlite:///%s" % os.path.join(HOME_DIR, 'eggtimer', 'eggtimer.sqlite') ) } SITE_ID = 1 # https://docs.djangoproject.com/en/1.8/topics/i18n/ TIME_ZONE = 'UTC' LANGUAGE_CODE = 'en-us' USE_I18N = True USE_L10N = True USE_TZ = True CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', } } STATIC_ROOT = 'staticfiles' STATIC_URL = '/static/' # Additional locations of static files STATICFILES_DIRS = ( # Put strings here, like "/home/html/static" or "C:/www/django/static". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. 
os.path.join(BASE_DIR, 'bower_components'), os.path.join(BASE_DIR, 'eggtimer', 'static'), ) MEDIA_ROOT = 'media' MEDIA_URL = '/media/' AUTHENTICATION_BACKENDS = ( # Needed to login by username in Django admin, regardless of `allauth` "django.contrib.auth.backends.ModelBackend", # `allauth` specific authentication methods, such as login by e-mail "allauth.account.auth_backends.AuthenticationBackend" ) # auth and allauth set AUTH_USER_MODEL = 'periods.User' LOGIN_REDIRECT_URL = '/calendar/' ACCOUNT_USER_MODEL_USERNAME_FIELD = None ACCOUNT_EMAIL_REQUIRED = True ACCOUNT_USERNAME_REQUIRED = False ACCOUNT_AUTHENTICATION_METHOD = 'email' ACCOUNT_LOGOUT_ON_GET = True SOCIALACCOUNT_QUERY_EMAIL = True SOCIALACCOUNT_PROVIDERS = { 'facebook': { 'SCOPE': ['email'], 'METHOD': 'oauth2', } } ACCOUNT_ACTIVATION_DAYS = 14 # If Heroku addons start using EMAIL_URL, switch to dj-email-url DEFAULT_FROM_EMAIL = formataddr(ADMINS[0]) REPLY_TO = ( os.environ.get('REPLY_TO_EMAIL', 'example@example.com'), ) EMAIL_HOST = os.environ.get('MAILGUN_SMTP_SERVER') EMAIL_PORT = os.environ.get('MAILGUN_SMTP_PORT') EMAIL_HOST_USER = os.environ.get('MAILGUN_SMTP_LOGIN') EMAIL_HOST_PASSWORD = os.environ.get('MAILGUN_SMTP_PASSWORD') EMAIL_USE_TLS = True if not EMAIL_HOST: EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend' EMAIL_FILE_PATH = os.path.join(HOME_DIR, 'eggtimer', 'emails') # TODO Once Ionic app is done, perhaps remove session authentication? REST_FRAMEWORK = { 'DEFAULT_PERMISSION_CLASSES': ( 'rest_framework.permissions.IsAuthenticated', ), 'DEFAULT_AUTHENTICATION_CLASSES': ( 'rest_framework.authentication.SessionAuthentication', 'rest_framework.authentication.TokenAuthentication', ), 'DEFAULT_FILTER_BACKENDS': ('rest_framework.filters.DjangoFilterBackend',) } # Aeris API is used for moon phases AERIS_URL = 'https://api.aerisapi.com' AERIS_CLIENT_ID = os.environ.get('AERIS_CLIENT_ID') AERIS_CLIENT_SECRET = os.environ.get('AERIS_CLIENT_SECRET') # TODO maybe this could be a django plugin? DEPLOY_DATE = dateutil.parser.parse(os.environ.get('DEPLOY_DATE', '')) VERSION = '0.6' TEMPLATE_VISIBLE_SETTINGS = ['DEPLOY_DATE', 'VERSION', 'ADMINS'] TEST_RUNNER = 'django.test.runner.DiscoverRunner' if DEBUG: INSTALLED_APPS.extend([ 'django_extensions', ])
Python
0
@@ -3509,16 +3509,39 @@ C_ROOT = + os.path.join(BASE_DIR, 'static @@ -3546,16 +3546,17 @@ icfiles' +) %0ASTATIC_ @@ -3939,16 +3939,39 @@ OT = + os.path.join(BASE_DIR, 'media' %0AMED @@ -3966,16 +3966,17 @@ 'media' +) %0AMEDIA_U
7f4a02f7058c4e7dfd4bbb01ba847e6990b5e391
update admin
corehq/apps/userreports/admin.py
corehq/apps/userreports/admin.py
from __future__ import absolute_import, unicode_literals

from django.contrib import admin

from .models import AsyncIndicator, DataSourceActionLog, InvalidUCRData


@admin.register(AsyncIndicator)
class AsyncIndicatorAdmin(admin.ModelAdmin):
    model = AsyncIndicator
    list_display = [
        'doc_id',
        'doc_type',
        'domain',
        'indicator_config_ids',
        'date_created',
        'date_queued',
        'unsuccessful_attempts'
    ]
    list_filter = ('doc_type', 'domain', 'unsuccessful_attempts')
    search_fields = ('doc_id',)


@admin.register(InvalidUCRData)
class InvalidUCRDataAdmin(admin.ModelAdmin):
    model = InvalidUCRData
    list_display = [
        'doc_id',
        'doc_type',
        'domain',
        'indicator_config_id',
        'validation_name',
    ]
    list_filter = ('doc_type', 'domain', 'indicator_config_id', 'validation_name')
    search_fields = ('doc_id',)


@admin.register(DataSourceActionLog)
class DataSourceActionLogAdmin(admin.ModelAdmin):
    model = DataSourceActionLog
    list_display = [
        'date_created',
        'domain',
        'indicator_config_id',
        'initiated_by',
        'action_source',
        'action',
    ]
    list_filter = ('action_source', 'action')
    search_fields = ('domain', 'indicator_config_id',)
Python
0
@@ -1197,24 +1197,51 @@ 'action',%0A + 'skip_destructive'%0A %5D%0A li @@ -1278,16 +1278,36 @@ 'action' +, 'skip_destructive' )%0A se
00f4106ad6bfcaf4f7bd74adca67b14065b760bc
Rename SETTINGS_PATH to PROJECT_PATH
onlineweb4/settings.py
onlineweb4/settings.py
import os TEST_RUNNER = "django_nose.NoseTestSuiteRunner" DEBUG = True TEMPLATE_DEBUG = DEBUG ADMINS = ( # ('Your Name', 'your_email@example.com'), ) MANAGERS = ADMINS SETTINGS_PATH = os.path.realpath(os.path.dirname(__file__)) DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'. 'NAME': 'dev.db', # Or path to database file if using sqlite3. 'USER': '', # Not used with sqlite3. 'PASSWORD': '', # Not used with sqlite3. 'HOST': '', # Set to empty string for localhost. Not used with sqlite3. 'PORT': '', # Set to empty string for default. Not used with sqlite3. } } # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # On Unix systems, a value of None will cause Django to use the same # timezone as the operating system. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = 'America/Chicago' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale. USE_L10N = True # If you set this to False, Django will not use timezone-aware datetimes. USE_TZ = True # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/home/media/media.lawrence.com/media/" MEDIA_ROOT = '' # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash. # Examples: "http://media.lawrence.com/media/", "http://example.com/media/" MEDIA_URL = '' # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/home/media/media.lawrence.com/static/" STATIC_ROOT = '' # URL prefix for static files. # Example: "http://media.lawrence.com/static/" STATIC_URL = '/static/' # Additional locations of static files STATICFILES_DIRS = ( # Put strings here, like "/home/html/static" or "C:/www/django/static". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. ) # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # 'django.contrib.staticfiles.finders.DefaultStorageFinder', ) # Make this unique, and don't share it with anybody. SECRET_KEY = 'q#wy0df(7&amp;$ucfrxa1j72%do7ko*-6(g!8f$tc2$3x@3cq5@6c' # List of callables that know how to import templates from various sources. 
TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', # 'django.template.loaders.eggs.Loader', ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', # Uncomment the next line for simple clickjacking protection: # 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'onlineweb4.urls' # Python dotted path to the WSGI application used by Django's runserver. WSGI_APPLICATION = 'onlineweb4.wsgi.application' TEMPLATE_DIRS = ( # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. os.path.join(PROJECT_PATH, '../templates') ) INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', 'django_nose', 'south', 'apps.events', # Uncomment the next line to enable the admin: 'django.contrib.admin', # Uncomment the next line to enable admin documentation: 'django.contrib.admindocs', ) # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' } }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, } }
Python
0.001811
@@ -173,16 +173,15 @@ NS%0A%0A -SETTINGS +PROJECT _PAT
c10f222bb6de5150087a2ddd26ffbef2f8eeb4a3
break down method
corehq/apps/users/permissions.py
corehq/apps/users/permissions.py
from collections import namedtuple from corehq import privileges, toggles from corehq.apps.accounting.utils import domain_has_privilege FORM_EXPORT_PERMISSION = 'corehq.apps.reports.standard.export.ExcelExportReport' DEID_EXPORT_PERMISSION = 'corehq.apps.reports.standard.export.DeidExportReport' CASE_EXPORT_PERMISSION = 'corehq.apps.reports.standard.export.CaseExportReport' SMS_EXPORT_PERMISSION = 'corehq.apps.reports.standard.export.SMSExportReport' EXPORT_PERMISSIONS = {FORM_EXPORT_PERMISSION, DEID_EXPORT_PERMISSION, CASE_EXPORT_PERMISSION} ReportPermission = namedtuple('ReportPermission', ['slug', 'title', 'is_visible']) def get_extra_permissions(): from corehq.apps.export.views.list import ( FormExportListView, DeIdFormExportListView, CaseExportListView ) from corehq.apps.export.views.download import DownloadNewSmsExportView yield ReportPermission( FORM_EXPORT_PERMISSION, FormExportListView.page_title, lambda domain: True) yield ReportPermission( DEID_EXPORT_PERMISSION, DeIdFormExportListView.page_title, lambda domain: domain_has_privilege(domain, privileges.DEIDENTIFIED_DATA)) yield ReportPermission( CASE_EXPORT_PERMISSION, CaseExportListView.page_title, lambda domain: True) yield ReportPermission( SMS_EXPORT_PERMISSION, DownloadNewSmsExportView.page_title, lambda domain: True) def can_download_data_files(domain, couch_user): from corehq.apps.users.models import DomainMembershipError try: role = couch_user.get_role(domain) except DomainMembershipError: return False return toggles.DATA_FILE_DOWNLOAD.enabled(domain) and role.permissions.view_file_dropzone def can_view_sms_exports(couch_user, domain): return has_permission_to_view_report( couch_user, domain, SMS_EXPORT_PERMISSION ) def has_permission_to_view_report(couch_user, domain, report_to_check): from corehq.apps.users.decorators import get_permission_name from corehq.apps.users.models import Permissions return ( couch_user.can_view_reports(domain) or couch_user.has_permission( domain, get_permission_name(Permissions.view_report), data=report_to_check ) ) def can_manage_releases(couch_user, domain, app_id): from corehq.apps.users.decorators import get_permission_name from corehq.apps.users.models import Permissions restricted_app_release = toggles.RESTRICT_APP_RELEASE.enabled(domain) if not restricted_app_release: return True role = couch_user.get_role(domain) return ( couch_user.has_permission( domain, get_permission_name(Permissions.manage_releases), restrict_global_admin=True ) or app_id in role.permissions.manage_releases_list)
Python
0.028979
@@ -2309,16 +2309,257 @@ pp_id):%0A + if _can_manage_releases_for_all_apps(couch_user, domain):%0A return True%0A role = couch_user.get_role(domain)%0A return app_id in role.permissions.manage_releases_list%0A%0A%0Adef _can_manage_releases_for_all_apps(couch_user, domain):%0A from @@ -2806,62 +2806,13 @@ r -ole = couch_user.get_role(domain)%0A return (%0A +eturn cou @@ -2827,36 +2827,32 @@ has_permission(%0A - domain, @@ -2909,20 +2909,16 @@ - - restrict @@ -2944,70 +2944,6 @@ - ) or%0A app_id in role.permissions.manage_releases_list )%0A
57e610836297ef136b892ea1cdea5fe9109c45fa
Change the way that test objects are named.
integration/testing.py
integration/testing.py
# Copyright (c) 2016 Canonical Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import unittest from pylxd.client import Client from integration.busybox import create_busybox_image class IntegrationTestCase(unittest.TestCase): """A base test case for pylxd integration tests.""" def setUp(self): super(IntegrationTestCase, self).setUp() self.client = Client() self.lxd = self.client.api def create_container(self): """Create a container in lxd.""" name = self.id().split('.')[-1].replace('_', '') machine = { 'name': name, 'architecture': 2, 'profiles': ['default'], 'ephemeral': False, 'config': {'limits.cpu': '2'}, 'source': {'type': 'image', 'alias': 'busybox'}, } result = self.lxd['containers'].post(json=machine) operation_uuid = result.json()['operation'].split('/')[-1] result = self.lxd.operations[operation_uuid].wait.get() self.addCleanup(self.delete_container, name) return name def delete_container(self, name, enforce=False): """Delete a container in lxd.""" #response = self.lxd.containers['name'].get() #if response == 200: # enforce is a hack. There's a race somewhere in the delete. # To ensure we don't get an infinite loop, let's count. count = 0 result = self.lxd['containers'][name].delete() while enforce and result.status_code == 404 and count < 10: result = self.lxd['containers'][name].delete() count += 1 try: operation_uuid = result.json()['operation'].split('/')[-1] result = self.lxd.operations[operation_uuid].wait.get() except KeyError: pass # 404 cases are okay. def create_image(self): """Create an image in lxd.""" path, fingerprint = create_busybox_image() with open(path, 'rb') as f: headers = { 'X-LXD-Public': '1', } response = self.lxd.images.post(data=f.read(), headers=headers) operation_uuid = response.json()['operation'].split('/')[-1] self.lxd.operations[operation_uuid].wait.get() self.addCleanup(self.delete_image, fingerprint) return fingerprint def delete_image(self, fingerprint): """Delete an image in lxd.""" self.lxd.images[fingerprint].delete() def assertCommon(self, response): """Assert common LXD responses. LXD responses are relatively standard. This function makes assertions to all those standards. """ self.assertEqual(response.status_code, response.json()['status_code']) self.assertEqual( ['metadata', 'operation', 'status', 'status_code', 'type'], sorted(response.json().keys()))
Python
0.000002
@@ -602,16 +602,28 @@ icense.%0A +import uuid%0A import u @@ -630,16 +630,16 @@ nittest%0A - %0Afrom py @@ -958,16 +958,185 @@ nt.api%0A%0A + def generate_object_name(self):%0A test = self.id().split('.')%5B-1%5D%0A rando = str(uuid.uuid1()).split('-')%5B-1%5D%0A return '%7B%7D-%7B%7D'.format(test, rando)%0A%0A def @@ -1224,43 +1224,30 @@ elf. -id().split('.')%5B-1%5D.replace('_', '' +_generate_object_name( )%0A @@ -1890,91 +1890,8 @@ %22%22%22%0A - #response = self.lxd.containers%5B'name'%5D.get()%0A #if response == 200:%0A @@ -2885,32 +2885,239 @@ id%5D.wait.get()%0A%0A + alias = self.generate_object_name()%0A response = self.lxd.images.aliases.post(json=%7B%0A 'description': '',%0A 'target': fingerprint,%0A 'name': alias%0A %7D)%0A%0A self.add @@ -3156,16 +3156,16 @@ rprint)%0A - @@ -3182,16 +3182,23 @@ gerprint +, alias %0A%0A de
8ef9164deda887ccbef71787e91501578aae2053
Version bump
cubric_mrs/_version.py
cubric_mrs/_version.py
__version__ = "0.1.1"
Python
0
@@ -12,10 +12,10 @@ = %220.1. -1 +2 %22
3010b87ba1c377ce6b98285b5dc88b60e96e7e8a
Delete error handle
cupy/sorting/search.py
cupy/sorting/search.py
import cupy from cupy import core from cupy.core import fusion def argmax(a, axis=None, dtype=None, out=None, keepdims=False): """Returns the indices of the maximum along an axis. Args: a (cupy.ndarray): Array to take argmax. axis (int): Along which axis to find the maximum. ``a`` is flattened by default. dtype: Data type specifier. out (cupy.ndarray): Output array. keepdims (bool): If ``True``, the axis ``axis`` is preserved as an axis of length one. Returns: cupy.ndarray: The indices of the maximum of ``a`` along an axis. .. seealso:: :func:`numpy.argmax` """ # TODO(okuta): check type return a.argmax(axis=axis, dtype=dtype, out=out, keepdims=keepdims) def nanargmax(a, axis=None): """Return the indices of the maximum values in the specified axis ignoring NaNs. For all-NaN slice ``ValueError`` is raised. Subclass cannot be passed yet, subok=True still unsupported Args: a (cupy.ndarray): Array to take nanargmax. axis (int): Along which axis to find the maximum. ``a`` is flattened by default. Returns: cupy.ndarray: The indices of the maximum of ``a`` along an axis ignoring NaN values. .. seealso:: :func:`numpy.nanargmax` """ if a.dtype.kind in 'biu': return argmin(a, axis=axis) mask = cupy.isnan(a) if cupy.any(cupy.all(mask, axis=axis)): raise ValueError('All-NaN slice encountered') return argmax(cupy.where(mask, cupy.inf, a), axis=axis) def argmin(a, axis=None, dtype=None, out=None, keepdims=False): """Returns the indices of the minimum along an axis. Args: a (cupy.ndarray): Array to take argmin. axis (int): Along which axis to find the minimum. ``a`` is flattened by default. dtype: Data type specifier. out (cupy.ndarray): Output array. keepdims (bool): If ``True``, the axis ``axis`` is preserved as an axis of length one. Returns: cupy.ndarray: The indices of the minimum of ``a`` along an axis. .. seealso:: :func:`numpy.argmin` """ # TODO(okuta): check type return a.argmin(axis=axis, dtype=dtype, out=out, keepdims=keepdims) def nanargmin(a, axis=None): """Return the indices of the minimum values in the specified axis ignoring NaNs. For all-NaN slice ``ValueError`` is raised. Subclass cannot be passed yet, subok=True still unsupported Args: a (cupy.ndarray): Array to take nanargmin. axis (int): Along which axis to find the minimum. ``a`` is flattened by default. Returns: cupy.ndarray: The indices of the minimum of ``a`` along an axis ignoring NaN values. .. seealso:: :func:`numpy.nanargmin` """ if a.dtype.kind in 'biu': return argmin(a, axis=axis) mask = cupy.isnan(a) if cupy.any(cupy.all(mask, axis=axis)): raise ValueError('All-NaN slice encountered') return argmin(cupy.where(mask, cupy.inf, a), axis=axis) # TODO(okuta): Implement argwhere def nonzero(a): """Return the indices of the elements that are non-zero. Returns a tuple of arrays, one for each dimension of a, containing the indices of the non-zero elements in that dimension. Args: a (cupy.ndarray): array Returns: tuple of arrays: Indices of elements that are non-zero. .. seealso:: :func:`numpy.nonzero` """ assert isinstance(a, core.ndarray) return a.nonzero() def flatnonzero(a): """Return indices that are non-zero in the flattened version of a. This is equivalent to a.ravel().nonzero()[0]. Args: a (cupy.ndarray): input array Returns: cupy.ndarray: Output array, containing the indices of the elements of a.ravel() that are non-zero. .. 
seealso:: :func:`numpy.flatnonzero` """ assert isinstance(a, core.ndarray) return a.ravel().nonzero()[0] _where_ufunc = core.create_ufunc( 'cupy_where', ('???->?', '?bb->b', '?BB->B', '?hh->h', '?HH->H', '?ii->i', '?II->I', '?ll->l', '?LL->L', '?qq->q', '?QQ->Q', '?ee->e', '?ff->f', # On CUDA 6.5 these combinations don't work correctly (on CUDA >=7.0, it # works). # See issue #551. '?hd->d', '?Hd->d', '?dd->d', '?FF->F', '?DD->D'), 'out0 = in0 ? in1 : in2') def where(condition, x=None, y=None): """Return elements, either from x or y, depending on condition. If only condition is given, return ``condition.nonzero()``. Args: condition (cupy.ndarray): When True, take x, otherwise take y. x (cupy.ndarray): Values from which to choose on ``True``. y (cupy.ndarray): Values from which to choose on ``False``. Returns: cupy.ndarray: Each element of output contains elements of ``x`` when ``condition`` is ``True``, otherwise elements of ``y``. If only ``condition`` is given, return the tuple ``condition.nonzero()``, the indices where ``condition`` is True. .. seealso:: :func:`numpy.where` """ missing = (x is None, y is None).count(True) if missing == 1: raise ValueError('Must provide both \'x\' and \'y\' or neither.') if missing == 2: return nonzero(condition) if fusion._is_fusing(): return fusion._call_ufunc(_where_ufunc, condition, x, y) return _where_ufunc(condition.astype('?'), x, y) # TODO(okuta): Implement searchsorted # TODO(okuta): Implement extract
Python
0.000001
@@ -1385,133 +1385,8 @@ s)%0A%0A - mask = cupy.isnan(a)%0A%0A if cupy.any(cupy.all(mask, axis=axis)):%0A raise ValueError('All-NaN slice encountered')%0A%0A @@ -1402,36 +1402,45 @@ gmax(cupy.where( -mask +cupy.isnan(a) , cupy.inf, a), @@ -2779,133 +2779,8 @@ s)%0A%0A - mask = cupy.isnan(a)%0A%0A if cupy.any(cupy.all(mask, axis=axis)):%0A raise ValueError('All-NaN slice encountered')%0A%0A @@ -2808,12 +2808,21 @@ ere( -mask +cupy.isnan(a) , cu
67e3a95d7c3227da0b8a06dc29f0e9e868e55153
Check file size before calculating md5sum.
danbooru/downloader.py
danbooru/downloader.py
# -*- coding: utf-8 -*- # Copyright 2012 codestation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import hashlib import logging from time import sleep from os.path import isfile, join from urllib.request import urlopen from urllib.error import URLError, HTTPError class Downloader(object): _total = 1 _stop = False def __init__(self, path): self.path = path def stop(self): logging.debug("Stopping download job") self._stop = True def _calculateMD5(self, name): try: file = open(name, 'rb') md5_hash = hashlib.md5() while True: d = file.read(128) if not d: break md5_hash.update(d) file.close() return md5_hash.hexdigest() except IOError: pass def downloadQueue(self, dl_list, nohash=False, callback=None): for dl in dl_list: if self._stop: break base = dl.image.md5 + dl.image.file_ext subdir = dl.image.md5[0] filename = join(self.path, subdir, base) if nohash and isfile(filename): #logging.debug("(%i) %s already exists, skipping" % (self._total, filename)) #self._total += 1 continue md5 = self._calculateMD5(filename) if md5: if md5 == dl.image.md5: #logging.debug("%s already exists, skipping" % filename) continue else: logging.warning("%s md5sum doesn't match, re-downloading" % filename) try: local_file = open(filename, 'wb') except IOError: logging.error('Error while creating %s' % filename) continue retries = 0 start = 0 while not self._stop and retries < 3: try: remote_file = urlopen(dl.file_url) meta = remote_file.info() if "Content-Length" in meta: remote_size = int(meta['Content-Length']) else: remote_size = -1 if start: remote_file.seek(start) while not self._stop: buf = remote_file.read(16 * 1024) if not buf: break local_file.write(buf) start += len(buf) if callback: callback(base, start, remote_size) remote_file.close() local_file.close() if callback: sys.stdout.write("\r") sys.stdout.flush() if self._stop: logging.debug('(%i) %s [ABORTED]' % (self._total, base)) break logging.debug('(%i) %s [OK]' % (self._total, base)) self._total += 1 sleep(1) break except URLError as e: logging.error('>>> Error %s' % e.reason) except HTTPError as e: logging.error('>>> Error %i: %s' % (e.code, e.msg)) start = local_file.tell() retries += 1 logging.warning('Retrying (%i) in 2 seconds...' % retries) sleep(2)
Python
0
@@ -710,16 +710,25 @@ le, join +, getsize %0Afrom ur @@ -1692,19 +1692,8 @@ if -nohash and isfi @@ -1726,144 +1726,154 @@ -#logging.debug(%22(%25i) %25s already exists, skipping%22 %25 (self._total, filename))%0A #self._total += 1%0A continue%0A +if getsize(filename) == dl.image.file_size:%0A if nohash:%0A continue%0A else:%0A @@ -1931,16 +1931,28 @@ + + if md5:%0A @@ -1943,24 +1943,36 @@ if md5:%0A + @@ -2019,16 +2019,28 @@ + + #logging @@ -2104,33 +2104,57 @@ -continue%0A + continue%0A @@ -2175,32 +2175,44 @@ + logging.warning( @@ -2252,29 +2252,237 @@ loading%22 - %25 filename)%0A +, filename)%0A else:%0A logging.warning(%22%25s filesize doesn't match, re-downloading%22, filename)%0A else:%0A logging.warning(%22%25s doesn't exists, re-downloading%22, filename) %0A @@ -2624,18 +2624,17 @@ ting %25s' - %25 +, filenam @@ -3799,20 +3799,18 @@ BORTED%5D' +, -%25 ( self._to @@ -3811,33 +3811,32 @@ lf._total, base) -) %0A @@ -3899,20 +3899,18 @@ %25s %5BOK%5D' +, -%25 ( self._to @@ -3919,17 +3919,16 @@ l, base) -) %0A @@ -4098,18 +4098,17 @@ rror %25s' - %25 +, e.reaso @@ -4205,12 +4205,10 @@ %25s' +, -%25 ( e.co @@ -4217,17 +4217,16 @@ , e.msg) -) %0A%0A @@ -4354,18 +4354,17 @@ onds...' - %25 +, retries
5450303c975e34265f6fda3c014b9aed7d002a3c
Fix download path, the existing one has been removed from nvidia's site (#10253)
var/spack/repos/builtin/packages/cudnn/package.py
var/spack/repos/builtin/packages/cudnn/package.py
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class Cudnn(Package):
    """NVIDIA cuDNN is a GPU-accelerated library of primitives for deep
    neural networks"""

    homepage = "https://developer.nvidia.com/cudnn"

    version('7.3', '72666d3532850752612706601258a0b2',
            url='https://developer.nvidia.com/compute/machine-learning/cudnn/secure/v7.3.0/prod/9.0_2018920/cudnn-9.0-linux-x64-v7.3.0.29.tgz')
    version('6.0', 'a08ca487f88774e39eb6b0ef6507451d',
            url='http://developer.download.nvidia.com/compute/redist/cudnn/v6.0/cudnn-8.0-linux-x64-v6.0.tgz')
    version('5.1', '406f4ac7f7ee8aa9e41304c143461a69',
            url='http://developer.download.nvidia.com/compute/redist/cudnn/v5.1/cudnn-8.0-linux-x64-v5.1.tgz')

    depends_on('cuda@8:')

    def install(self, spec, prefix):
        install_tree('.', prefix)
Python
0
@@ -456,25 +456,24 @@ url='http -s ://developer @@ -465,32 +465,41 @@ ttp://developer. +download. nvidia.com/compu @@ -505,60 +505,26 @@ ute/ -machine-learning/cudnn/secure/v7.3.0/prod/9.0_201892 +redist/cudnn/v7.3. 0/cu
bb042f7bd76e364c3be6791c580b9426a4007627
fix url and add shared variant (#5358)
var/spack/repos/builtin/packages/latte/package.py
var/spack/repos/builtin/packages/latte/package.py
##############################################################################
# Copyright (c) 2017, Los Alamos National Security, LLC
# Produced at the Los Alamos National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *


class Latte(CMakePackage):
    """Open source density functional tight binding molecular dynamics."""

    homepage = "https://gitlab.com/exaalt/latte"
    url = "https://gitlab.com/exaalt/latte/tags/v1.0"

    version('develop', git='https://gitlab.com/exaalt/latte', branch='cmake')

    depends_on("cmake@3.1:", type='build')
    depends_on('blas')
    depends_on('lapack')

    root_cmakelists_dir = 'cmake'

    def cmake_args(self):
        options = ['-DBUILD_SHARED_LIBS=ON']

        return options
Python
0
@@ -1354,38 +1354,36 @@ %22https://git -la +hu b.com/ -exaalt +lanl /latte%22%0A @@ -1401,38 +1401,36 @@ %22https://git -la +hu b.com/ -exaalt +lanl /latte/tags/ @@ -1426,18 +1426,21 @@ latte/ta -gs +rball /v1.0%22%0A%0A @@ -1482,22 +1482,20 @@ /git -la +hu b.com/ -exaalt +lanl /lat @@ -1511,214 +1511,820 @@ ch=' -cmake')%0A%0A depends_on(%22cmake@3.1:%22, type='build')%0A depends_on('blas')%0A depends_on('lapack')%0A%0A root_cmakelists_dir = 'cmake'%0A%0A def cmake_args(self):%0A options = %5B'-DBUILD_SHARED_LIBS=ON'%5D +master')%0A%0A variant('mpi', default=True,%0A description='Build with mpi')%0A variant('progress', default=False,%0A description='Use progress for fast')%0A variant('shared', default=True, description='Build shared libs')%0A%0A depends_on(%22cmake@3.1:%22, type='build')%0A depends_on('blas')%0A depends_on('lapack')%0A depends_on('mpi', when='+mpi')%0A depends_on('qmd-progress', when='+progress')%0A%0A root_cmakelists_dir = 'cmake'%0A%0A def cmake_args(self):%0A options = %5B%5D%0A if '+shared' in self.spec:%0A options.append('-DBUILD_SHARED_LIBS=ON')%0A else:%0A options.append('-DBUILD_SHARED_LIBS=OFF')%0A if '+mpi' in self.spec:%0A options.append('-DO_MPI=yes')%0A if '+progress' in self.spec:%0A options.append('-DPROGRESS=yes') %0A%0A
08b5b565666d42a6802e136fc8e7cf8d355929b0
add v2019.1 and v2020.1 (#17648)
var/spack/repos/builtin/packages/qhull/package.py
var/spack/repos/builtin/packages/qhull/package.py
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class Qhull(CMakePackage):
    """Qhull computes the convex hull, Delaunay triangulation, Voronoi
    diagram, halfspace intersection about a point, furthest-site Delaunay
    triangulation, and furthest-site Voronoi diagram. The source code runs
    in 2-d, 3-d, 4-d, and higher dimensions. Qhull implements the Quickhull
    algorithm for computing the convex hull. It handles roundoff errors
    from floating point arithmetic. It computes volumes, surface areas, and
    approximations to the convex hull."""

    homepage = "http://www.qhull.org"

    version('2015.2', sha256='78b010925c3b577adc3d58278787d7df08f7c8fb02c3490e375eab91bb58a436',
            url="http://www.qhull.org/download/qhull-2015-src-7.2.0.tgz")

    version('2012.1', sha256='a35ecaa610550b7f05c3ce373d89c30cf74b059a69880f03080c556daebcff88',
            url="http://www.qhull.org/download/qhull-2012.1-src.tgz")

    patch('qhull-unused-intel-17.02.patch', when='@2015.2')

    depends_on('cmake@2.6:', type='build')
Python
0
@@ -792,16 +792,358 @@ l.org%22%0A%0A + version('2020.1', sha256='1ac92a5538f61e297c72aebe4d4ffd731ceb3e6045d6d15faf1c212713798df4',%0A url=%22http://www.qhull.org/download/qhull-2020-src-8.0.0.tgz%22)%0A version('2019.1', sha256='2b7990558c363076261564f61b74db4d0d73b71869755108a469038c07dc43fb',%0A url=%22http://www.qhull.org/download/qhull-2019-src-7.3.2.tgz%22)%0A vers @@ -1231,16 +1231,16 @@ 8a436',%0A - @@ -1301,25 +1301,24 @@ 7.2.0.tgz%22)%0A -%0A version( @@ -1560,11 +1560,11 @@ ake@ -2.6 +3.0 :', @@ -1577,8 +1577,260 @@ build')%0A +%0A def flag_handler(self, name, flags):%0A # See https://github.com/qhull/qhull/issues/65%0A if name == 'cxxflags' and self.version == Version('2020.1'):%0A flags.append(self.compiler.cxx11_flag)%0A return (flags, None, None)%0A
13e5c3fbe23e3f57503064bf2c3d7c0a3713101a
build vtk without mpi support (#9649)
var/spack/repos/builtin/packages/visit/package.py
var/spack/repos/builtin/packages/visit/package.py
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Visit(CMakePackage): """VisIt is an Open Source, interactive, scalable, visualization, animation and analysis tool.""" homepage = "https://wci.llnl.gov/simulation/computer-codes/visit/" url = "http://portal.nersc.gov/project/visit/releases/2.10.1/visit2.10.1.tar.gz" version('2.13.0', '716644b8e78a00ff82691619d4d1e7a914965b6535884890b667b97ba08d6a0f') version('2.12.3', '2dd351a291ee3e79926bc00391ca89b202cfa4751331b0fdee1b960c7922161f') version('2.12.2', '355779b1dbf440cdd548526eecd77b60') version('2.10.3', 'a1082a6f6dab3e2dcb58993603456c2b') version('2.10.2', '253de0837a9d69fb689befc98ea4d068') version('2.10.1', '3cbca162fdb0249f17c4456605c4211e') variant('gui', default=True, description='Enable VisIt\'s GUI') variant('hdf5', default=True, description='Enable HDF5 file format') variant('silo', default=True, description='Enable Silo file format') variant('python', default=True, description='Enable Python support') variant('mpi', default=True, description='Enable parallel engine') depends_on('cmake@3.0:', type='build') depends_on('vtk@6.1.0~opengl2') depends_on('qt@4.8.6', when='+gui') depends_on('qwt', when='+gui') depends_on('python', when='+python') depends_on('silo+shared', when='+silo') depends_on('hdf5', when='+hdf5') depends_on('mpi', when='+mpi') conflicts('+hdf5', when='~gui') conflicts('+silo', when='~gui') root_cmakelists_dir = 'src' def cmake_args(self): spec = self.spec args = [ '-DVTK_MAJOR_VERSION={0}'.format(spec['vtk'].version[0]), '-DVTK_MINOR_VERSION={0}'.format(spec['vtk'].version[1]), '-DVISIT_VTK_DIR:PATH={0}'.format(spec['vtk'].prefix), '-DVISIT_USE_GLEW=OFF', '-DCMAKE_CXX_FLAGS=-fPIC', '-DCMAKE_C_FLAGS=-fPIC' ] if(spec.variants['python'].value): args.append('-DPYTHON_DIR:PATH={0}'.format(spec['python'].home)) if(spec.variants['gui'].value): qt_bin = spec['qt'].prefix.bin args.append( '-DVISIT_LOC_QMAKE_EXE:FILEPATH={0}/qmake-qt4'.format(qt_bin)) args.append('-DVISIT_QWT_DIR:PATH={0}'.format(spec['qwt'].prefix)) else: args.append('-DVISIT_SERVER_COMPONENTS_ONLY=ON') args.append('-DVISIT_ENGINE_ONLY=ON') if(spec.variants['hdf5'].value): args.append( '-DVISIT_HDF5_DIR:PATH={0}'.format(spec['hdf5'].prefix)) if spec.satisfies('^hdf5+mpi', strict=True): args.append('-DVISIT_HDF5_MPI_DIR:PATH={0}'.format( spec['hdf5'].prefix)) if(spec.variants['silo'].value): args.append( '-DVISIT_SILO_DIR:PATH={0}'.format(spec['silo'].prefix)) if(spec.variants['mpi'].value): args.append('-DVISIT_PARALLEL=ON') args.append('-DVISIT_C_COMPILER={0}'.format(spec['mpi'].mpicc)) args.append('-DVISIT_CXX_COMPILER={0}'.format(spec['mpi'].mpicxx)) args.append('-DVISIT_MPI_COMPILER={0}'.format(spec['mpi'].mpicxx)) return args
Python
0
@@ -1364,16 +1364,20 @@ ~opengl2 +~mpi ')%0A d
1f6b1d2aca3995a4ac295f7e6a8ab6bf84d6e79b
add logging for ShotDetectorPlotService
shot_detector/services/shot_detector_service.py
shot_detector/services/shot_detector_service.py
# -*- coding: utf8 -*- from __future__ import absolute_import, division, print_function import time from shot_detector.detectors import SimpleDetector from .base_detector_service import BaseDetectorService from .plot_service import PlotService from shot_detector.utils.common import yes_no class ShotDetectorPlotService(PlotService, BaseDetectorService): """ Simple Shot Detector Service. """ def add_arguments(self, parser, **kwargs): parser = super(ShotDetectorPlotService, self) \ .add_arguments(parser, **kwargs) parser = self.add_video_arguments(parser, **kwargs) parser = self.add_plot_arguments(parser, **kwargs) return parser def add_video_arguments(self, parser, **kwargs): parser.add_argument( '--ff', '--first-frame', metavar='sec', dest='first_frame', type=int, default=0, ) parser.add_argument( '--lf', '--last-frame', metavar='sec', dest='last_frame', type=int, default=60, ) parser.add_argument( '--as', '--as-stream', default='no', dest='as_stream', type=yes_no, ) return parser def run(self, *kwargs): options = self.options detector = SimpleDetector() t1 = time.time() detector.detect( input_uri=options.input_uri, format=options.format, service_options=vars(options) ) t2 = time.time() print(t2 - t1)
Python
0
@@ -90,20 +90,23 @@ %0Aimport -time +logging %0A%0Afrom s @@ -291,16 +291,80 @@ yes_no%0A%0A +%0Afrom shot_detector.utils.log_meta import log_method_call_with%0A%0A class Sh @@ -815,17 +815,16 @@ wargs):%0A -%0A @@ -987,33 +987,32 @@ lt=0,%0A )%0A -%0A parser.a @@ -1166,33 +1166,32 @@ t=60,%0A )%0A -%0A parser.a @@ -1321,33 +1321,32 @@ s_no,%0A )%0A -%0A return p @@ -1344,32 +1344,136 @@ return parser%0A%0A + @log_method_call_with(%0A level=logging.WARN,%0A logger=logging.getLogger(__name__)%0A )%0A def run(self @@ -1515,17 +1515,16 @@ options%0A -%0A @@ -1555,35 +1555,8 @@ r()%0A -%0A t1 = time.time()%0A%0A @@ -1694,16 +1694,16 @@ ptions)%0A + @@ -1707,54 +1707,4 @@ ) -%0A%0A t2 = time.time()%0A print(t2 - t1)%0A
6f05fa90a2134c24c753a50a43e91522531c72b6
update update
wsgi/usgs_update_02.py
wsgi/usgs_update_02.py
# Parse USGS JSON files # Populates the sites using the original URL requests # USGS site doesn't seem to let you just dump everything # For this purpose we use the hydrological are # This value goes from 01 to 21 and makes it easy to construct a series of operations # This version creates a customized dump because MongoDB apparently doesn't like # a literal JSON format. # Gordon Haff import json import string import sys import io import urllib2 import pymongo import os output = {} #setup the connection to the gauges database conn = pymongo.Connection(os.environ['OPENSHIFT_MONGODB_DB_URL']) db = conn.gauges # for working purposes, only pulling in New England for i in range(0,1): req = urllib2.Request("http://waterservices.usgs.gov/nwis/iv/?format=json,1.1&huc=01&parameterCd=00060,00065&siteType=ST") opener = urllib2.build_opener() f = opener.open(req) entry = json.loads(f.read()) count = int (len(entry['value']['timeSeries']) - 1) while count >= 0: #We construct an array of the relevant values associated with a guage number #Note that gage height and discharge are in separate entries #Right here we're just filling out the "permanent" values #Gauge Number. This will be the dictionary index agaugenum = entry['value']['timeSeries'][count]['sourceInfo']['siteCode'][0]['value'] #Site Name #Going to assume that all the "permanent" attributes of a guage number are the #same across entries. We'll use the first instance in any case # asitename = entry['value']['timeSeries'][count]['sourceInfo']['siteName'] #Lat # alat = entry['value']['timeSeries'][count]['sourceInfo']['geoLocation']['geogLocation']['latitude'] #Long # along = entry['value']['timeSeries'][count]['sourceInfo']['geoLocation']['geogLocation']['longitude'] # save the variable code variablecode = str(entry['value']['timeSeries'][count]['variable']['variableCode'][0]['variableID']) # save the variable value variablevalue = str(entry['value']['timeSeries'][count]['values'][0]['value'][0]['value']) # save the time stamp creationtime = str(entry['value']['timeSeries'][count]['values'][0]['value'][0]['dateTime']) #Gage ht. ft. variableID 45807202 if variablecode == '45807202': db.gaugepoints.update({"_id":agaugenum},{"$set":{"height":variablevalue}}) #Discharge cfs variableID 45807197 if variablecode == '45807197': db.gaugepoints.update({"_id":agaugenum},{"$set":{"flow":variablevalue}}) #save creation time so that we can throw out any stale data db.gaugepoints.update({"_id":agaugenum},{"$set":{"timestamp":creationtime}}) count = count - 1
Python
0.000001
@@ -1,8 +1,31 @@ +#!/usr/bin/env python%0A%0A # Parse
8b325f04f84259f287a1da78ef63f67eb14d525d
Add `most_likely` test
bayesian/tests.py
bayesian/tests.py
import sys sys.path.append('../') import unittest from bayesian import Bayes class TestBayes(unittest.TestCase): def test_empty_constructor(self): with self.assertRaises(ValueError): b = Bayes() def test_list_constructor(self): self.assertEqual(Bayes([]), []) self.assertEqual(Bayes(()), []) self.assertEqual(Bayes(range(5)), [0, 1, 2, 3, 4]) self.assertEqual(Bayes({'a': 10, 'b': 50}), [10, 50]) self.assertEqual(Bayes([10, 10, 20]), [10, 10, 20]) self.assertEqual(Bayes([('a', 10), ('b', 50)]), [10, 50]) with self.assertRaises(ValueError): b = Bayes([('a', 10), ('b', 50), ('a', 15)]) def test_get_odds(self): b = Bayes({'a': 10, 'b': 50}) self.assertEqual(b['a'], 10) self.assertEqual(b['b'], 50) self.assertEqual(b[0], 10) self.assertEqual(b[1], 50) with self.assertRaises(IndexError): b[2] with self.assertRaises(ValueError): b['c'] def test_set_odds(self): b = Bayes((10, 20, 30)) b[0] = 50 b[1] = 40 b[2] = 30 self.assertEqual(b, [50, 40, 30]) def test_opposite(self): b = Bayes([0.2, 0.8]) opposite = b.opposite() self.assertEqual(opposite[0] / opposite[1], b[1] / b[0]) b = Bayes([0.2, 0.4, 0.4]) opposite = b.opposite() self.assertEqual(opposite[0] / opposite[1], b[1] / b[0]) self.assertEqual(opposite[1] / opposite[2], b[2] / b[1]) self.assertEqual(opposite[0] / opposite[2], b[2] / b[0]) def test_normalized(self): self.assertEqual(Bayes([]).normalized(), []) self.assertEqual(Bayes([2]).normalized(), [1]) self.assertEqual(Bayes([9, 1]).normalized(), [0.9, 0.1]) self.assertEqual(Bayes([2, 4, 4]).normalized(), [0.2, 0.4, 0.4]) self.assertEqual(Bayes([2, 0]).normalized(), [1.0, 0]) with self.assertRaises(ZeroDivisionError): self.assertEqual(Bayes([0, 0]).normalized(), [1.0, 0]) def test_operators(self): b = Bayes([5, 2, 3]) b *= (2, 2, 1) b /= (2, 2, 1) self.assertEqual(b, [5, 2, 3]) self.assertEqual(Bayes([.5, .5]) * (.9, .1), [0.45, 0.05]) self.assertEqual(Bayes([.5, .5]) / (.9, .1), [5 / 9, 5]) self.assertEqual(Bayes([.5, .5]) * {'0': 0.9, '1': 0.1}, [0.45, 0.05]) self.assertEqual(Bayes([.5, .5]) * [('0', 0.9), ('1', 0.1)], [0.45, 0.05]) def test_equality(self): b1 = Bayes([0.5, 0.2, 0.3]) b2 = Bayes([5, 2, 3]) b3 = Bayes([5, 2, 5]) self.assertEqual(b1, b2) self.assertNotEqual(b1, b3) self.assertNotEqual(b2, b3) def test_update(self): b = Bayes([1, 2]) b.update((2, 1)) self.assertEqual(b, [1, 1]) b.update((2, 1)) self.assertEqual(b, [2, 1]) b.update((2, 0)) self.assertEqual(b, [1, 0]) def test_update_from_events(self): b = Bayes([1, 1]) b.update_from_events(['a', 'a', 'a'], {'a': (0.5, 2)}) self.assertEqual(b, [0.5 ** 3, 2 ** 3]) def test_update_from_tests(self): b = Bayes([1, 1]) b.update_from_tests([True], [0.9, 0.1]) self.assertEqual(b, [0.45, 0.05]) b = Bayes([1, 1]) b.update_from_tests([True, True, True, False], [0.5, 2]) self.assertEqual(b, [0.5 ** 2, 2 ** 2]) if __name__ == '__main__': unittest.main()
Python
0.000855
@@ -3442,16 +3442,283 @@ ** 2%5D)%0A%0A + def test_most_likely(self):%0A b = Bayes(%7B'a': 10, 'b': 1%7D)%0A self.assertEqual(b.most_likely(), 'a')%0A self.assertEqual(b.most_likely(0), 'a')%0A self.assertEqual(b.most_likely(0.9), 'a')%0A self.assertEqual(b.most_likely(0.91), None)%0A%0A if __nam
a6a920ee19a988a27920a4fe035a0d57b8cc36b7
Improve error message
wsgi_status/monitor.py
wsgi_status/monitor.py
# -*- coding: utf-8 -*- import fcntl import json import os import psutil import stat import signal import sys import time import threading class Monitor: def __init__(self, app, filename): self.app = app self.pid = os.getpid() self.filename = filename self.thread = False self.worker = { "pid": self.pid, "requests": 0, "status": "idle", # "vss": 0, # "rss": 0, "last_spawn": int(time.time()), # "tx": 0, # "avg_rt": 0, "uri": "", "method": "", } if self.is_threadmodel(): self.thread = True with open(filename, mode="w") as fp: fp.write("{}{}".format( "WSGI status does not support worker thread model. ", "Work only worker pre-fork.")) return self.pre_sigint_handler = signal.getsignal(signal.SIGINT) self.pre_sigterm_handler = signal.getsignal(signal.SIGTERM) self.pre_sigabrt_handler = signal.getsignal(signal.SIGABRT) # Create status file for own process permissions ppid_ctime = psutil.Process(os.getppid()).create_time() file_ctime = 0.0 if os.path.exists(self.filename): file_ctime = os.stat(self.filename).st_ctime if ppid_ctime > file_ctime: os.remove(self.filename) if ppid_ctime > file_ctime: with open(filename, mode="w") as f: obj = { "workers": [], } json.dump(obj, f) os.chown(filename, os.getuid(), os.getgid()) statinfo = os.stat(filename) mode = statinfo.st_mode + stat.S_IWGRP os.chmod(filename, mode=mode) # Handler for receiving termination signal signal.signal(signal.SIGINT, self.handler) signal.signal(signal.SIGTERM, self.handler) signal.signal(signal.SIGABRT, self.handler) self.update_status() def __call__(self, environ, start_response): if self.thread is True: resp = self.app(environ, start_response) return resp self.pre_request(environ) def post_request(status_code, headers, exc_info=None): self.worker["status"] = "idle" self.worker["uri"] = "" self.worker["method"] = "" self.update_status() return start_response(status_code, headers, exc_info) return self.app(environ, post_request) def pre_request(self, environ): self.worker["requests"] += 1 self.worker["status"] = "busy" self.worker["uri"] = environ["PATH_INFO"] self.worker["method"] = environ["REQUEST_METHOD"] self.update_status() def handler(self, signum, stack): self.worker["status"] = str(signum) self.worker["uri"] = "" self.worker["method"] = "" proc = psutil.Process() files = proc.open_files() for f in files: if f.path == self.filename: fcntl.flock(f.fd, fcntl.LOCK_UN) self.update_status() if signum == signal.SIGINT: self.pre_sigint_handler(signum, stack) elif signum == signal.SIGTERM: self.pre_sigterm_handler(signum, stack) elif signum == signal.SIGABRT: self.pre_sigabrt_handler(signum, stack) sys.exit(1) def is_threadmodel(self): if threading.active_count() > 1: return True return False def update_status(self): with open(self.filename, mode="r+") as f: fcntl.flock(f.fileno(), fcntl.LOCK_EX) try: obj = {} try: obj = json.load(f) except ValueError: # Failed to json parse obj = { "workers": [], } workers = [(i, v) for i, v in enumerate(obj["workers"]) if v["pid"] == self.pid] if len(workers) == 1: index = workers[0][0] obj["workers"][index] = self.worker else: obj["workers"].append(self.worker) sys.stderr.write("not find self.pid: %d in workers key", self.pid) f.seek(0) f.truncate(0) json.dump(obj, f) f.flush() finally: fcntl.flock(f.fileno(), fcntl.LOCK_UN)
Python
0.000015
@@ -2076,34 +2076,43 @@ f.update_status( +init=True )%0A - %0A def __call_ @@ -2494,32 +2494,42 @@ f.update_status( +init=False )%0A re @@ -2876,16 +2876,26 @@ _status( +init=False )%0A%0A d @@ -3249,16 +3249,26 @@ _status( +init=False )%0A @@ -3718,24 +3718,30 @@ _status(self +, init ):%0A w @@ -4407,24 +4407,61 @@ elf.worker)%0A + if not init:%0A @@ -4509,10 +4509,10 @@ id: -%25d +%7B%7D in @@ -4523,18 +4523,24 @@ ers key%22 -, +.format( self.pid @@ -4540,16 +4540,17 @@ elf.pid) +) %0A
251e11ef777ece9542b21af1ed43fa580c2186b3
Bump to 2.1.2
opencanada/__init__.py
opencanada/__init__.py
from django.utils.version import get_version VERSION = (2, 1, 1, 'final', 0) __version__ = get_version(VERSION)
Python
0.000219
@@ -56,17 +56,17 @@ (2, 1, -1 +2 , 'final
212d6bbc559c0a7fab74bff647a49817384e10ff
substitute {format} with json in oembed_url
embeddit/__init__.py
embeddit/__init__.py
import os import re import json import requests import fnmatch from urllib import urlencode from BeautifulSoup import BeautifulSoup _ROOT = os.path.abspath(os.path.dirname(__file__)) invalid_url = {'error': 'Invalid URL'} unreachable = {'error': 'Failed to reach the URL'} empty_meta = {'error': 'Found no meta info for that url'} class Embeddit(dict): url = None fetched = False def __init__(self, url=None, *args, **kwargs): if url: self.url = url self.fetch() def fetch(self, force=False): if self.fetched and not force: return self response = self.fetch_oembed_meta() if 'error' in response: # No oembed info found. # Fall back to open graph response = self.fetch_og_meta() self.clear() self.update(response) self.fetched = True return response def to_json(self): if not self.fetched: self.fetch() return json.dumps(self) def fetch_oembed_meta(self): try: f = open(get_data('providers.json'), 'r') providers = json.loads(f.read()) oembed_url = None for provider in providers: for endpoint in provider.get('endpoints', []): for schema in endpoint.get('schemes', []): if not schema.startswith('http://*') or not schema.startswith('https://*'): schema = schema.replace('http://', 'http://*') schema = schema.replace('https://', 'https://*') if fnmatch.fnmatch(self.url, schema): oembed_url = endpoint.get('url') break if not oembed_url: provider_urls = [ provider.get('provider_url'), provider.get('provider_url').replace('http://', 'https://') ] for provider_url in provider_urls: if fnmatch.fnmatch(self.url, provider_url + "*"): oembed_url = provider.get('endpoints')[0].get('url') break if not oembed_url: return invalid_url params = urlencode({'url': self.url}) try: results = requests.get('%s?%s' % (oembed_url, params)) content = json.loads(results.content) content[u'source_type'] = 'oembed' except ValueError: params = urlencode({'url': self.url, 'format': 'json'}) results = requests.get('%s?%s' % (oembed_url, params)) content = json.loads(results.content) content[u'source_type'] = 'oembed' return content except IndexError: return empty_meta except requests.exceptions.InvalidSchema: return invalid_url except requests.exceptions.HTTPError: return unreachable def fetch_og_meta(self): try: results = requests.get(self.url) soup = BeautifulSoup(results.content) meta = soup.findAll('meta') content = {} for tag in meta: if tag.has_key('property'): if re.search('og:', tag['property']) is not None: key = re.sub('og:', '', tag['property']) content[key] = tag['content'] if content == {}: return empty_meta else: content[u'source_type'] = 'open_graph' return content except requests.exceptions.InvalidSchema: return invalid_url except requests.exceptions.HTTPError: return unreachable def get_data(path): return os.path.join(_ROOT, 'data', path)
Python
0.000079
@@ -2434,32 +2434,60 @@ s' %25 (oembed_url +.replace('%7Bformat%7D', 'json') , params))%0A
baa024a9e09607f8295cfe526a9eb25906aca806
modify the filename
PyStudy/loadfile_speed.py
PyStudy/loadfile_speed.py
#!/usr/bin/env python import datetime count = 0 begin_time = datetime.datetime.now() def readInChunks(fileObj, chunkSize=2048): """ Lazy function to read a file piece by piece. Default chunk size: 2kB. """ while True: data = fileObj.read(chunkSize) if not data: break yield data f = open('fastapi-requests.log.1') for chuck in readInChunks(f): count = count + 1 end_time = datetime.datetime.now() total_time = end_time - begin_time print "chunk=%s, count=%i"%(total_time, count) f.close() count = 0 begin_time = datetime.datetime.now() f = open('fastapi-requests.log.1') for line in f: count = count + 1 end_time = datetime.datetime.now() total_time = end_time - begin_time print "read=%s, count=%i"%(total_time, count) f.close()
Python
0.999999
@@ -342,38 +342,23 @@ = open(' -fastapi-requests.log.1 +bigfile ')%0Afor c @@ -595,30 +595,15 @@ en(' -fastapi-requests.log.1 +bigfile ')%0Af
7c293a2d7551e7d83e0a429f836d7aa475157f92
Add get_by_scope class method to OrgTag model.
app/soc/modules/gsoc/models/organization.py
app/soc/modules/gsoc/models/organization.py
#!/usr/bin/python2.5 # # Copyright 2009 the Melange authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This module contains the GSoC specific Organization Model. """ __authors__ = [ '"Daniel Hans" <daniel.m.hans@gmail.com>', '"Lennard de Rijk" <ljvderijk@gmail.com>', ] from google.appengine.ext import db from django.utils.translation import ugettext from taggable.taggable import Tag from taggable.taggable import Taggable from taggable.taggable import tag_property import soc.models.organization class OrgTag(Tag): """Model for storing all Organization tags. """ predefined = db.BooleanProperty(required=True, default=False) @classmethod def get_or_create(cls, scope, tag_name, predefined=False): """Get the Tag object that has the tag value given by tag_value. """ tag_key_name = cls._key_name(scope.key().name(), tag_name) existing_tag = cls.get_by_key_name(tag_key_name) if existing_tag is None: # the tag does not yet exist, so create it. def create_tag_txn(): new_tag = cls(key_name=tag_key_name, tag=tag_name, scope=scope, predefined=predefined) new_tag.put() return new_tag existing_tag = db.run_in_transaction(create_tag_txn) else: # the tag exists, but if predefined argument is True, let us make sure # that its value in the store is updated if predefined and not existing_tag.predefined: existing_tag.predefined = True existing_tag.put() return existing_tag @classmethod def get_predefined_for_scope(cls, scope): """Get a list of predefined tag objects that has a given scope. """ return db.Query(cls).filter('scope = ', scope).filter( 'predefined = ', True).fetch(1000) class GSoCOrganization(Taggable, soc.models.organization.Organization): """GSoC Organization model extends the basic Organization model. """ slots = db.IntegerProperty(required=False, default=0, verbose_name=ugettext('Slots allocated')) slots.help_text = ugettext( 'The amount of slots allocated to this organization.') slots_desired = db.IntegerProperty(required=False, default=0, verbose_name=ugettext('Slots desired')) slots_desired.help_text = ugettext( 'The amount of slots desired by this organization.') slots_calculated = db.IntegerProperty(required=False, default=0, verbose_name=ugettext('Slots calculated')) slots_calculated.help_text = ugettext( 'The amount of slots calculated for this organization.') nr_applications = db.IntegerProperty(required=False, default=0, verbose_name=ugettext('Amount of applications received')) nr_applications.help_text = ugettext( 'The amount of applications received by this organization.') nr_mentors = db.IntegerProperty(required=False, default=0, verbose_name=ugettext('Amount of mentors assigned')) nr_mentors.help_text = ugettext( 'The amount of mentors assigned to a proposal by this organization.') org_tag = tag_property('org_tag') def __init__(self, parent=None, key_name=None, app=None, **entity_values): """Constructor for GSoCOrganization Model. Args: See Google App Engine APIs. """ db.Model.__init__(self, parent, key_name, app, **entity_values) Taggable.__init__(self, org_tag=OrgTag)
Python
0
@@ -2013,16 +2013,196 @@ ng_tag%0A%0A + @classmethod%0A def get_by_scope(cls, scope):%0A %22%22%22Get the list of tag objects that has the given scope.%0A %22%22%22%0A%0A return db.Query(cls).filter('scope =', scope).fetch(1000)%0A%0A @class
211f88cc377b0d9432258d0ebc3fdc2ebd54302f
EDIT requirements updated. imports updated
nsaba/geneinfo.py
nsaba/geneinfo.py
""" geneinfo.py: methods for querying, saving and loading gene information for NIH database. Author: Torben Noto """ import pandas as pd import os import random import urllib2 from BeautifulSoup import BeautifulSoup from time import sleep from collections import namedtuple def gene_info(eid): """ Pulls gene data based on Entrez ID from the NIH and returns summary. Parameters ---------- eid : int Entrez ID of interest Returns ------- (gene_name, gene_description) : (string, string) gene_description contains a string of appropriately 30-80 characters describing function, relevance and attribution of the gene specified by eid. """ if isinstance(eid, str): try: page_name = "http://www.ncbi.nlm.nih.gov/gene/?term=" + eid page = urllib2.urlopen(page_name) sleep(1+random.random()) soup = BeautifulSoup(page) contents = [] for ana in soup.findAll('dd'): if ana.parent.name == 'dl': contents.append(ana.contents) gene_name = contents[1][0] gene_description = contents[9] if not len(gene_description[0]) > 1: gene_description = 'No description found' return gene_name, gene_description except IndexError: print "%s isn't registered with the NIH" % eid return 'No Gene identification found', 'No description found' else: raise TypeError("gene no must be a string") def load_gene_file(path='.'): """ Loads file containing gene descriptions of genes specified by their Entrez IDs from: http://www.ncbi.nlm.nih.gov/gene/ . Parameters ---------- path : str, optional Specifies path to gene_info.csv. Returns ------- pandas.DataFrame Returns a DataFrame where each row contains three fields: 'Entrez', 'Gene Name' and 'Gene Description'. Where 'Entrez' specifies the gene's Entrez ID and the last two fields are of the same form as gene_info()'s returns. NOTE: This assumes that correct CSV has been loaded. """ if isinstance(path, str): gene_file = os.path.join(path, 'gene_info.csv') df = pd.read_csv(gene_file) return df else: raise TypeError("Gene-file path must be a string") def get_gene_info(path, gene_ids): """ Extracts gene information from DataFrame created by load_gene_file() for specific genes based on list of Entrez IDs. Parameters --------- path : str Specifies path to gene_info.csv. gene_ids : list [ int ] List of Entrez IDs of gene descriptions to be fetched Returns ------- output : list [ gi_tuple (long, str, u-str) ] Returns a list of gene information for specified Entrez IDs in form: ('Entrez', 'Gene Name' 'Gene Description'). """ gi_tuple = namedtuple("gi_tuple", "entrez name description") df = load_gene_file(path) output = [] for gene_id in gene_ids: if gene_id in df['Entrez']: gi = df[df['Entrez'] == gene_id].as_matrix()[0] output.append(gi_tuple(gi[0], gi[1], gi[2])) else: print 'Gene %s not found in NIH database' % gene_id return output
Python
0.00325
@@ -176,29 +176,19 @@ b2%0Afrom -BeautifulSoup +bs4 import
96e26b74851c0b54493f3c269ceefb6b2ae53e7d
implement fromXml toXml and defaultInit method of Resolution class
settingMod/Resolution.py
settingMod/Resolution.py
#!/usr/bin/python3.4 # -*-coding:Utf-8 -* '''module to manage resolution settings''' import xml.etree.ElementTree as xmlMod from settingMod.Size import * import os class Resolution: '''class to manage resolution settings''' def __init__(self, xml= None): '''initialize resolution settings with default value or values extracted from an xml object''' if xml is None: self.defaultInit() else: self.fromXml(xml) def defaultInit(self): '''initialize resolution settings with default value''' def fromXml(self, xml): '''initialize resolution settings with values extracted from an xml object''' def toXml(self): '''export resolution settings into xml syntaxed string''' def see(self, log): '''menu to explore and edit resolution settings settings''' def print(self): '''a method to print preset'''
Python
0
@@ -511,25 +511,76 @@ value'''%0A%09%09 +self.pourcent = 100%0A%09%09self.size = Size('1920x1080') %0A - %09%0A%09%0A%09%0A%09%0A%09%0A%09d @@ -680,24 +680,94 @@ object'''%0A%09%09 +self.pourcent = int(xml.get('pourcent'))%0A%09%09self.size = Size(xml = xml) %0A%09%0A%09%0A%09%0A%09%0A%09%0A%09 @@ -839,26 +839,109 @@ d string'''%0A - %09%09 +return '%3Cresolution pourcent=%22'+str(self.pourcent)+'%22 '+self.size.toXmlAttr()+' /%3E' %0A%09%0A%09%0A%09%0A%09%0A%09%0A%09
44dcbfe606377331a40777a7b387768c816b0e61
Increment to .2.11 for new package
nymms/__init__.py
nymms/__init__.py
__version__ = '0.2.10'
Python
0.000017
@@ -13,11 +13,11 @@ = '0.2.1 -0 +1 '%0A
e3a93aff39ed4a876bdfabd5e62271bce9fe11e9
remove unused analyzers import clause
src/cmdlr/amgr.py
src/cmdlr/amgr.py
"""Cmdlr analyzers holder and importer.""" import importlib import pkgutil import os import sys import functools import re from . import analyzers as _analyzers # NOQA from .exception import NoMatchAnalyzer from .exception import ExtraAnalyzersDirNotExists from .exception import AnalyzerRuntimeError class AnalyzerManager: """Import, active, dispatch and hold all analyzer.""" analyzers_pkgpath = 'cmdlr.analyzers' def __init__(self, config): """Import all analyzers.""" self.__analyzers = {} self.__analyzer_picker = None self.config = config self.__import_all_analyzer() self.__build_analyzer_picker() def __import_all_analyzer(self): extra_analyzer_dir = self.config.extra_analyzer_dir disabled_analyzers = self.config.disabled_analyzers analyzer_dirs = [os.path.join(os.path.dirname(__file__), 'analyzers')] if extra_analyzer_dir and not os.path.isdir(extra_analyzer_dir): raise ExtraAnalyzersDirNotExists( 'extra_analyzer_dir already be set but not exists, path: "{}"' .format(extra_analyzer_dir)) elif extra_analyzer_dir: analyzer_dirs[:0] = [extra_analyzer_dir] for finder, module_name, ispkg in pkgutil.iter_modules(analyzer_dirs): if module_name not in disabled_analyzers: full_module_name = (type(self).analyzers_pkgpath + '.' + module_name) spec = finder.find_spec(full_module_name) module = importlib.util.module_from_spec(spec) sys.modules[full_module_name] = module spec.loader.exec_module(module) aname = module_name self.__analyzers[aname] = module.Analyzer( customization=self.config.get_customization(aname), ) self.__analyzers[aname].aname = aname def __build_analyzer_picker(self): retype = type(re.compile('')) mappers = [] for aname, analyzer in self.__analyzers.items(): for pattern in analyzer.entry_patterns: if isinstance(pattern, retype): mappers.append((pattern, analyzer)) elif isinstance(pattern, str): mappers.append((re.compile(pattern), analyzer)) else: raise AnalyzerRuntimeError( 'some entry pattern in analyzer "{}"' ' neither str nor re.compile type' .format(aname) ) def analyzer_picker(curl): for pattern, analyzer in mappers: if pattern.search(curl): return analyzer raise NoMatchAnalyzer( 'No Matched Analyzer: {}'.format(curl), ) self.__analyzer_picker = analyzer_picker @functools.lru_cache(maxsize=None, typed=True) def get_match_analyzer(self, curl): """Get a url matched analyzer.""" return self.__analyzer_picker(curl) @functools.lru_cache(maxsize=None, typed=True) def get_normalized_entry(self, curl): """Return the normalized entry url.""" return self.get_match_analyzer(curl).entry_normalizer(curl) def get_analyzer_infos(self): """Return all analyzer info.""" def get_desc(analyzer): return analyzer.__doc__ unsorted_infos = [ (aname, get_desc(analyzer)) for aname, analyzer in self.__analyzers.items() ] return sorted(unsorted_infos, key=lambda item: item[0])
Python
0.000001
@@ -122,54 +122,8 @@ re%0A%0A -from . import analyzers as _analyzers # NOQA%0A from
e7c39d4d2287309f82ca2c644880505cd896b921
Change normalize_encodings() to avoid using .translate() or depending on the string type. It will always return a Unicode string. The algoritm's specification is unchanged.
Lib/encodings/__init__.py
Lib/encodings/__init__.py
""" Standard "encodings" Package Standard Python encoding modules are stored in this package directory. Codec modules must have names corresponding to normalized encoding names as defined in the normalize_encoding() function below, e.g. 'utf-8' must be implemented by the module 'utf_8.py'. Each codec module must export the following interface: * getregentry() -> codecs.CodecInfo object The getregentry() API must a CodecInfo object with encoder, decoder, incrementalencoder, incrementaldecoder, streamwriter and streamreader atttributes which adhere to the Python Codec Interface Standard. In addition, a module may optionally also define the following APIs which are then used by the package's codec search function: * getaliases() -> sequence of encoding name strings to use as aliases Alias names returned by getaliases() must be normalized encoding names as defined by normalize_encoding(). Written by Marc-Andre Lemburg (mal@lemburg.com). (c) Copyright CNRI, All Rights Reserved. NO WARRANTY. """#" import codecs from . import aliases _cache = {} _unknown = '--unknown--' _import_tail = ['*'] _norm_encoding_map = (' . ' '0123456789 ABCDEFGHIJKLMNOPQRSTUVWXYZ ' ' abcdefghijklmnopqrstuvwxyz ' ' ' ' ' ' ') _aliases = aliases.aliases class CodecRegistryError(LookupError, SystemError): pass def normalize_encoding(encoding): """ Normalize an encoding name. Normalization works as follows: all non-alphanumeric characters except the dot used for Python package names are collapsed and replaced with a single underscore, e.g. ' -;#' becomes '_'. Leading and trailing underscores are removed. Note that encoding names should be ASCII only; if they do use non-ASCII characters, these must be Latin-1 compatible. """ # Make sure we have an 8-bit string, because .translate() works # differently for Unicode strings. if isinstance(encoding, str): # Note that .encode('latin-1') does *not* use the codec # registry, so this call doesn't recurse. (See unicodeobject.c # PyUnicode_AsEncodedString() for details) encoding = encoding.encode('latin-1') return '_'.join(encoding.translate(_norm_encoding_map).split()) def search_function(encoding): # Cache lookup entry = _cache.get(encoding, _unknown) if entry is not _unknown: return entry # Import the module: # # First try to find an alias for the normalized encoding # name and lookup the module using the aliased name, then try to # lookup the module using the standard import scheme, i.e. first # try in the encodings package, then at top-level. # norm_encoding = normalize_encoding(encoding) aliased_encoding = _aliases.get(norm_encoding) or \ _aliases.get(norm_encoding.replace('.', '_')) if aliased_encoding is not None: modnames = [aliased_encoding, norm_encoding] else: modnames = [norm_encoding] for modname in modnames: if not modname or '.' in modname: continue try: # Import is absolute to prevent the possibly malicious import of a # module with side-effects that is not in the 'encodings' package. mod = __import__('encodings.' 
+ modname, fromlist=_import_tail, level=0) except ImportError: pass else: break else: mod = None try: getregentry = mod.getregentry except AttributeError: # Not a codec module mod = None if mod is None: # Cache misses _cache[encoding] = None return None # Now ask the module for the registry entry entry = getregentry() if not isinstance(entry, codecs.CodecInfo): if not 4 <= len(entry) <= 7: raise CodecRegistryError,\ 'module "%s" (%s) failed to register' % \ (mod.__name__, mod.__file__) if not hasattr(entry[0], '__call__') or \ not hasattr(entry[1], '__call__') or \ (entry[2] is not None and not hasattr(entry[2], '__call__')) or \ (entry[3] is not None and not hasattr(entry[3], '__call__')) or \ (len(entry) > 4 and entry[4] is not None and not hasattr(entry[4], '__call__')) or \ (len(entry) > 5 and entry[5] is not None and not hasattr(entry[5], '__call__')): raise CodecRegistryError,\ 'incompatible codecs in module "%s" (%s)' % \ (mod.__name__, mod.__file__) if len(entry)<7 or entry[6] is None: entry += (None,)*(6-len(entry)) + (mod.__name__.split(".", 1)[1],) entry = codecs.CodecInfo(*entry) # Cache the codec registry entry _cache[encoding] = entry # Register its aliases (without overwriting previously registered # aliases) try: codecaliases = mod.getaliases() except AttributeError: pass else: for alias in codecaliases: if alias not in _aliases: _aliases[alias] = modname # Return the registry entry return entry # Register the search_function in the Python codec registry codecs.register(search_function)
Python
0.000002
@@ -1170,415 +1170,8 @@ *'%5D%0A -_norm_encoding_map = (' . '%0A '0123456789 ABCDEFGHIJKLMNOPQRSTUVWXYZ '%0A ' abcdefghijklmnopqrstuvwxyz '%0A ' '%0A ' '%0A ' ')%0A _ali @@ -1193,16 +1193,16 @@ aliases%0A + %0Aclass C @@ -1746,330 +1746,215 @@ -# Make sure we have an 8-bit string, because .translate() works%0A # differently for Unicode strings. +chars = %5B%5D%0A punct = False%0A for c in encoding: %0A -if isinstance(encoding, str):%0A # Note that .encode('latin-1') does *not* use the codec%0A # registry, so this call doesn't recurse. (See unicodeobject.c%0A # PyUnicode_AsEncodedString() for details) + if c.isalnum() or c == '.':%0A if punct and chars:%0A chars.append('_')%0A chars.append(c)%0A punct = False %0A @@ -1963,44 +1963,37 @@ e -ncoding = encoding.encode('latin-1') +lse:%0A punct = True %0A @@ -2005,62 +2005,20 @@ rn ' -_ '.join( -encoding.translate(_norm_encoding_map).split() +chars )%0A%0Ad
7095380ff71947f76ff60765e699da8e31fde944
Build - remove dir directory - not used
project_generator/commands/build.py
project_generator/commands/build.py
# Copyright 2015 0xc0170 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import logging from ..tools_supported import ToolsSupported from ..generate import Generator from ..settings import ProjectSettings help = 'Build a project' def run(args): # Export if we know how, otherwise return if os.path.exists(args.file): generator = Generator(args.file) for project in generator.generate(args.project): export_result = project.export(args.tool, args.copy) build_result = project.build(args.tool) if build_result == 0 and export_result == 0: return 0 else: return -1 else: # not project known by pgen logging.warning("%s not found." % args.file) return -1 def setup(subparser): subparser.add_argument( "-f", "--file", help="YAML projects file", default='projects.yaml') subparser.add_argument("-p", "--project", help="Name of the project to build", default = '') subparser.add_argument( "-t", "--tool", help="Build a project files for provided tool") subparser.add_argument( "-dir", "--directory", help="The projects directory") subparser.add_argument( "-c", "--copy", action="store_true", help="Copy all files to the exported directory")
Python
0
@@ -1629,98 +1629,8 @@ nt(%0A - %22-dir%22, %22--directory%22, help=%22The projects directory%22)%0A subparser.add_argument(%0A
f06a7f0eb8fe79b3fc44a8adece15df5a85ccf27
make some sqlalchemy config options explicit (and not dependent on external specification) because they fundamentally change behaviors
src/server/_config.py
src/server/_config.py
import os from dotenv import load_dotenv from flask import Flask import json load_dotenv() VERSION = "0.3.20" MAX_RESULTS = int(10e6) MAX_COMPATIBILITY_RESULTS = int(3650) SQLALCHEMY_DATABASE_URI = os.environ.get("SQLALCHEMY_DATABASE_URI", "sqlite:///test.db") SQLALCHEMY_ENGINE_OPTIONS = json.loads(os.environ.get("SQLALCHEMY_ENGINE_OPTIONS", "{}")) SECRET = os.environ.get("FLASK_SECRET", "secret") URL_PREFIX = os.environ.get("FLASK_PREFIX", "/") AUTH = { "twitter": os.environ.get("SECRET_TWITTER"), "ght": os.environ.get("SECRET_GHT"), "fluview": os.environ.get("SECRET_FLUVIEW"), "cdc": os.environ.get("SECRET_CDC"), "sensors": os.environ.get("SECRET_SENSORS"), "quidel": os.environ.get("SECRET_QUIDEL"), "norostat": os.environ.get("SECRET_NOROSTAT"), "afhsb": os.environ.get("SECRET_AFHSB"), } # begin sensor query authentication configuration # A multimap of sensor names to the "granular" auth tokens that can be used to access them; excludes the "global" sensor auth key that works for all sensors: GRANULAR_SENSOR_AUTH_TOKENS = { "twtr": os.environ.get("SECRET_SENSOR_TWTR", "").split(","), "gft": os.environ.get("SECRET_SENSOR_GFT", "").split(","), "ght": os.environ.get("SECRET_SENSOR_GHT", "").split(","), "ghtj": os.environ.get("SECRET_SENSOR_GHTJ", "").split(","), "cdc": os.environ.get("SECRET_SENSOR_CDC", "").split(","), "quid": os.environ.get("SECRET_SENSOR_QUID", "").split(","), "wiki": os.environ.get("SECRET_SENSOR_WIKI", "").split(","), } # A set of sensors that do not require an auth key to access: OPEN_SENSORS = [ "sar3", "epic", "arch", ] REGION_TO_STATE = { "hhs1": ["VT", "CT", "ME", "MA", "NH", "RI"], "hhs2": ["NJ", "NY"], "hhs3": ["DE", "DC", "MD", "PA", "VA", "WV"], "hhs4": ["AL", "FL", "GA", "KY", "MS", "NC", "TN", "SC"], "hhs5": ["IL", "IN", "MI", "MN", "OH", "WI"], "hhs6": ["AR", "LA", "NM", "OK", "TX"], "hhs7": ["IA", "KS", "MO", "NE"], "hhs8": ["CO", "MT", "ND", "SD", "UT", "WY"], "hhs9": ["AZ", "CA", "HI", "NV"], "hhs10": ["AK", "ID", "OR", "WA"], "cen1": ["CT", "ME", "MA", "NH", "RI", "VT"], "cen2": ["NJ", "NY", "PA"], "cen3": ["IL", "IN", "MI", "OH", "WI"], "cen4": ["IA", "KS", "MN", "MO", "NE", "ND", "SD"], "cen5": ["DE", "DC", "FL", "GA", "MD", "NC", "SC", "VA", "WV"], "cen6": ["AL", "KY", "MS", "TN"], "cen7": ["AR", "LA", "OK", "TX"], "cen8": ["AZ", "CO", "ID", "MT", "NV", "NM", "UT", "WY"], "cen9": ["AK", "CA", "HI", "OR", "WA"], } NATION_REGION = "nat"
Python
0
@@ -257,16 +257,28 @@ est.db%22) +%0A%0A# defaults %0ASQLALCH @@ -297,16 +297,334 @@ PTIONS = + %7B%0A %22pool_pre_ping%22: True, # enable ping test for validity of recycled pool connections on connect() calls%0A %22pool_recycle%22: 5 # seconds after which a recycled pool connection is considered invalid%0A%7D%0A# update with overrides of defaults or additions from external configs%0ASQLALCHEMY_ENGINE_OPTIONS.update(%0A json.lo @@ -677,16 +677,18 @@ , %22%7B%7D%22)) +)%0A %0ASECRET
ff0ac35dcec8d5a4d0b9b5ba58d56cc865146ff9
add information about the exported RequestConnection method
src/server/connmgr.py
src/server/connmgr.py
# telepathy-python - Base classes defining the interfaces of the Telepathy framework # # Copyright (C) 2005, 2006 Collabora Limited # Copyright (C) 2005, 2006 Nokia Corporation # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA import dbus import dbus.service from telepathy.errors import NotImplemented from telepathy.interfaces import (CONN_MGR_INTERFACE) from telepathy.server.properties import DBusProperties from telepathy._generated.Connection_Manager \ import ConnectionManager as _ConnectionManager class ConnectionManager(_ConnectionManager, DBusProperties): def __init__(self, name): """ Initialise the connection manager. """ bus_name = 'org.freedesktop.Telepathy.ConnectionManager.%s' % name object_path = '/org/freedesktop/Telepathy/ConnectionManager/%s' % name _ConnectionManager.__init__(self, dbus.service.BusName(bus_name, dbus.Bus(), do_not_queue=True), object_path) self._interfaces = set() self._connections = set() self._protos = {} # proto name => Connection constructor self._protocols = {} # proto name => Protocol object DBusProperties.__init__(self) self._implement_property_get(CONN_MGR_INTERFACE, { 'Interfaces': lambda: dbus.Array(self._interfaces, signature='s'), 'Protocols': lambda: dbus.Dictionary(self._protocol_properties, signature='sa{sv}') }) def connected(self, conn): """ Add a connection to the list of connections, emit the appropriate signal. """ self._connections.add(conn) self.NewConnection(conn._name.get_name(), conn._object_path, conn._proto) def disconnected(self, conn): """ Remove a connection from the list of connections. """ self._connections.remove(conn) if hasattr(conn, 'remove_from_connection'): # requires dbus-python >= 0.81.1 conn.remove_from_connection() del conn return False # when called in an idle callback def check_proto(self, proto): if proto not in self._protos: raise NotImplemented('unknown protocol %s' % proto) def check_protocol(self, proto): if proto not in self._protocols: raise NotImplemented('no protocol object for %s' % proto) @dbus.service.method(CONN_MGR_INTERFACE, in_signature='s', out_signature='a(susv)') def GetParameters(self, proto): "Returns the mandatory and optional parameters for the given proto." 
self.check_proto(proto) self.check_protocol(proto) return self._protocols[proto].parameters @dbus.service.method(CONN_MGR_INTERFACE, in_signature='', out_signature='as') def ListProtocols(self): return self._protos.keys() def RequestConnection(self, proto, parameters): self.check_proto(proto) conn = self._protos[proto](self, parameters) self.connected(conn) return (conn._name.get_name(), conn._object_path) def _implement_protocol(self, name, protocol_class): protocol = protocol_class(self) self._protocols[name] = protocol self._protos[name] = protocol.create_connection @property def _protocol_properties(self): properties = {} for name, protocol in self._protocols.items(): properties[name] = protocol.get_immutable_properties() return properties
Python
0
@@ -3621,16 +3621,181 @@ keys()%0A%0A + @dbus.service.method('org.freedesktop.Telepathy.ConnectionManager',%0A in_signature='sa%7Bsv%7D',%0A out_signature='so')%0A def
9ec02a7cc31766d2b0d46547addddc0ca350e8ed
make pylint even more happy
neuralmonkey/runners/perplexity_runner.py
neuralmonkey/runners/perplexity_runner.py
""" This module contains an implementation of a runner that is supposed to be used in case we train a language model. Instead of decoding sentences in computes its perplexities given the decoder. """ #tests: lint from neuralmonkey.learning_utils import feed_dicts class PerplexityRunner(object): def __init__(self, decoder, batch_size): self.decoder = decoder self.batch_size = batch_size self.vocabulary = decoder.vocabulary def __call__(self, sess, dataset, coders): if not dataset.has_series(self.decoder.data_id): raise Exception("Dataset must have the target values ({})" "for computing perplexity." .format(self.decoder.data_id)) batched_dataset = dataset.batch_dataset(self.batch_size) losses = [self.decoder.train_loss, self.decoder.runtime_loss] perplexities = [] train_loss = 0.0 runtime_loss = 0.0 batch_count = 0 for batch in batched_dataset: batch_count += 1 batch_feed_dict = feed_dicts(batch, coders, train=False) cross_entropies, opt_loss, dec_loss = sess.run( [self.decoder.cross_entropies] + losses, feed_dict=batch_feed_dict) perplexities.extend([2 ** xent for xent in cross_entropies]) train_loss += opt_loss runtime_loss += dec_loss avg_train_loss = train_loss / batch_count avg_runtime_loss = runtime_loss / batch_count return perplexities, avg_train_loss, avg_runtime_loss
Python
0.000001
@@ -259,16 +259,56 @@ _dicts%0A%0A +#pylint: disable=too-few-public-methods%0A class Pe @@ -783,162 +783,8 @@ id)) -%0A%0A batched_dataset = dataset.batch_dataset(self.batch_size)%0A losses = %5Bself.decoder.train_loss,%0A self.decoder.runtime_loss%5D %0A @@ -806,17 +806,16 @@ es = %5B%5D%0A -%0A @@ -858,17 +858,16 @@ s = 0.0%0A -%0A @@ -882,16 +882,17 @@ unt = 0%0A +%0A @@ -908,23 +908,29 @@ in +dataset. batch -ed _dataset :%0A @@ -925,16 +925,33 @@ _dataset +(self.batch_size) :%0A @@ -1151,18 +1151,95 @@ pies -%5D + losses +,%0A self.decoder.train_loss,%0A self.decoder.runtime_loss%5D ,%0A
1fc9561148402c4eb558d183f4d8f3ecce0a0330
Set version to 0.4.1
alignak_backend/__init__.py
alignak_backend/__init__.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Alignak REST backend """ # Application manifest VERSION = (0, 4, 0) __application__ = u"Alignak_Backend" __version__ = '.'.join((str(each) for each in VERSION[:4])) __author__ = u"Alignak team" __copyright__ = u"(c) 2015 - %s" % __author__ __license__ = u"GNU Affero General Public License, version 3" __description__ = u"Alignak REST backend" __releasenotes__ = u"""Alignak REST Backend""" __doc_url__ = "https://github.com/Alignak-monitoring-contrib/alignak-backend" # Application manifest manifest = { 'name': __application__, 'version': __version__, 'author': __author__, 'description': __description__, 'copyright': __copyright__, 'license': __license__, 'release': __releasenotes__, 'doc': __doc_url__ }
Python
0
@@ -110,17 +110,17 @@ (0, 4, -0 +1 )%0A%0A__app
e77381b087acd935bc3dae1f6c2e809970506db9
remove SECRET_KEY, again
bepasty/config.py
bepasty/config.py
# Copyright: 2013 Bastian Blank <bastian@waldi.eu.org> # License: BSD 2-clause, see LICENSE for details. class Config(object): """This is the basic configuration class for bepasty.""" #: name of this site (put YOUR bepasty fqdn here) SITENAME = 'bepasty.example.org' UPLOAD_UNLOCKED = True """ .. warning:: Uploads are default unlocked. Actually the admin should manual unlock the uploaded files to avoid copyright issues. In hosted version you should set ``UPLOAD_UNLOCKED = False``. """ #: Define storage module #: Available: #: - filesystem #: - ceph STORAGE = 'filesystem' #: Filesystem storage specific config STORAGE_FILESYSTEM_DIRECTORY = '/tmp/' #: Config file for CEPH storage STORAGE_CEPH_CONFIG_FILE = '/etc/ceph/ceph.conf' #: CEPH pool name for actually data STORAGE_CEPH_POOL_DATA = 'bepasty-data' #: CEPH pool name for meta data STORAGE_CEPH_POOL_META = 'bepasty-meta' #: server secret key needed for secure cookies #: you must set a very long, very random, very secret string here, #: otherwise bepasty will not work (and crash when trying to log in)! SECRET_KEY = 'xx' #: not logged-in users get these permissions #: usually either nothing ('') or read-only ('read'): DEFAULT_PERMISSIONS = '' #: logged-in users may get more permissions #: you need a login secret to log in and, depending on that secret, you will #: get the configured permissions. #: you can use same secret / same permissions for all privileged users or #: set up different secrets / different permissions. #: PERMISSIONS is a dict that maps secrets to permissions, use it like: #: PERMISSIONS = { #: 'myadminsecret': 'admin,create,read,delete', #: 'myuploadersecret': 'create,read', #: } PERMISSIONS = { }
Python
0.000007
@@ -1206,10 +1206,8 @@ = ' -xx '%0A%0A
b89426c4dbb272492533574572c2413c7671cd1d
Fix for a bug that caused the 'has_delete_permission' method of a view to be called in another totally unrelated one.
xadmin/views/delete.py
xadmin/views/delete.py
from django.core.exceptions import PermissionDenied from django.db import transaction, router from django.http import Http404, HttpResponseRedirect from django.template.response import TemplateResponse from django import VERSION as django_version from django.utils import six from django.utils.encoding import force_text from django.utils.html import escape from django.utils.translation import ugettext as _ from xadmin.util import get_deleted_objects from xadmin.util import unquote from xadmin.views.edit import UpdateAdminView from xadmin.views.detail import DetailAdminView from xadmin.views.base import ModelAdminView, filter_hook, csrf_protect_m class DeleteAdminView(ModelAdminView): delete_confirmation_template = None def __init__(self, *args, **kwargs): super(DeleteAdminView, self).__init__(*args, **kwargs) admin_site_registry = self.admin_site._registry for model in admin_site_registry: if not hasattr(admin_site_registry[model], 'has_delete_permission'): setattr(admin_site_registry[model], 'has_delete_permission', self.has_delete_permission) def init_request(self, object_id, *args, **kwargs): """The 'delete' admin view for this model.""" self.obj = self.get_object(unquote(object_id)) if not self.has_delete_permission(self.obj): raise PermissionDenied if self.obj is None: raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {'name': force_text(self.opts.verbose_name), 'key': escape(object_id)}) using = router.db_for_write(self.model) # Populate deleted_objects, a data structure of all related objects that # will also be deleted. self.deleted_objects, model_count, self.perms_needed, self.protected = self.get_deleted_objects([self.obj]) @filter_hook def get_deleted_objects(self, queryset): # Populate deleted_objects, a data structure of all related objects that # will also be deleted. deleted_objects, model_count, perms_needed, protected = get_deleted_objects(queryset, self) return deleted_objects, model_count, perms_needed, protected @csrf_protect_m @filter_hook def get(self, request, object_id): context = self.get_context() return TemplateResponse(request, self.delete_confirmation_template or self.get_template_list("views/model_delete_confirm.html"), context) @csrf_protect_m @transaction.atomic @filter_hook def post(self, request, object_id): if self.perms_needed: raise PermissionDenied self.delete_model() response = self.post_response() if isinstance(response, str): response = HttpResponseRedirect(response) return response @filter_hook def delete_model(self): """ Given a model instance delete it from the database. 
""" self.log('delete', '', self.obj) self.obj.delete() @filter_hook def get_context(self): if self.perms_needed or self.protected: title = _("Cannot delete %(name)s") % {"name": force_text(self.opts.verbose_name)} else: title = _("Are you sure?") new_context = { "title": title, "object": self.obj, "deleted_objects": self.deleted_objects, "perms_lacking": self.perms_needed, "protected": self.protected, } context = super(DeleteAdminView, self).get_context() context.update(new_context) return context @filter_hook def get_breadcrumb(self): bcs = super(DeleteAdminView, self).get_breadcrumb() bcs.append({ 'title': force_text(self.obj), 'url': self.get_object_url(self.obj) }) item = {'title': _('Delete')} if self.has_delete_permission(): item['url'] = self.model_admin_url('delete', self.obj.pk) bcs.append(item) return bcs @filter_hook def post_response(self): self.message_user(_('The %(name)s "%(obj)s" was deleted successfully.') % {'name': force_text(self.opts.verbose_name), 'obj': force_text(self.obj)}, 'success') if not self.has_view_permission(): return self.get_admin_url('index') return self.model_admin_url('changelist')
Python
0
@@ -732,397 +732,8 @@ ne%0A%0A - def __init__(self, *args, **kwargs):%0A super(DeleteAdminView, self).__init__(*args, **kwargs)%0A admin_site_registry = self.admin_site._registry%0A for model in admin_site_registry:%0A if not hasattr(admin_site_registry%5Bmodel%5D, 'has_delete_permission'):%0A setattr(admin_site_registry%5Bmodel%5D, 'has_delete_permission', self.has_delete_permission)%0A%0A
0945d69af170d8eca723564a443f2d250c56bb0c
Switch to newer form of flask imports
ox_herd/scripts/serve_ox_herd.py
ox_herd/scripts/serve_ox_herd.py
"""Script to start the ox_herd server. This is the main script to run ox_herd and provide a python Flask based web server to respond to web requests. """ import configparser import argparse import logging import os from flask import Flask, redirect, url_for from flask_login import LoginManager, UserMixin, login_required from ox_herd import settings as ox_herd_settings DEFAULT_PORT = 6617 def prepare_parser(parser): "Prepare an arg parser to read command line." parser.add_argument('--debug', type=int, default=0, help=( 'Use 1 for debug mode else 0; cannot have host=0.0.0.0 w/debug')) parser.add_argument('--host', default='0.0.0.0', help=( 'IP address for allowed host (0.0.0.0 for all access).')) parser.add_argument('--plugin', action='append', help=( 'You can provide as many --plugin options as you like. Each\n' 'must be the path to an ox herd plugin to enable. For example\n' 'providing "--plugin ox_herd.core.plugins.pytest_plugin" would\n' 'enable the pytest plugin if it was not already enabled.')) parser.add_argument('--port', default=DEFAULT_PORT, help=( 'IP port to listen on.')) parser.add_argument('--base_url', help=( 'Base URL to use for ox_herd site. This is usually automatically\n' 'but you can override when testing.')) parser.add_argument('--logging', type=int, default=logging.INFO, help=( 'Python logLevel. Use %i for DEBUG, %i for INFO, etc.' % ( logging.DEBUG, logging.INFO))) def run(): "Main function to run server." parser = argparse.ArgumentParser( description='Command line tool to run ox_herd server') prepare_parser(parser) args = parser.parse_args() _do_setup(args) _serve(args) def _do_setup(args): "Should be called by run() to do basic setup based on args." if args.debug and args.host == '0.0.0.0': logging.warning('Setting host to 127.0.0.1 since in debug mode') args.host = '127.0.0.1' logging.getLogger('').setLevel(args.logging) logging.info('Set log level to %s', args.logging) plugin_list = args.plugin if args.plugin else [] plug_set = set(plugin_list) if len(plug_set) < len(plugin_list): raise ValueError('Duplicates in args.plugin = %s' % plugin_list) cur_plugs = set(ox_herd_settings.OX_PLUGINS) for item in plugin_list: if item not in cur_plugs: logging.info('Adding plugin %s to OX_PLUGINS.', item) ox_herd_settings.OX_PLUGINS.append(item) else: logging.info( 'Not adding plugin %s to OX_PLUGINS since already there.', item) def _setup_stub_login(app): conf_file = ox_herd_settings.OX_HERD_CONF if os.path.exists(conf_file): from ox_herd.core import login_stub app.register_blueprint(login_stub.LOGIN_STUB_BP) my_config = configparser.ConfigParser() my_config.read(conf_file) if 'STUB_USER_DB' in my_config: for user, hash_password in my_config.items('STUB_USER_DB'): ox_herd_settings.STUB_USER_DB[user] = hash_password else: logging.warning('Unable to find STUB_USER_DB in conf %s', conf_file) else: logging.warning('Unable to find OX_HERD_CONF at %s', ox_herd_settings.OX_HERD_CONF) def _serve(args): "Run the server. Should only be called by run after doing setup." if args.host == '0.0.0.0': if args.debug: raise TypeError('Cannot have host 0.0.0.0 with debug mode') else: logging.warning('Host = %s. When host != 0.0.0.0 non-local connections ' '\nwill be *IGNORED*. 
Only use for testing.', args.host) app = Flask('ox_herd') settings = {'SECRET_KEY' : os.urandom(128), 'USERNAME' : 'admin', 'DEBUG' : args.debug} app.config.from_object(__name__) app.config.update(settings) from ox_herd.ui.flask_web_ui import ox_herd from ox_herd.ui.flask_web_ui.ox_herd import views app.register_blueprint(ox_herd.OX_HERD_BP, url_prefix='/ox_herd') _setup_stub_login(app) assert bool(settings['DEBUG']) == bool(args.debug), ( 'Inconsistent debug values from settings and args.') @app.route("/") def redirect_to_ox_herd(): """Simple redirect to blueprint root. This is required so we can redirect from the top-level path when running ox_herd in stand-alone mode. """ return redirect(url_for("ox_herd.index")) logging.debug('Created %s for initial redirection', redirect_to_ox_herd) app.run(host=args.host, debug=args.debug, port=int(args.port)) if __name__ == '__main__': run()
Python
0
@@ -270,16 +270,30 @@ sk_login +.login_manager import @@ -304,17 +304,40 @@ nManager -, +%0Afrom flask_login import UserMix
fd9039ac78985fc5f06f3f01bfafeacdb22f354b
Create sortable tables within the excel sheets
src/spz/spz/tables.py
src/spz/spz/tables.py
# -*- coding: utf-8 -*- """Table export utility. Used to format course lists for download. """ import csv import io from tempfile import NamedTemporaryFile from openpyxl import Workbook from flask import make_response, url_for, redirect, flash def export_course_list(courses, format): if format == 'csv': return export(CSVWriter(), courses) elif format == 'xlsx': return export(ExcelWriter(), courses) else: flash('Ungueltiges Export-Format: {0}'.format(format), 'error') return redirect(url_for('lists')) class CSVWriter: mimetype = 'text/csv' def __init__(self): self.buf = io.StringIO() self.out = csv.writer(self.buf, delimiter=";", dialect=csv.excel) self.mimetype = 'text/csv' self.filename = 'Kursliste.csv' self.header_written = False def write_heading(self, values): if not self.header_written: self.write_row(values) self.header_written = True def write_row(self, values): string_values = [str(v) if v else '' for v in values] self.out.writerow(string_values) def new_section(self, name): pass def get_data(self): return self.buf.getvalue() class ExcelWriter: def __init__(self): self.workbook = Workbook(write_only=True) self.mimetype = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet' self.filename = 'Kursliste.xlsx' def write_heading(self, values): self.write_row(values) def write_row(self, values): self.workbook._sheets[-1].append(values) def new_section(self, name): self.workbook.create_sheet(name) def get_data(self): with NamedTemporaryFile() as file: self.workbook.save(file.name) file.seek(0) stream = file.read() return stream def export(writer, courses): # XXX: header -- not standardized header = ['Kurs', 'Kursplatz', 'Bewerbernummer', 'Vorname', 'Nachname', 'Mail', 'Matrikelnummer', 'Telefon', 'Studienabschluss', 'Semester', 'Bewerberkreis'] for course in courses: writer.new_section(course.full_name()) writer.write_heading(header) active_no_debt = [attendance.applicant for attendance in course.attendances if not attendance.waiting and (not attendance.has_to_pay or attendance.amountpaid > 0)] idx = 1 for applicant in active_no_debt: writer.write_row([course.full_name(), idx, applicant.id, applicant.first_name, applicant.last_name, applicant.mail, applicant.tag, applicant.phone, applicant.degree.name if applicant.degree else None, applicant.semester, applicant.origin.name if applicant.origin else None]) idx += 1 resp = make_response(writer.get_data()) resp.headers['Content-Disposition'] = 'attachment; filename="{0}"'.format(writer.filename) resp.mimetype = writer.mimetype return resp
Python
0.000002
@@ -195,16 +195,60 @@ orkbook%0D +%0Afrom openpyxl.worksheet.table import Table%0D %0A%0D%0Afrom @@ -1241,32 +1241,73 @@ n(self, name):%0D%0A + # CSV does not support sections%0D%0A pass%0D%0A%0D%0A @@ -1410,32 +1410,197 @@ _init__(self):%0D%0A + # write_only=True would require additional logic to keep track of sheet dimension so we keep it at False%0D%0A # (see sheet.dimensions in end_section())%0D%0A self.wor @@ -1627,19 +1627,20 @@ te_only= -Tru +Fals e)%0D%0A @@ -1769,16 +1769,83 @@ e.xlsx'%0D +%0A self.workbook._sheets.clear() # start off with no sheets%0D %0A%0D%0A d @@ -2023,32 +2023,99 @@ n(self, name):%0D%0A + if self.workbook._sheets:%0D%0A self.end_section()%0D%0A self.wor @@ -2159,32 +2159,99 @@ et_data(self):%0D%0A + if self.workbook._sheets:%0D%0A self.end_section()%0D%0A with Nam @@ -2402,24 +2402,375 @@ n stream%0D%0A%0D%0A + def end_section(self):%0D%0A sheet = self.workbook._sheets%5B-1%5D%0D%0A # create a table within the excel sheet to simplify sorting by values%0D%0A tableName = sheet.title.replace(' ', '_') # needs to be unique and must not contain spaces%0D%0A table = Table(displayName=tableName, ref=sheet.dimensions)%0D%0A sheet.add_table(table)%0D%0A %0D%0Adef export
b8e53ed353bf28bc1e532ae1577bf4a8b4ce976f
Add missing import
hackeriet/cardreaderd/__init__.py
hackeriet/cardreaderd/__init__.py
#!/usr/bin/env python from hackeriet import mifare from hackeriet.mqtt import MQTT from hackeriet.door import users import os, logging logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(message)s') door_name = os.getenv("DOOR_NAME", 'hackeriet') door_topic = "hackeriet/door/%s/open" % door_name door_timeout = int(os.getenv("DOOR_TIMEOUT", 2)) mqtt = MQTT() def main(): logging.debug('Starting main loop') while True: users.load() # Read data from card reader logging.debug('mifare: waiting for data...') data = mifare.try_read() if data: logging.debug('mifare: data read') user = users.auth(data[0:16]) if user: ascii_user = user.encode('ascii', 'replace').decode('ascii') logging.info('auth: card read for user %s' % ascii_user) mqtt(door_topic, user) else: logging.debug('auth: card data does not belong to a user: %s' % data[0:16]) # Avoid spewing messages every single ms while a card is in front of the reader time.sleep(door_timeout) else: logging.debug('mifare: no data read in last attempt') if __name__ == "__main__": main()
Python
0.000466
@@ -127,16 +127,22 @@ logging +, time %0A%0Aloggin
2e042201d6c0e0709d7056d399052389d1ea54b0
Move imports inside initialize() method so that we don’t break things on initial setup.
shopify_auth/__init__.py
shopify_auth/__init__.py
import shopify from django.conf import settings from django.core.exceptions import ImproperlyConfigured VERSION = (0, 1, 5) __version__ = '.'.join(map(str, VERSION)) __author__ = 'Gavin Ballard' def initialize(): if not settings.SHOPIFY_APP_API_KEY or not settings.SHOPIFY_APP_API_SECRET: raise ImproperlyConfigured("SHOPIFY_APP_API_KEY and SHOPIFY_APP_API_SECRET must be set in settings") shopify.Session.setup(api_key = settings.SHOPIFY_APP_API_KEY, secret = settings.SHOPIFY_APP_API_SECRET)
Python
0
@@ -1,111 +1,4 @@ -import shopify%0A%0Afrom django.conf import settings%0Afrom django.core.exceptions import ImproperlyConfigured%0A%0A%0A VERS @@ -18,9 +18,9 @@ 1, -5 +6 )%0A__ @@ -109,16 +109,134 @@ lize():%0A + import shopify%0A%0A from django.conf import settings%0A from django.core.exceptions import ImproperlyConfigured%0A%0A if n