commit
stringlengths 40
40
| subject
stringlengths 1
3.25k
| old_file
stringlengths 4
311
| new_file
stringlengths 4
311
| old_contents
stringlengths 0
26.3k
| lang
stringclasses 3
values | proba
float64 0
1
| diff
stringlengths 0
7.82k
|
|---|---|---|---|---|---|---|---|
a26ed666b657286ecbd619631ce19c3cacf43c2b
|
Update docs
|
src/psd_tools/user_api/shape.py
|
src/psd_tools/user_api/shape.py
|
# -*- coding: utf-8 -*-
"""Shape layer API."""
from __future__ import absolute_import
import logging
from psd_tools.debug import pretty_namedtuple
from psd_tools.constants import TaggedBlock, PathResource
logger = logging.getLogger(__name__)
class StrokeStyle(object):
"""StrokeStyle contains decorative infromation for strokes."""
STROKE_STYLE_LINE_CAP_TYPES = {
b'strokeStyleButtCap': 'butt',
b'strokeStyleRoundCap': 'round',
b'strokeStyleSquareCap': 'square',
}
STROKE_STYLE_LINE_JOIN_TYPES = {
b'strokeStyleMiterJoin': 'miter',
b'strokeStyleRoundJoin': 'round',
b'strokeStyleBevelJoin': 'bevel',
}
STROKE_STYLE_LINE_ALIGNMENTS = {
b'strokeStyleAlignInside': 'inner',
b'strokeStyleAlignOutside': 'outer',
b'strokeStyleAlignCenter': 'center',
}
def __init__(self, descriptor):
self._descriptor = descriptor
assert self.get(b'classID') == b'strokeStyle'
def get(self, key, default=None):
return self._descriptor.get(key, default)
@property
def enabled(self):
"""If the stroke is enabled."""
return self.get(b'strokeEnabled')
@property
def fill_enabled(self):
"""If the stroke fill is enabled."""
return self.get(b'fillEnabled')
@property
def line_width(self):
"""Stroke width in float."""
return self.get(b'strokeStyleLineWidth', 1.0)
@property
def line_dash_set(self):
"""
Line dash set in list of float.
:rtype: list
"""
return self.get(b'strokeStyleLineDashSet')
@property
def line_dash_offset(self):
"""
Line dash offset in float.
:rtype: float
"""
return self.get(b'strokeStyleLineDashOffset', 0.0)
@property
def miter_limit(self):
"""Miter limit in float."""
return self.get(b'strokeStyleMiterLimit', 100.0)
@property
def line_cap_type(self):
"""Cap type, one of `butt`, `round`, `square`."""
key = self.get(b'strokeStyleLineCapType')
return self.STROKE_STYLE_LINE_CAP_TYPES.get(key, str(key))
@property
def line_join_type(self):
"""Join type, one of `miter`, `round`, `bevel`."""
key = self.get(b'strokeStyleLineJoinType')
return self.STROKE_STYLE_LINE_JOIN_TYPES.get(key, str(key))
@property
def line_alignment(self):
"""Alignment, one of `inner`, `outer`, `center`."""
key = self.get(b'strokeStyleLineAlignment')
return self.STROKE_STYLE_LINE_ALIGNMENTS.get(key, str(key))
@property
def scale_lock(self):
return self.get(b'strokeStyleScaleLock')
@property
def stroke_adjust(self):
"""Stroke adjust"""
return self.get(b'strokeStyleStrokeAdjust')
@property
def blend_mode(self):
"""Blend mode."""
return self.get(b'strokeStyleBlendMode')
@property
def opacity(self):
"""Opacity from 0 to 100."""
return self.get(b'strokeStyleOpacity', 100)
@property
def content(self):
"""
Fill effect, one of
:py:class:`~psd_tools.user_api.effects.ColorOverlay`,
:py:class:`~psd_tools.user_api.effects.PatternOverlay`,
or :py:class:`~psd_tools.user_api.effects.GradientOverlay`.
:rtype: :py:class:`~psd_tools.user_api.effects._OverlayEffect`
"""
return self.get(b'strokeStyleContent')
def __repr__(self):
return self._descriptor.__repr__()
Path = pretty_namedtuple("Path", "closed num_knots knots")
Knot = pretty_namedtuple("Knot", "anchor leaving_knot preceding_knot")
class VectorMask(object):
"""Shape path data."""
_KNOT_KEYS = (
PathResource.CLOSED_SUBPATH_BEZIER_KNOT_LINKED,
PathResource.CLOSED_SUBPATH_BEZIER_KNOT_UNLINKED,
PathResource.OPEN_SUBPATH_BEZIER_KNOT_LINKED,
PathResource.OPEN_SUBPATH_BEZIER_KNOT_UNLINKED,
)
def __init__(self, setting):
self._setting = setting
self._paths = []
self._build()
def _build(self):
for p in self._setting.path:
selector = p.get('selector')
if selector == PathResource.CLOSED_SUBPATH_LENGTH_RECORD:
self._paths.append(Path(True, p.get('num_knot_records'), []))
elif selector == PathResource.OPEN_SUBPATH_LENGTH_RECORD:
self._paths.append(Path(False, p.get('num_knot_records'), []))
elif selector in self._KNOT_KEYS:
knot = Knot(p.get('anchor'),
p.get('control_leaving_knot'),
p.get('control_preceding_knot'))
self._paths[-1].knots.append(knot)
elif selector == PathResource.PATH_FILL_RULE_RECORD:
pass
elif selector == PathResource.CLIPBOARD_RECORD:
self._clipboard_record = p
elif selector == PathResource.INITIAL_FILL_RULE_RECORD:
self._initial_fill_rule = p.get('initial_fill_rule', 0)
for path in self.paths:
assert path.num_knots == len(path.knots)
@property
def invert(self):
"""Invert the mask."""
return self._setting.invert
@property
def not_link(self):
"""If the knots are not linked."""
return self._setting.not_link
@property
def disabled(self):
"""If the mask is disabled."""
return self._setting.disable
@property
def paths(self):
"""
List of `Path`. Path contains `closed`, `num_knots`, and `knots`.
:rtype: Path
"""
return self._paths
@property
def initial_fill_rule(self):
"""
Initial fill rule.
When 0, fill inside of the path. When 1, fill outside of the shape.
"""
return self._initial_fill_rule
@property
def anchors(self):
"""List of vertices of all subpaths."""
return [p['anchor'] for p in self._setting.path
if p.get('selector') in self._KNOT_KEYS]
|
Python
| 0.000001
|
@@ -198,16 +198,64 @@
Resource
+%0Afrom psd_tools.decoder.actions import UnitFloat
%0A%0Alogger
@@ -1426,32 +1426,68 @@
width in float.
+%0A%0A :rtype: UnitFloat%0A
%22%22%22%0A retu
@@ -1526,13 +1526,34 @@
th',
+ UnitFloat('PIXELS',
1.0)
+)
%0A%0A
@@ -1637,22 +1637,73 @@
list of
- float
+%0A :py:class:%60~psd_tools.decoder.actions.UnitFloat%60
.%0A%0A
@@ -3164,22 +3164,50 @@
ity
-from 0 to 100.
+value.%0A%0A :rtype: UnitFloat%0A
%22%22%22%0A
@@ -3253,19 +3253,43 @@
acity',
-100
+UnitFloat('PERCENT', 100.0)
)%0A%0A @
|
cdf7dfc01cca8472c517d2a93d89e97e1f838103
|
Add metanode to degree_df functionality
|
hetio/stats.py
|
hetio/stats.py
|
import pandas
import matplotlib
import matplotlib.backends.backend_pdf
import seaborn
def get_degrees_for_metanode(graph, metanode):
"""
Return a dataframe that reports the degree of each metaedge for
each node of kind metanode.
"""
metanode_to_nodes = graph.get_metanode_to_nodes()
nodes = metanode_to_nodes.get(metanode, [])
rows = list()
for node in nodes:
for metaedge, edges in node.edges.items():
rows.append((str(node), node.name, str(metaedge), len(edges)))
df = pandas.DataFrame(rows, columns=['node_id', 'node_name', 'metaedge', 'degree'])
return df.sort_values(['node_name', 'metaedge'])
def plot_degrees_for_metanode(graph, metanode, col_wrap=2, facet_height=4):
"""
Plots histograms of the degree distribution of each metaedge
incident to the metanode. Each metaedge receives a facet in
a seaborn.FacetGrid.
"""
degree_df = get_degrees_for_metanode(graph, metanode)
grid = seaborn.FacetGrid(degree_df, col='metaedge', sharex=False, sharey=False, col_wrap=col_wrap, size=facet_height)
grid.map(seaborn.distplot, 'degree', kde=False)
grid.set_titles('{col_name}')
return grid
def plot_degrees(graph, path):
"""
Creates a multipage pdf with a page for each metanode showing degree
distributions.
"""
# Temporarily disable `figure.max_open_warning`
max_open = matplotlib.rcParams['figure.max_open_warning']
matplotlib.rcParams['figure.max_open_warning'] = 0
pdf_pages = matplotlib.backends.backend_pdf.PdfPages(path)
for metanode in graph.metagraph.get_nodes():
grid = plot_degrees_for_metanode(graph, metanode)
grid.savefig(pdf_pages, format='pdf')
pdf_pages.close()
matplotlib.rcParams['figure.max_open_warning'] = max_open
def get_metanode_df(graph):
rows = list()
for metanode, nodes in graph.get_metanode_to_nodes().items():
series = pandas.Series()
series['metanode'] = metanode
series['abbreviation'] = metanode.abbrev
metaedges = set()
for metaedge in metanode.edges:
metaedges |= {metaedge, metaedge.inverse}
series['metaedges'] = sum([not metaedge.inverted for metaedge in metaedges])
series['nodes'] = len(nodes)
series['unconnected_nodes'] = sum(not any(node.edges.values()) for node in nodes)
rows.append(series)
metanode_df = pandas.DataFrame(rows).sort_values('metanode')
return metanode_df
def get_metaedge_df(graph):
rows = list()
for metaedge, edges in graph.get_metaedge_to_edges(exclude_inverts=True).items():
series = pandas.Series()
series['metaedge'] = str(metaedge)
series['abbreviation'] = metaedge.get_abbrev()
series['edges'] = len(edges)
series['source_nodes'] = len(set(edge.source for edge in edges))
series['target_nodes'] = len(set(edge.target for edge in edges))
rows.append(series)
metaedge_df = pandas.DataFrame(rows).sort_values('metaedge')
return metaedge_df
|
Python
| 0
|
@@ -466,17 +466,23 @@
nd((
-str(node)
+node.identifier
, no
@@ -606,16 +606,16 @@
gree'%5D)%0A
-
retu
@@ -660,16 +660,1031 @@
dge'%5D)%0A%0A
+def get_metanode_to_degree_df(graph):%0A %22%22%22%0A Return a dictionary of metanode to degree_df, where degree_df is a%0A wide-format dataframe of node degrees.%0A %22%22%22%0A metanode_to_degree_df = dict()%0A for metanode in graph.metagraph.get_nodes():%0A degree_df = get_degrees_for_metanode(graph, metanode)%0A degree_df = pandas.pivot_table(degree_df, values='degree',%0A index=%5B'node_id', 'node_name'%5D, columns='metaedge').reset_index()%0A metanode_to_degree_df%5Bmetanode%5D = degree_df%0A return metanode_to_degree_df%0A%0Adef degrees_to_excel(graph, path):%0A %22%22%22%0A Write node degrees to a multisheet excel spreadsheet. Path should end in%0A a valid excel extension that %60pandas.ExcelWriter%60 can detect, such as%0A %60.xlsx%60.%0A %22%22%22%0A metanode_to_degree_df = get_metanode_to_degree_df(graph)%0A writer = pandas.ExcelWriter(path)%0A for metanode, degree_df in metanode_to_degree_df.items():%0A degree_df.to_excel(writer, sheet_name=str(metanode), index=False)%0A writer.close()%0A%0A
def plot
|
e5e23fd278051c12bd3fea0de6f0e46791f97b65
|
add a test for issue57 which currently needs to be fixed on py-trunk though
|
testing/test_remote.py
|
testing/test_remote.py
|
import py
py.test.importorskip("execnet")
from xdist.remote import LooponfailingSession, LoopState, RemoteControl
class TestRemoteControl:
def test_nofailures(self, testdir):
item = testdir.getitem("def test_func(): pass\n")
control = RemoteControl(item.config)
control.setup()
failures = control.runsession()
assert not failures
def test_failures_somewhere(self, testdir):
item = testdir.getitem("def test_func(): assert 0\n")
control = RemoteControl(item.config)
control.setup()
failures = control.runsession()
assert failures
control.setup()
item.fspath.write("def test_func(): assert 1\n")
pyc = item.fspath.new(ext=".pyc")
if pyc.check():
pyc.remove()
failures = control.runsession(failures)
assert not failures
def test_failure_change(self, testdir):
modcol = testdir.getitem("""
def test_func():
assert 0
""")
control = RemoteControl(modcol.config)
control.setup()
failures = control.runsession()
assert failures
control.setup()
modcol.fspath.write(py.code.Source("""
def test_func():
assert 1
def test_new():
assert 0
"""))
pyc = modcol.fspath.new(ext=".pyc")
if pyc.check():
pyc.remove()
failures = control.runsession(failures)
assert not failures
control.setup()
failures = control.runsession()
assert failures
assert str(failures).find("test_new") != -1
class TestLooponFailing:
def test_looponfail_from_fail_to_ok(self, testdir):
modcol = testdir.getmodulecol("""
def test_one():
x = 0
assert x == 1
def test_two():
assert 1
""")
session = LooponfailingSession(modcol.config)
loopstate = LoopState()
session.loop_once(loopstate)
assert len(loopstate.colitems) == 1
modcol.fspath.write(py.code.Source("""
def test_one():
x = 15
assert x == 15
def test_two():
assert 1
"""))
assert session.statrecorder.check()
session.loop_once(loopstate)
assert not loopstate.colitems
def test_looponfail_from_one_to_two_tests(self, testdir):
modcol = testdir.getmodulecol("""
def test_one():
assert 0
""")
session = LooponfailingSession(modcol.config)
loopstate = LoopState()
loopstate.colitems = []
session.loop_once(loopstate)
assert len(loopstate.colitems) == 1
modcol.fspath.write(py.code.Source("""
def test_one():
assert 1 # passes now
def test_two():
assert 0 # new and fails
"""))
assert session.statrecorder.check()
session.loop_once(loopstate)
assert len(loopstate.colitems) == 0
session.loop_once(loopstate)
assert len(loopstate.colitems) == 1
def test_looponfail_removed_test(self, testdir):
modcol = testdir.getmodulecol("""
def test_one():
assert 0
def test_two():
assert 0
""")
session = LooponfailingSession(modcol.config)
loopstate = LoopState()
loopstate.colitems = []
session.loop_once(loopstate)
assert len(loopstate.colitems) == 2
modcol.fspath.write(py.code.Source("""
def test_xxx(): # renamed test
assert 0
def test_two():
assert 1 # pass now
"""))
assert session.statrecorder.check()
session.loop_once(loopstate)
assert len(loopstate.colitems) == 0
session.loop_once(loopstate)
assert len(loopstate.colitems) == 1
def test_looponfail_functional_fail_to_ok(self, testdir):
p = testdir.makepyfile("""
def test_one():
x = 0
assert x == 1
""")
child = testdir.spawn_pytest("-f %s" % p)
child.expect("def test_one")
child.expect("x == 1")
child.expect("1 failed")
child.expect("### LOOPONFAILING ####")
child.expect("waiting for changes")
p.write(py.code.Source("""
def test_one():
x = 1
assert x == 1
"""))
child.expect(".*1 passed.*")
child.kill(15)
|
Python
| 0
|
@@ -4618,8 +4618,409 @@
ll(15)%0A%0A
+ def test_looponfail_xfail_passes(self, testdir):%0A p = testdir.makepyfile(%22%22%22%0A import py%0A @py.test.mark.xfail%0A def test_one():%0A pass%0A %22%22%22)%0A child = testdir.spawn_pytest(%22-f %25s%22 %25 p)%0A child.expect(%221 xpass%22)%0A child.expect(%22### LOOPONFAILING ####%22)%0A child.expect(%22waiting for changes%22)%0A child.kill(15)%0A%0A
|
b21ada1cb1b0fc1be4dc10bd72aaabcc3a72389e
|
Beer+python=fail
|
etcdocker/util.py
|
etcdocker/util.py
|
import ast
import docker
import etcd
import logging
LOG = logging.getLogger("etcdocker")
def _get_docker_client():
# Assume docker is local
return docker.Client()
def _get_etcd_client():
# Assume docker is local
return etcd.Client()
def get_containers():
client = _get_docker_client()
return client.containers(all=True)
def get_container_names(containers):
# Returns list of container names from etcd key list
container_names = []
for container in containers:
container_names.append(container['key'].rsplit('/')[-1])
return container_names
def get_etcd_container_names(base_key_dir):
"""
Get container name list from etcd
args:
base_key_dir (str) - etcd path for etcdocker
Returns: (list)
List of container names
"""
# Returns list of container names from etcd key list
client = _get_etcd_client()
# Get container key list
containers = get_container_names(client.read(
base_key_dir, recursive=True, sorted=True)._children)
return containers
def get_params(container_path):
"""
Get params for container from etcd
args:
container_path (str) - etcd path to container params
Returns: (dict)
Raw etcd params
"""
client = _get_etcd_client()
children = client.read(container_path)._children
params = {}
for child in children:
name = child['key'].rsplit('/')[-1]
params[name] = child['value']
return params
def convert_params(params):
"""
Converts etcd params to docker params
args:
params (dict) - raw etcd key value pairs
Returns: (dict)
Converted docker params
"""
converted_params = {
'ports': None,
'volumes_from': None,
'volumes': None}
for param in params.iterkeys():
if params.get(param) and param in converted_params.keys():
try:
converted_params[param] = ast.literal_eval(
params.get(param))
except (ValueError, SyntaxError):
LOG.error("Malformed param '%s'. Skipping..." % param)
else:
converted_params[param] = params.get(param)
converted_params['image'] = "%s:%s" % (
params.get('image'), params.get('tag'))
return converted_params
def create_docker_container(name, params):
"""
Create a Docker container
args:
name (str) - Name of container
params (dict) - Docker params
"""
client = _get_docker_client()
try:
ports = params.get('ports').keys()
except AttributeError:
ports = []
client.create_container(
image=params.get('image'),
detach=True,
volumes=params.get('volumes'),
ports=ports,
name=name)
def start_docker_container(name, params):
"""
Start a Docker container
args:
name (str) - Name of container
params (dict) - Docker params
"""
client = _get_docker_client()
client.start(
container=name,
port_bindings=params.get('ports'),
volumes_from=params.get('volumes_from'),
privileged=params.get('privileged'))
def stop_and_rm_docker_container(name):
"""
Stop and remove a Docker container
args:
name (str) - Name of container
"""
client = _get_docker_client()
# Try to stop the container, kill after 5 secs
client.stop(name, 5)
client.remove_container(name)
def get_docker_images(filter=None):
"""
Get a list of images
args:
filter (str) - Filter string
Returns: (list)
List of image IDs, optionally filtered
"""
client = _get_docker_client()
return client.images(name=filter)
def get_docker_image_latest(image_name):
"""
Get a list of image names that are the same
args:
image_name (str) - Latest image
Returns: (list)
List of image names
"""
from etcdocker.watcher.ImagesWatcher import IMAGES
cur_image_id = None
cur_images = []
for i in IMAGES:
for tag in i.get('RepoTags'):
if tag == image_name:
cur_image_id = i.get('Id')
for i in IMAGES:
if i.get('Id') == cur_image_id:
for tag in i.get('RepoTags'):
cur_images.append(tag)
return cur_images
|
Python
| 0.999974
|
@@ -3778,20 +3778,22 @@
ker_
+s
im
-age_lat
+ilar_imag
es
-t
(ima
|
3846907435da720c075ab89579b970da5019b49f
|
Add Tapastic/AmpleTime
|
dosagelib/plugins/tapastic.py
|
dosagelib/plugins/tapastic.py
|
# SPDX-License-Identifier: MIT
# Copyright (C) 2019-2020 Tobias Gruetzmacher
# Copyright (C) 2019-2020 Daniel Ring
import json
import re
from ..scraper import _ParserScraper
from ..helpers import indirectStarter
class Tapastic(_ParserScraper):
baseUrl = 'https://tapas.io/'
imageSearch = '//article[contains(@class, "js-episode-article")]//img/@data-src'
prevSearch = '//a[contains(@class, "js-prev-ep-btn")]'
latestSearch = '//ul[contains(@class, "js-episode-list")]//a'
starter = indirectStarter
multipleImagesPerStrip = True
def __init__(self, name, url):
super(Tapastic, self).__init__('Tapastic/' + name)
self.url = self.baseUrl + 'series/' + url
self.stripUrl = self.baseUrl + 'episode/%s'
def fetchUrls(self, url, data, urlSearch):
# Save link order for position-based filenames
self.imageUrls = super().fetchUrls(url, data, urlSearch)
return self.imageUrls
def namer(self, imageUrl, pageUrl):
# Construct filename from episode number and image position on page
episodeNum = pageUrl.rsplit('/', 1)[-1]
imageNum = self.imageUrls.index(imageUrl)
imageExt = pageUrl.rsplit('.', 1)[-1]
if len(self.imageUrls) > 1:
filename = "%s-%d.%s" % (episodeNum, imageNum, imageExt)
else:
filename = "%s.%s" % (episodeNum, imageExt)
return filename
@classmethod
def getmodules(cls):
return (
# Manually-added comics
cls('NoFuture', 'NoFuture'),
cls('OrensForge', 'OrensForge'),
cls('RavenWolf', 'RavenWolf'),
cls('TheCatTheVineAndTheVictory', 'The-Cat-The-Vine-and-The-Victory'),
cls('TheGodsPack', 'The-Gods-Pack'),
# START AUTOUPDATE
# END AUTOUPDATE
)
|
Python
| 0.000001
|
@@ -1499,16 +1499,60 @@
comics%0A
+ cls('AmpleTime', 'Ample-Time'),%0A
|
531d4d02db38e6b42266303d680d6289352bf8a5
|
Revert "Update pendulum.py (#2444)" (#2475)
|
gym/envs/classic_control/pendulum.py
|
gym/envs/classic_control/pendulum.py
|
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
from os import path
class PendulumEnv(gym.Env):
"""
Description:
The inverted pendulum swingup problem is a classic
problem in the control literature. In this version of the
problem, the pendulum starts in a random position, and the
goal is to swing it up so it stays upright.
Source:
Observation:
Type: Box(1)
Num Observation Min Max
0 Speed -8 8
Actions:
Type: Box(1)
Num Action Min Max
0 Torque -2 2
Reward:
Starting State:
Episode Termination:
"""
metadata = {"render.modes": ["human", "rgb_array"], "video.frames_per_second": 30}
def __init__(self, g=10.0):
self.max_speed = 8
self.max_torque = 2.0
self.dt = 0.05
self.g = g
self.m = 1.0
self.l = 1.0
self.viewer = None
high = np.array([1.0, 1.0, self.max_speed], dtype=np.float32)
self.action_space = spaces.Box(
low=-self.max_torque, high=self.max_torque, shape=(1,), dtype=np.float32
)
self.observation_space = spaces.Box(low=-high, high=high, dtype=np.float32)
self.seed()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, u):
th, thdot = self.state # th := theta
g = self.g
m = self.m
l = self.l
dt = self.dt
u = np.clip(u, -self.max_torque, self.max_torque)[0]
self.last_u = u # for rendering
costs = angle_normalize(th) ** 2 + 0.1 * thdot ** 2 + 0.001 * (u ** 2)
newthdot = thdot + (3 * g / (2 * l) * np.sin(th) + 3.0 / (m * l ** 2) * u) * dt
newthdot = np.clip(newthdot, -self.max_speed, self.max_speed)
newth = th + newthdot * dt
self.state = np.array([newth, newthdot])
return self._get_obs(), -costs, False, {}
def reset(self):
high = np.array([np.pi, 1])
self.state = self.np_random.uniform(low=-high, high=high)
self.last_u = None
return self._get_obs()
def _get_obs(self):
theta, thetadot = self.state
return np.array([np.cos(theta), np.sin(theta), thetadot], dtype=np.float32)
def render(self, mode="human"):
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(500, 500)
self.viewer.set_bounds(-2.2, 2.2, -2.2, 2.2)
rod = rendering.make_capsule(1, 0.2)
rod.set_color(0.8, 0.3, 0.3)
self.pole_transform = rendering.Transform()
rod.add_attr(self.pole_transform)
self.viewer.add_geom(rod)
axle = rendering.make_circle(0.05)
axle.set_color(0, 0, 0)
self.viewer.add_geom(axle)
fname = path.join(path.dirname(__file__), "assets/clockwise.png")
self.img = rendering.Image(fname, 1.0, 1.0)
self.imgtrans = rendering.Transform()
self.img.add_attr(self.imgtrans)
self.viewer.add_onetime(self.img)
self.pole_transform.set_rotation(self.state[0] + np.pi / 2)
if self.last_u is not None:
self.imgtrans.scale = (-self.last_u / 2, np.abs(self.last_u) / 2)
return self.viewer.render(return_rgb_array=mode == "rgb_array")
def close(self):
if self.viewer:
self.viewer.close()
self.viewer = None
def angle_normalize(x):
return ((x + np.pi) % (2 * np.pi)) - np.pi
|
Python
| 0
|
@@ -130,690 +130,8 @@
v):%0A
- %22%22%22%0A Description:%0A The inverted pendulum swingup problem is a classic%0A problem in the control literature. In this version of the%0A problem, the pendulum starts in a random position, and the%0A goal is to swing it up so it stays upright.%0A Source:%0A Observation:%0A Type: Box(1)%0A Num Observation Min Max%0A 0 Speed -8 8%0A Actions:%0A Type: Box(1)%0A Num Action Min Max%0A 0 Torque -2 2%0A Reward:%0A Starting State:%0A Episode Termination:%0A%22%22%22%0A
|
62314491b148c51e7c27e13aded283a0622c47f4
|
improve h5py config check
|
hpat/config.py
|
hpat/config.py
|
try:
from .io import _hdf5
except ImportError:
_has_h5py = False
else:
_has_h5py = True
try:
import pyarrow
except ImportError:
_has_pyarrow = False
else:
_has_pyarrow = True
try:
from . import ros_cpp
except ImportError:
_has_ros = False
else:
_has_ros = True
try:
from . import cv_wrapper
except ImportError:
_has_opencv = False
else:
_has_opencv = True
import hpat.cv_ext
try:
from . import hxe_ext
except ImportError:
_has_xenon = False
else:
_has_xenon = True
import hpat.io.xenon_ext
|
Python
| 0
|
@@ -24,16 +24,82 @@
t _hdf5%0A
+ import h5py%0A # TODO: make sure h5py/hdf5 supports parallel%0A
except I
|
75729e3e06c560892f0bf285fdd8a15f9f58b7d5
|
Delete local file with no signature, without trying reget
|
lib/oelite/fetch/url.py
|
lib/oelite/fetch/url.py
|
import oelite.fetch
import bb.utils
import os
import urlgrabber
import hashlib
class UrlFetcher():
SUPPORTED_SCHEMES = ("http", "https", "ftp")
def __init__(self, uri, d):
if not uri.scheme in self.SUPPORTED_SCHEMES:
raise Exception(
"Scheme %s not supported by oelite.fetch.UrlFetcher"%(
uri.scheme))
self.url = "%s://%s"%(uri.scheme, uri.location)
try:
isubdir = uri.params["isubdir"]
except KeyError:
isubdir = uri.isubdir
self.localname = os.path.basename(uri.location)
self.localpath = os.path.join(uri.ingredients, isubdir, self.localname)
self.signatures = d.get("FILE") + ".sig"
self.uri = uri
self.fetch_signatures = d["__fetch_signatures"]
return
def signature(self):
try:
self._signature = self.fetch_signatures[self.localname]
return self._signature
except KeyError:
raise oelite.fetch.NoSignature(self.uri, "signature unknown")
def grab(self, url, reget=None):
print "grabbing %s"%(url)
if reget:
try:
return urlgrabber.urlgrab(url, self.localpath, reget=reget)
except urlgrabber.grabber.URLGrabError as e:
print 'URLGrabError %i: %s' % (e.errno, e.strerror)
if not (e[0] == 14 and e[1].startswith("HTTP Error 416")):
return None
try:
return urlgrabber.urlgrab(url, self.localpath)
except urlgrabber.grabber.URLGrabError as e:
print 'URLGrabError %i: %s' % (e.errno, e.strerror)
return None
def fetch(self):
localdir = os.path.dirname(self.localpath)
if not os.path.exists(localdir):
bb.utils.mkdirhier(localdir)
url = self.url
while url:
if os.path.exists(self.localpath):
if "_signature" in dir(self):
m = hashlib.sha1()
m.update(open(self.localpath, "r").read())
if self._signature == m.hexdigest():
return True
else:
print "Expected signature: %s"%self._signature
print "Obtained signature: %s"%m.hexdigest()
raise Exception("Signature mismatch")
f = self.grab(url, reget="simple")
else:
f = self.grab(url)
if f:
break
url = self.uri.alternative_mirror()
if not f or f != self.localpath:
return False
m = hashlib.sha1()
m.update(open(self.localpath, "r").read())
signature = m.hexdigest()
if not "_signature" in dir(self):
return (self.localname, signature)
return signature == self._signature
|
Python
| 0
|
@@ -2411,65 +2411,34 @@
-f = self.grab(url, reget=%22simple%22)%0A else:%0A
+os.unlink(self.localpath)%0A
|
ae7a5bef1e3ee0216651dc4aeef3abcbab3cf76e
|
update code
|
Strings/alternating-characters.py
|
Strings/alternating-characters.py
|
# Alternating Characters
# Developer: Murillo Grubler
# Link: https://www.hackerrank.com/challenges/alternating-characters/problem
def alternatingCharacters(s):
sumChars = 0
for i in range(len(s)):
if i == 0 or tempChar != s[i]:
tempChar = s[i]
continue
if tempChar == s[i]:
sumChars += 1
return sumChars
q = int(input().strip())
for a0 in range(q):
print(alternatingCharacters(input().strip()))
|
Python
| 0
|
@@ -125,16 +125,40 @@
roblem%0A%0A
+# Time complexity: O(n)%0A
def alte
|
11f1005653fff98025bb9a7b20d59bd85563c5eb
|
test for shape problem
|
test/tst_fancyslicing.py
|
test/tst_fancyslicing.py
|
from netCDF4 import Dataset
from numpy.random import seed, randint
from numpy.testing import assert_array_equal, assert_equal
import tempfile, unittest, os, random
import numpy as np
file_name = tempfile.mktemp(".nc")
xdim=9; ydim=10; zdim=11
i = np.array([2,5,7],'i4')
ib = np.zeros(ydim,dtype=np.bool)
ib[2] = True; ib[5] = True; ib[7] = True
ib2 = np.zeros(xdim, dtype=np.bool)
ib2[1] = True; ib2[4] = True; ib2[6] = True
# this one should be converted to a slice.
ib3 = np.zeros(xdim, dtype=np.bool)
ib3[0] = True; ib2[4] = True; ib2[8] = True
#seed(9) # fix seed
data = randint(0,10,size=(xdim,ydim,zdim)).astype('i2')
data1 = data[:,0,0].copy()
class VariablesTestCase(unittest.TestCase):
def setUp(self):
self.file = file_name
f = Dataset(file_name,'w')
f.createDimension('x',None)
f.createDimension('y',ydim)
f.createDimension('z',zdim)
f.createDimension('time', None)
v = f.createVariable('data','i2',('x','y','z'))
vu = f.createVariable('datau', 'i2', ('x', 'y', 'time'))
v[:] = data
v1 = f.createVariable('data1','i2','x')
self.data1 = data1
self.data = data
# test __setitem___
v[0:xdim] = self.data
# integer array slice.
v[:,i,:] = -100
self.data[:,i,:] = -100
# boolen array slice.
v[ib2] = -200
self.data[ib2] = -200
v[ib3,:,:] = -300
self.data[ib3,:,:] = -300
# same as above, for 1d array
v1[0:xdim] = self.data1
v1[i] = -100
self.data1[i] = -100
v1[ib2] = -200
self.data1[ib2] = -200
v1[ib3] = -300
self.data1[ib3] = -300
f.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def test_get(self):
"""testing 'fancy indexing'"""
f = Dataset(self.file, 'r')
v = f.variables['data']
# slice with an array of integers.
assert_array_equal(v[0:-1:2,i,:],self.data[0:-1:2,i,:])
# slice with an array of booleans.
assert_array_equal(v[0:-1:2,ib,:],self.data[0:-1:2,ib,:])
# Two slices
assert_array_equal(v[1:2,1:3,:], self.data[1:2,1:3,:])
# Three sequences
assert_array_equal(v[i,i,i], self.data[i,i,i])
assert_equal(v[i,i,i].shape, (3,))
# Two booleans and one slice. Different from NumPy
# ibx,ibz should be converted to slice, iby not.
ibx = np.array([True, False, True, False, True, False, True, False, True])
iby = np.array([True, False, True, False, False, False, True, False, True, False])
ibz = np.array([True, False, True, False, True, False, True, False,\
True, False, True])
datatmp = self.data[::2,:,:]
datatmp = datatmp[:,iby,:]
assert_array_equal(v[ibx, iby, :], datatmp)
# Three booleans
datatmp = self.data[::2,:,:]
datatmp = datatmp[:,iby,::2]
assert_array_equal(v[ibx,iby,ibz], datatmp)
# Ellipse
assert_array_equal(v[...,::2],self.data[..., ::2])
assert_array_equal(v[...,::-2],self.data[..., ::-2])
assert_array_equal(v[[1,2],...],self.data[[1,2],...])
assert_array_equal(v[0], self.data[0])
f.close()
def test_set(self):
f = Dataset(self.file, 'a')
data = np.arange(180).reshape((9,10,2))
vu = f.variables['datau']
vu[:,:,0] = data[:,:,0]
assert_array_equal(vu[:,:,:], data[:,:,:1])
vu[:,:,1:] = data
assert_array_equal(vu[:, :, 1:], data)
vu[:,:,0] = 0.0
assert_array_equal(vu[:, :, 0], 0.)
f.close()
if __name__ == '__main__':
unittest.main()
|
Python
| 0.00001
|
@@ -3529,32 +3529,71 @@
%5B:,:,0%5D%0A
+print vu.shape # This is OK%0A #
assert_array_equ
@@ -3621,32 +3621,41 @@
:,:1%5D)%0A %0A
+ %0A
vu%5B:,:,1
@@ -3659,24 +3659,27 @@
:,1:%5D = data
+%5B:%5D
%0A ass
@@ -3667,32 +3667,131 @@
data%5B:%5D%0A
+print data%5B:%5D.shape, vu.shape # This is not OK%0A %0A #print vu%5B:,:,0%5D%0A #
assert_array_equ
@@ -3822,32 +3822,33 @@
%0A
+#
vu%5B:,:,0%5D = 0.0%0A
@@ -3847,32 +3847,33 @@
%5D = 0.0%0A
+#
assert_array_equ
|
222ae0b817f7b5304b9c199428bf82709c3bb6af
|
change error msg on version check
|
dashlib/mnb_explorer.py
|
dashlib/mnb_explorer.py
|
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '.'))
# use block explorer to check balance, block height to check fork
import requests
from config import *
from mnb_misc import *
import simplejson as json
def make_request(url):
USERAGET = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) AppleWebKit/602.2.14 (KHTML, like Gecko) Version/10.0.1 Safari/602.2.14'
headers = {'user-agent': USERAGET}
try:
response = requests.get(url, headers=headers, timeout=(5, 5))
if response.status_code == requests.codes.ok and float(
response.text) >= 0:
return response.text
else:
return None
except requests.exceptions.RequestException:
err_msg = 'requests.exceptions.RequestException'
print_err_exit(
get_caller_name(),
get_function_name(),
err_msg)
except Exception as e:
err_msg = str(e.args)
print_err_exit(
get_caller_name(),
get_function_name(),
err_msg)
def make_request_version_txt(url):
USERAGET = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) AppleWebKit/602.2.14 (KHTML, like Gecko) Version/10.0.1 Safari/602.2.14'
headers = {'user-agent': USERAGET}
try:
response = requests.get(url, headers=headers, timeout=(4, 3))
if response.status_code == requests.codes.ok and len(
response.text) > 2:
return response.json()
else:
return None
except requests.exceptions.RequestException:
err_msg = 'requests.exceptions.RequestException : error to check version use -k to skip version check'
print_err_exit(
get_caller_name(),
get_function_name(),
err_msg)
except Exception as e:
err_msg = str(e.args)
print_err_exit(
get_caller_name(),
get_function_name(),
err_msg)
def make_insight_request(url):
USERAGET = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) AppleWebKit/602.2.14 (KHTML, like Gecko) Version/10.0.1 Safari/602.2.14'
headers = {'user-agent': USERAGET}
try:
response = requests.get(url, headers=headers, timeout=(2,5))
try:
if response.status_code == requests.codes.ok and len(response.text) > 2:
if isinstance(response.json(), list):
return response.json()[0]
else:
return response.json()
except Exception as e:
print(e.args[0])
return None
except requests.exceptions.RequestException:
return None
def getinfo_insight(url):
getinfourl = url + '/status?q=getinfo'
rawjson = make_insight_request(getinfourl)
if rawjson:
blockcnt = rawjson['info'].get('blocks', 0)
return blockcnt
else:
return 0
def get_insight_blockcount():
import random
exp = [
"http://insight.dev.dash.org/api",
"http://insight.dash.org/api",
"https://insight.dash.siampm.com/api",
"http://insight.masternode.io:3000/api"
]
IURL = exp[random.randrange(0,len(exp))]
response = getinfo_insight(IURL)
return response
def get_explorer_blockcount():
if MAINNET:
url = 'https://explorer.dash.org/chain/Dash/q/getblockcount'
else:
url = 'https://test.explorer.dash.org/chain/tDash/q/getblockcount'
response = make_request(url)
return response
def get_version_txt():
url = 'https://raw.githubusercontent.com/chaeplin/dashmnb/master/dashlib/version.txt'
response = make_request_version_txt(url)
return response
def get_mnstatus_dashninja(vins):
url = 'https://www.dashninja.pl/api/masternodes?testnet=0&portcheck=0&balance=0&exstatus=0&vins=' + json.dumps(vins)
response = make_request_version_txt(url)
return response
# end
|
Python
| 0
|
@@ -1650,24 +1650,27 @@
ror
-t
o
+n
check
+ing
version
use
@@ -1665,16 +1665,17 @@
version
+,
use -k
|
09275166c9652171e191100c4d5cd0f600077da0
|
Fix typo in settings.py. Refs #1010
|
seattlegeni/website/settings.py
|
seattlegeni/website/settings.py
|
"""
These are the django settings for the seattlegeni project. See the README.txt
file for details on what needs to be set in this file. At a minimum for
development, it will be the database connection info and the SECRET_KEY value.
For public deployment, see the README.txt file for information about which
additional changes you'll need to make to this file.
"""
import os
from seattlegeni.common.util import log
# If DEBUG is True, then error details will be shown on the website and ADMINS
# will not receive an email when an error occurs. So, this should be False in
# production.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# The log level used by the seattlegeni log module. All messages at this level
# or more severe will be logged.
SEATTLECLEARINGHOUSE_LOG_LEVEL = log.LOG_LEVEL_DEBUG
# Rather than make the log module have to import this settings file to set the
# log level, just set it right here.
log.set_log_level(SEATTLECLEARING_HOUSE_LOG_LEVEL)
# This is needed to allow xmlrpc requests to work when they don't have a slash
# on the end of the url.
APPEND_SLASH = False
# The directory the settings.py file is in is what we consider the root of the website.
SEATTLECLEARINGHOUSE_WEBSITE_ROOT = os.path.dirname(__file__)
# The directory where we keep the public keys of the node state keys.
SEATTLECLEARINGHOUSE_STATE_KEYS_DIR = os.path.join(SEATTLECLEARINGHOUSE_WEBSITE_ROOT, '..', 'node_state_transitions', 'statekeys')
# The XML-RPC interface to the Custom Installer Builder.
SEATTLECLEARINGHOUSE_INSTALLER_BUILDER_XMLRPC = "https://seattlegeni.cs.washington.edu/custom_install/xmlrpc/"
# The directory where the base installers named seattle_linux.tgz, seattle_mac.tgz,
# and seattle_win.zip are located.
SEATTLECLEARINGHOUSE_BASE_INSTALLERS_DIR = "/var/www/dist"
# The directory in which customized installers created by seattlegeni will be
# stored. A directory within this directory will be created for each user.
SEATTLECLEARINGHOUSE_USER_INSTALLERS_DIR = os.path.join(SEATTLECLEARINGHOUSE_BASE_INSTALLERS_DIR, "geni")
# The url that corresponds to SEATTLECLEARINGHOUSE_USER_INSTALLERS_DIR
SEATTLECLEARINGHOUSE_USER_INSTALLERS_URL = "https://blackbox.cs.washington.edu/dist/geni"
# Need to specify the LOGIN_URL, as our login page isn't at the default login
# location (the default is /accounts/login).
LOGIN_URL = 'login'
# Email addresses of people that should be emailed when a 500 error occurs on
# the site when DEBUG = False (that is, in production). Leave this to be empty
# if nobody should receive an email.
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
# To be able to send mail to ADMINS when there is an error, django needs to
# know about an SMTP server it can use. That info is defined here.
#EMAIL_HOST = 'smtp.gmail.com'
#EMAIL_HOST_USER = 'an.error.sending.account@gmail.com'
#EMAIL_HOST_PASSWORD = 'PASSWORD_HERE'
#EMAIL_PORT = 587
#EMAIL_USE_TLS = True
# Email address that error notifications will be sent from.
#SERVER_EMAIL = "error@seattlegeni.server.hostname"
# We use this so we know which server the email came from by the subject line.
#EMAIL_SUBJECT_PREFIX = "[localhost] "
MANAGERS = ADMINS
DATABASE_ENGINE = 'mysql' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = 'FILL_THIS_IN' # Or path to database file if using sqlite3.
DATABASE_USER = 'FILL_THIS_IN' # Not used with sqlite3.
DATABASE_PASSWORD = 'FILL_THIS_IN' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
if DATABASE_ENGINE == 'mysql':
DATABASE_OPTIONS = {'init_command': 'SET storage_engine=INNODB'}
# Make this unique, and don't share it with anybody.
# Fill this in!
SECRET_KEY = ''
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Los_Angeles'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = SEATTLECLEARINGHOUSE_WEBSITE_ROOT + '/html/media/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/site_media/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/admin_media/'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.csrf.middleware.CsrfViewMiddleware',
'django.contrib.csrf.middleware.CsrfResponseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
# Our own middleware that logs when a request is initially received and
# sets up the logger to log other messages with per-request unique ids.
'seattlegeni.website.middleware.logrequest.LogRequestMiddleware',
# Our own middleware that logs when unhandled exceptions happen.
'seattlegeni.website.middleware.logexception.LogExceptionMiddleware',
)
ROOT_URLCONF = 'seattlegeni.website.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
SEATTLECLEARINGHOUSE_WEBSITE_ROOT + '/html/templates'
)
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.csrf',
'django.contrib.sessions',
'django.contrib.sites',
# We have our maindb model defined here, so it must be listed.
'seattlegeni.website.control',
)
# The number of seconds sessions are valid for. Django uses this for the
# session expiration in the database in addition to the cookie expiration,
# which is good.
SESSION_COOKIE_AGE = 3600
# Use session cookies, not persistent cookies.
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
|
Python
| 0.000983
|
@@ -937,17 +937,16 @@
CLEARING
-_
HOUSE_LO
|
44fbc835354b7612d5d203250255a323c8759b64
|
fix log %(levelname)-8s to align
|
torequests/logs.py
|
torequests/logs.py
|
#! coding:utf-8
import logging
dummy_logger = logging.getLogger('torequests.dummy')
main_logger = logging.getLogger('torequests.main')
def init_logger(name='', handler_path_levels=None,
level=logging.INFO, formatter=None,
formatter_str=None, datefmt="%Y-%m-%d %H:%M:%S"):
"""Args:
name = '' or logger obj.
handler_path_levels = [['loggerfile.log',13],['','DEBUG'],['','info'],['','notSet']] # [[path,level]]
level : the least level for the logger.
formatter = logging.Formatter(
'%(levelname)-6s %(asctime)s %(name)s (%(filename)s: %(lineno)s): %(message)s',
"%Y-%m-%d %H:%M:%S")
formatter_str = '%(levelname)-6s %(asctime)s %(name)s (%(funcName)s: %(lineno)s): %(message)s'
custom formatter:
%(asctime)s %(created)f %(filename)s %(funcName)s %(levelname)s %(levelno)s %(lineno)s %(message)s %(module)s %(name)s %(pathname)s %(process)s %(relativeCreated)s %(thread)s %(threadName)s
"""
levels = {'NOTSET': logging.NOTSET, 'DEBUG': logging.DEBUG, 'INFO': logging.INFO,
'WARNING': logging.WARNING, 'ERROR': logging.ERROR, 'CRITICAL': logging.CRITICAL}
if not formatter:
if formatter_str:
formatter_str = formatter_str
else:
formatter_str = '%(levelname)-6s %(asctime)s %(name)s (%(filename)s: %(lineno)s): %(message)s'
formatter = logging.Formatter(formatter_str, datefmt=datefmt)
logger = name if isinstance(
name, logging.Logger) else logging.getLogger(str(name))
logger.setLevel(level)
handler_path_levels = handler_path_levels or [['', 'INFO']]
# ---------------------------------------
for each_handler in handler_path_levels:
path, handler_level = each_handler
handler = logging.FileHandler(
path) if path else logging.StreamHandler()
handler.setLevel(levels.get(handler_level.upper(), 1) if isinstance(
handler_level, str) else handler_level)
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
|
Python
| 0.000001
|
@@ -545,33 +545,33 @@
'%25(levelname)-
-6
+8
s %25(asctime)s
@@ -681,33 +681,33 @@
= '%25(levelname)-
-6
+8
s %25(asctime)s
@@ -1341,9 +1341,9 @@
me)-
-6
+8
s %25
|
db47a651e380709c33c54c86f9a3861187772406
|
Add metrics to MNIST
|
eva/examples/mnist.py
|
eva/examples/mnist.py
|
#%% Setup.
from collections import namedtuple
import numpy as np
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import Nadam
from keras.layers.advanced_activations import PReLU
from keras.utils import np_utils
from keras.utils.visualize_util import plot
from keras import backend as K
from eva.models.pixelcnn import PixelCNN
Data = namedtuple('Data', 'x y')
nb_classes = 10
img_rows, img_cols = 28, 28
nb_filters = 128
blocks = 4
batch_size = 128
nb_epoch = 4
def clean_data(x, y, rows, cols):
if K.image_dim_ordering() == 'th':
x = x.reshape(x.shape[0], 1, rows, cols)
input_shape = (1, rows, cols)
else:
x = x.reshape(x.shape[0], rows, cols, 1)
input_shape = (rows, cols, 1)
x = x.astype('float32') / 255
y = np_utils.to_categorical(y, nb_classes)
# New way
x[np.where(x > 0)] = 1
print('X shape:', x.shape)
print(x.shape[0], 'samples')
return x, y
def get_data(rows, cols):
return [Data(*clean_data(*data, rows, cols)) for data in mnist.load_data()]
def get_input(rows, cols):
return (1, rows, cols) if K.image_dim_ordering() == 'th' else (rows, cols, 1)
train, test = get_data(img_rows, img_cols)
input_shape = get_input(img_rows, img_cols)
input_dims = np.prod(input_shape)
model = PixelCNN(input_shape, nb_filters, blocks)
model.summary()
plot(model)
#%% Train.
model.fit(train.x, train.x, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=1, validation_data=(test.x, test.x))
score = model.evaluate(test.x, test.x, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
#%% Save model.
model.save('pixelcnn.h5')
|
Python
| 0.000019
|
@@ -1812,8 +1812,9 @@
cnn.h5')
+%0A
|
034d25f22253d19a8020b7d88a93a1305c57c1da
|
Fix pretty print response test
|
eve/tests/response.py
|
eve/tests/response.py
|
# -*- coding: utf-8 -*-
from ast import literal_eval
from eve.tests import TestBase
import simplejson as json
import eve
import os
class TestResponse(TestBase):
def setUp(self):
super(TestResponse, self).setUp()
self.r = self.test_client.get('/%s/' % self.empty_resource)
def test_response_data(self):
response = None
try:
response = literal_eval(self.r.get_data().decode())
except:
self.fail('standard response cannot be converted to a dict')
self.assertTrue(isinstance(response, dict))
def test_response_object(self):
response = literal_eval(self.r.get_data().decode())
self.assertTrue(isinstance(response, dict))
self.assertEqual(len(response), 3)
resource = response.get('_items')
self.assertTrue(isinstance(resource, list))
links = response.get('_links')
self.assertTrue(isinstance(links, dict))
meta = response.get('_meta')
self.assertTrue(isinstance(meta, dict))
def test_response_pretty(self):
# check if pretty printing was successful by checking the length of the
# response since pretty printing the respone makes it longer and not
# type dict anymore
self.r = self.test_client.get('/%s/?pretty' % self.empty_resource)
response = self.r.get_data().decode()
self.assertEqual(len(response), 300)
# python2 and python3 compatible (check for unicode or str)
self.assertTrue(isinstance(response, basestring))
class TestNoHateoas(TestBase):
def setUp(self):
super(TestNoHateoas, self).setUp()
self.app.config['HATEOAS'] = False
self.domain[self.known_resource]['hateoas'] = False
def test_get_no_hateoas_resource(self):
r = self.test_client.get(self.known_resource_url)
response = json.loads(r.get_data().decode())
self.assertTrue(isinstance(response, dict))
self.assertEqual(len(response['_items']), 25)
item = response['_items'][0]
self.assertTrue(isinstance(item, dict))
self.assertTrue('_links' not in response)
def test_get_no_hateoas_item(self):
r = self.test_client.get(self.item_id_url)
response = json.loads(r.get_data().decode())
self.assertTrue(isinstance(response, dict))
self.assertTrue('_links' not in response)
def test_get_no_hateoas_homepage(self):
r = self.test_client.get('/')
self.assert200(r.status_code)
def test_get_no_hateoas_homepage_reply(self):
r = self.test_client.get('/')
resp = json.loads(r.get_data().decode())
self.assertEqual(resp, {})
self.app.config['INFO'] = '_info'
r = self.test_client.get('/')
resp = json.loads(r.get_data().decode())
self.assertEqual(resp['_info']['server'], 'Eve')
self.assertEqual(resp['_info']['version'], eve.__version__)
settings_file = os.path.join(self.this_directory, 'test_version.py')
self.app = eve.Eve(settings=settings_file)
self.app.config['INFO'] = '_info'
r = self.app.test_client().get('/v1')
resp = json.loads(r.get_data().decode())
self.assertEqual(resp['_info']['api_version'],
self.app.config['API_VERSION'])
self.assertEqual(resp['_info']['server'], 'Eve')
self.assertEqual(resp['_info']['version'], eve.__version__)
def test_post_no_hateoas(self):
data = {'item1': json.dumps({"ref": "1234567890123456789054321"})}
headers = [('Content-Type', 'application/x-www-form-urlencoded')]
r = self.test_client.post(self.known_resource_url, data=data,
headers=headers)
response = json.loads(r.get_data().decode())
self.assertTrue('_links' not in response)
def test_patch_no_hateoas(self):
data = {'item1': json.dumps({"ref": "0000000000000000000000000"})}
headers = [('Content-Type', 'application/x-www-form-urlencoded'),
('If-Match', self.item_etag)]
r = self.test_client.patch(self.item_id_url, data=data,
headers=headers)
response = json.loads(r.get_data().decode())
self.assertTrue('_links' not in response)
|
Python
| 0.00086
|
@@ -1418,134 +1418,8 @@
00)%0A
- # python2 and python3 compatible (check for unicode or str)%0A self.assertTrue(isinstance(response, basestring))%0A
%0A%0Acl
|
df12bb251bbb6ab1b7efc1e955eb87faa73c6c15
|
Add message for correct orfik answer
|
events/orfik/views.py
|
events/orfik/views.py
|
from django.shortcuts import render, redirect, get_object_or_404
from events.orfik import models
from django.utils import timezone
from django.contrib.auth.decorators import login_required
from django.contrib.auth import get_user_model
from general import models as generalmodels
from django.contrib import messages
def make_player(request):
try:
player = request.user.player
except:
user = request.user
p = models.Player()
p.nickname = user.username
p.user = request.user
p.save()
def check_end():
return generalmodels.Variable.objects.get(name='orfikend').time <= timezone.now()
def check_start():
return generalmodels.Variable.objects.get(name='orfikstart').time <= timezone.now()
def home(request):
data = {}
template = 'orfik/home.html'
data['starttime'] = generalmodels.Variable.objects.get(name='orfikstart').time
data['started'] = check_start()
if request.user.is_authenticated():
make_player(request)
data['new_nick_form'] = models.NickForm()
ended = check_end()
# Has orfik ended?
if ended:
data['endtime'] = ended
data['winner'] = models.Player.objects.all().order_by('-max_level','last_solve')[0] == request.user.player
return render(request, template, data)
# If it has not ended, has it started?
if data['started']:
return redirect('events:orfik:question', q_no=0)
# It has not started, get the available questions
data['questions'] = models.Question.objects.filter(number__lte=request.user.player.max_level).order_by('number')
if request.method == 'POST':
form = models.Nickform(request.POST)
if form.is_valid():
form.save()
return render(request, template, data)
def instructions(request):
return render(request, 'orfik/instructions.html')
def leader(request):
data = {}
template = 'orfik/leader.html'
endtime = generalmodels.Variable.objects.get(name='orfikend').time
data['players'] = models.Player.objects.all().order_by('-max_level','last_solve')
if endtime <= timezone.now():
data['winner'] = data['players'][0]
return render(request, template, data)
@login_required
def question(request, q_no):
make_player(request)
starttime = generalmodels.Variable.objects.get(name='orfikstart').time
player = request.user.player
# Check if orfik has started
if starttime > timezone.now():
return redirect('events:orfik:home')
q_no = int(q_no)
# If player is not on question
if player.max_level != q_no:
return redirect('events:orfik:question', q_no=player.max_level)
data = {}
template = 'orfik/question.html'
question = get_object_or_404(models.Question,number=q_no)
data['question'] = question
if request.method == 'GET':
data['form'] = models.AnswerForm()
if request.method == 'POST':
form = models.AnswerForm(request.POST)
if question.number == player.max_level: # This is his first potential
# Correct answer
if form.is_valid():
attempt = form.save(commit=False)
attempt.player = player
attempt.question = question
attempt.save()
if attempt.is_correct():
player.last_solve = timezone.now()
player.max_level += 1
player.save()
return redirect('events:orfik:question', q_no=question.number+1)
else:
messages.info(request, 'Wrong answer. Try again!')
return redirect('events:orfik:question', q_no=question.number)
else:
data['form'] = form
return render(request, template, data)
|
Python
| 0.000093
|
@@ -3493,32 +3493,94 @@
player.save()%0A
+ messages.info(request, 'Corrent answer!')%0A
|
6269cc65f12edea301952907a928d554fb40d632
|
update tests
|
test/tests.py
|
test/tests.py
|
#!/usr/bin/python3
# namespace shenanigans
import sys, os
sys.path.insert(1, os.path.join(sys.path[0], '..') + '/lib')
import ostrich
import unittest
class OstrichTests(unittest.TestCase):
def setUp(self):
self.program = ostrich.Ostrich()
def expect(self, code, result):
self.assertEqual(self.program.run(code), result)
def test_whitespace(self):
self.expect(' \n \n\n', '')
def test_negate(self):
self.expect('42!', '0')
self.expect(';1!', '0')
self.expect(';0!', '1')
self.expect(';`foo`!', '0')
self.expect(';``!', '1')
self.expect(';{foo}!', '0')
self.expect(';{}!', '1')
self.expect(';[`foo`]!', '0')
self.expect(';[``]!', '0')
self.expect(';[]!', '1')
def test_quote(self):
pass # TODO
def test_arrset(self):
pass # TODO
def test_dollar(self):
self.expect('[3 2 4 1 5]$', '[1 2 3 4 5]')
self.expect(';[`test``tesu``tess`]$', '[`tess` `test` `tesu`]')
self.expect(';[{a}{c}{b}]$', '[{a} {b} {c}]')
self.expect(';[[1][3][2]]$', '[[1] [2] [3]]')
self.expect(';[]$', '[]')
self.expect(';`potato`$', '`aooptt`')
self.expect(';``$', '``')
# TODO block
# TODO stack nth (number)
def test_mod(self):
pass # TODO
def test_bitand(self):
pass # TODO
def test_inspect(self):
pass # TODO
def test_leftparen(self):
pass # TODO
def test_rightparen(self):
pass # TODO
def test_times(self):
pass # TODO
def test_plus(self):
self.expect('[1 2][3 4]+', '[1 2 3 4]')
self.expect(';[{foo}{bar}]{baz}+', '[{foo} {bar} {baz}]')
self.expect(';[`foo``bar`]`baz`+', '[`foo` `bar` `baz`]')
self.expect(';[1 2]3+', '[1 2 3]')
self.expect(';{foo}{bar}+', '{foobar}')
self.expect(';{foo}`bar`+', '{foobar}')
self.expect(';{foo}1+', '{foo1}')
self.expect(';`foo``bar`+', '`foobar`')
self.expect(';`foo`1+', '`foo1`')
self.expect(';2 2+', '4')
def test_comma(self):
pass # TODO
def test_minus(self):
pass # TODO
def test_duplicate(self):
self.expect('42.', '42 42')
self.expect(';;.', '')
def test_div(self):
pass # TODO
def test_num(self):
pass # TODO
def test_assign(self):
pass # TODO
def test_pop(self):
self.expect('42;', '')
self.expect(';', '')
def test_lt(self):
pass # TODO
def test_eq(self):
pass # TODO
def test_gt(self):
pass # TODO
def test_question(self):
pass # TODO
def test_roll(self):
self.expect('1 2 3 4 5', '1 2 3 4 5')
self.expect('2@', '1 2 3 5 4')
self.expect('2@', '1 2 3 4 5')
self.expect('3@', '1 2 4 5 3')
self.expect('4@', '1 4 5 3 2')
self.expect('5@', '4 5 3 2 1')
# TODO negative rolls
def test_leftbracket(self):
self.expect('[1 2 3]', '[1 2 3]')
self.expect(';[]', '[]')
self.expect(';[1 2 3', '[1 2 3]')
self.expect(';[', '[]')
self.expect(';[[1[[[2[3', '[[1 [[[2 [3]]]]]]')
def test_swaptwo(self):
self.expect('1 2 3\\', '1 3 2')
def test_rightbracket(self):
self.expect('1 2 3]', '[1 2 3]')
self.expect('1]', '[[1 2 3] 1]')
self.expect(']', '[[[1 2 3] 1]]')
def test_bitxor(self):
pass # TODO
def test_backtick(self):
self.expect('`foo`', '`foo`')
self.expect(';``', '``')
self.expect(';`foo', '`foo`')
self.expect(';`', '``')
# TODO escaping (not implemented)
def test_letter_p(self):
pass # TODO
def test_letter_q(self):
pass # TODO
def test_letter_v(self):
pass # TODO
def test_letter_z(self):
pass # TODO
def test_leftcurlybracket(self):
pass # TODO
def test_bitor(self):
pass # TODO
def test_rightcurlybracket(self):
pass # TODO
def test_tilde(self):
self.expect('1 2 3]~', '1 2 3')
self.expect(';;;{1 1+}~', '2')
self.expect(';`1 1+`~', '2')
self.expect(';42~', '-42')
if __name__ == '__main__':
unittest.main()
|
Python
| 0.000001
|
@@ -3783,111 +3783,264 @@
ter_
-p(self):%0A pass # TODO%0A%0A def test_letter_q(self):%0A pass # TODO%0A%0A def test_letter_v
+B(self):%0A pass # TODO%0A%0A def test_letter_E(self):%0A pass # TODO%0A%0A def test_letter_G(self):%0A pass # TODO%0A%0A def test_letter_I(self):%0A pass # TODO%0A%0A def test_letter_P(self):%0A pass # TODO%0A%0A def test_letter_R
(sel
@@ -4085,17 +4085,17 @@
_letter_
-z
+Z
(self):%0A
|
f6672fd0074052ba71bc1266590f0ef0db8f14d0
|
fix import.
|
blackgate/cli.py
|
blackgate/cli.py
|
# -*- coding: utf-8 -*-
import click
from blackgate.core import component
from blackgate.server importrun
@click.group()
def main():
# README CONFIG
component.install_from_config(config)
@main.command()
def start():
run(config.get('port', 9654))
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -97,16 +97,17 @@
r import
+
run%0A%0A@cl
|
41d7d1e2c87945d8101702d405b8b777f8f3908f
|
fix accuracy claculator
|
problems/performance_review/evaluation.py
|
problems/performance_review/evaluation.py
|
def calculate(counts,result):
n1 = 0.0
n2 = 0.0
a = 0.0
c = 0.0
for f1,f2,d in counts:
f1f2 = max(f1+f2-1,0)
nf1f2 = max(-f1+f2,0)
n1 += f1f2
n2 += nf1f2
if d[0]:
a+= max(f1f2 - d[1],0)
c+= max(nf1f2 - d[1],0)
else:
if f1f2==1:
a+= 1-result[d[1]]
else:
a+= 0
if nf1f2==1:
c+= 1-result[d[1]]
else:
c+=0
if (a==n1):
p1=1
else:
p1 = (a/n1)
if (c==n2):
p2 =1
else:
p2 = (c/n2)
return p1,p2
def evaluate(result, counts, fairMeasureCode):
p1,p2 = calculate(counts,result)
if fairMeasureCode=='RR':
RR = p1/p2
return RR
elif fairMeasureCode == 'RD':
RD = p1-p2
return RD
elif fairMeasureCode =='RC':
RC = (1-p1)/(1-p2)
return RC
def accuracy(dataPath, result, atoms):
employees = []
with open(dataPath+'employee.txt') as f:
for line in f:
line = line.strip()
if not line: continue
employees.append(line.split()[0])
vardic = atoms['promotion']
score = 0.0
for e in employees:
var = vardic[e][0]
if var in result:
predict = float(result[var])
truth = float(vardic[e][1])
if round(predict, 1)>=0.5:
if truth ==1.0:
score+=1.0
else:
if truth ==0.0:
score+=1.0
score = (float(score) / float(len(employees)))
return score
def accuracy_all(dataPath, result, atoms):
employees = []
with open(dataPath+'employee.txt') as f:
for line in f:
line = line.strip()
if not line: continue
employees.append(line.split()[0])
labels = dict()
with open(dataPath+'label.txt') as f:
for line in f:
line = line.strip()
if not line: continue
[employee, label] = line.split()
labels[employee] = label
vardic = atoms['promotion']
score = 0.0
score_A = 0.0
score_B = 0.0
size_A = 0.0
size_B = 0.0
for e in employees:
if labels[e] =='A':
size_A+=1
else:
size_B+=1
var = vardic[e][0]
if var in result:
predict = float(result[var])
truth = float(vardic[e][1])
if round(predict, 1)>=0.5:
if truth ==1.0:
score+=1.0
if labels[e] =='A':
score_A+=1
else:
score_B+=1
else:
if truth ==0.0:
score+=1.0
if labels[e] =='A':
score_A+=1
else:
score_B+=1
score = (float(score) / float(len(employees)))
score_A = (float(score_A) / float(size_A))
score_B = (float(score_B) / float(size_B))
return score, score_A, score_A
def accuracy_opinion(dataPath, result, atoms):
employees = []
with open(dataPath+'employee.txt') as f:
for line in f:
line = line.strip()
if not line: continue
employees.append(line.split()[0])
vardic = atoms['opinion']
score = 0.0
for e1 in employees:
for e2 in employees:
if e1==e2: continue
var = vardic[(e1,e2)][0]
if var in result:
predict = float(result[var])
truth = float(vardic[(e1,e2)][1])
if round(predict, 1)>=0.5:
if truth ==1.0:
score+=1.0
else:
if truth ==0.0:
score+=1.0
size = (float(len(employees))*float(len(employees)))- float(len(employees))
score = (float(score) / size)
return score
|
Python
| 0.000001
|
@@ -3139,17 +3139,17 @@
, score_
-A
+B
%0A%0Adef ac
|
1f72dcbc02b122aece83cb33d5440938e0ad8a18
|
Update urlize.py
|
markdown/extensions/urlize.py
|
markdown/extensions/urlize.py
|
# From https://github.com/r0wb0t/markdown-urlize/blob/master/urlize.py
"""A more liberal autolinker
Inspired by Django's urlize function.
Positive examples:
>>> import markdown
>>> md = markdown.Markdown(extensions=['urlize'])
>>> md.convert('http://example.com/')
u'<p><a href="http://example.com/">http://example.com/</a></p>'
>>> md.convert('go to http://example.com')
u'<p>go to <a href="http://example.com">http://example.com</a></p>'
>>> md.convert('example.com')
u'<p><a href="http://example.com">example.com</a></p>'
>>> md.convert('example.net')
u'<p><a href="http://example.net">example.net</a></p>'
>>> md.convert('www.example.us')
u'<p><a href="http://www.example.us">www.example.us</a></p>'
>>> md.convert('(www.example.us/path/?name=val)')
u'<p>(<a href="http://www.example.us/path/?name=val">www.example.us/path/?name=val</a>)</p>'
>>> md.convert('go to <http://example.com> now!')
u'<p>go to <a href="http://example.com">http://example.com</a> now!</p>'
Negative examples:
>>> md.convert('del.icio.us')
u'<p>del.icio.us</p>'
"""
import markdown
# Global Vars
URLIZE_RE = = ur'(^|(?<=\s))({0})((?=\s)|$)'.format("|".join((
# mail adress :
r"[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?",
# Anything with protocol between < >
r"<(?:f|ht)tps?://[^>]*>",
# with protocol : any valid domain match
r"((?:f|ht)tps?:\/\/)([\da-z\.-]+)\.([a-z\.]{2,6})([\/\w \.-]*)*\/?",
# without protocol, only somes specified protocols match
r"((?:f|ht)tps?:\/\/)?([\da-z\.-]+)\.(?:com|net|org|fr)([\/\w \.-]*)*\/?")))
class CorrectURLProcessor(markdown.treeprocessors.Treeprocessor):
def __init__(self):
markdown.treeprocessors.Treeprocessor.__init__(self)
def run(self, node):
for child in node.getiterator():
if child.tag == 'a' and 'href' in child.attrib and child.attrib['href'].split('://')[0] not in ('http','https','ftp') and not child.attrib['href'].startswith('#') not child.attrib['href'].startswith('mailto:'):
child.attrib['href'] = 'http://' + child.attrib['href']
return node
class UrlizePattern(markdown.inlinepatterns.Pattern):
""" Return a link Element given an autolink (`http://example/com`). """
def handleMatch(self, m):
url = m.group(2)
if url.startswith('<'):
url = url[1:-1]
text = url
if not url.split('://')[0] in ('http','https','ftp'):
if '@' in url and not '/' in url:
url = 'mailto:' + url
else:
url = 'http://' + url
el = markdown.util.etree.Element("a")
el.set('href', url)
el.text = markdown.util.AtomicString(text)
return el
class UrlizeExtension(markdown.Extension):
""" Urlize Extension for Python-Markdown. """
def extendMarkdown(self, md, md_globals):
""" Replace autolink with UrlizePattern """
md.inlinePatterns['autolink'] = UrlizePattern(URLIZE_RE, md)
md.treeprocessors.add('CorrectURLProcessor', CorrectURLProcessor(), '_end')
def makeExtension(configs=None):
return UrlizeExtension(configs=configs)
if __name__ == "__main__":
import doctest
doctest.testmod()
|
Python
| 0.000001
|
@@ -1137,18 +1137,16 @@
IZE_RE =
- =
ur'(%5E%7C(
|
325f680099ec2498c0c98f5ecba5f571b607df4c
|
remove trends and reduce api calls for new limit
|
marketWatch/papirus_ticker.py
|
marketWatch/papirus_ticker.py
|
#!/usr/bin/env python3
import config
import requests
try:
from papirus import PapirusTextPos
display = True
except ImportError:
display = False
thing1 = {'name': 'ETSY', 'type': 'security', 'price': 0, 'last_daily_price': 0, 'daily_trend': 'U'}
thing2 = {'name': 'BTC', 'type': 'currency', 'price': 0, 'last_daily_price': 0, 'daily_trend': 'U'}
thing3 = {'name': 'AAPL', 'type': 'security', 'price': 0, 'last_daily_price': 0, 'daily_trend': 'U'}
things = [thing1, thing2, thing3]
def get_cur_price(currency):
r = requests.get("https://www.alphavantage.co/query?function=DIGITAL_CURRENCY_INTRADAY&symbol=" + currency + "&market=USD&apikey=" + config.av_key)
full_data = r.json()
last_refresh = full_data["Meta Data"]["7. Last Refreshed"]
cur_price = full_data['Time Series (Digital Currency Intraday)'][last_refresh]['1a. price (USD)']
return "{:.2f}".format(float(cur_price))
def get_cur_last_daily_close(currency):
r = requests.get("https://www.alphavantage.co/query?function=DIGITAL_CURRENCY_DAILY&symbol=" + currency + "&market=USD&apikey=" + config.av_key)
full_data = r.json()
last_refresh = str(full_data["Meta Data"]["7. Last Refreshed"].split(" ")[0])
last_close = full_data['Time Series (Digital Currency Daily)'][last_refresh]['4a. close (USD)']
return "{:.2f}".format(float(last_close))
def get_sec_price(symbol):
    """Return the latest 1-minute close of a stock as a two-decimal
    string (Alpha Vantage TIME_SERIES_INTRADAY)."""
    url = ("https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol="
           + symbol + "&interval=1min&apikey=" + config.av_key)
    payload = requests.get(url).json()
    stamp = payload["Meta Data"]["3. Last Refreshed"]
    raw_price = payload['Time Series (1min)'][stamp]['4. close']
    return "{:.2f}".format(float(raw_price))
def get_sec_last_daily_close(symbol):
    """Return the previous daily close of a stock as a two-decimal string
    (Alpha Vantage TIME_SERIES_DAILY).

    The stray debug ``print(last_close)`` has been removed — none of the
    sibling fetchers print, and the caller formats output itself.
    """
    r = requests.get("https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=" + symbol + "&apikey=" + config.av_key)
    full_data = r.json()
    # The daily series is keyed by date only, so strip any time part.
    last_refresh = str(full_data["Meta Data"]["3. Last Refreshed"].split(" ")[0])
    last_close = full_data['Time Series (Daily)'][last_refresh]['4. close']
    return "{:.2f}".format(float(last_close))
def eval_trends(thing):
    """Set thing['daily_trend'] from the current price vs last daily close.

    'V' = fell, '^' = rose, '-' = unchanged.

    Prices are stored as "{:.2f}"-formatted strings by the fetchers, so
    they must be compared numerically: lexicographic string comparison
    mis-orders values such as '9.00' vs '10.00'.
    """
    price = float(thing['price'])
    last_close = float(thing['last_daily_price'])
    if last_close > price:
        thing['daily_trend'] = "V"
    elif last_close < price:
        thing['daily_trend'] = "^"
    else:
        thing['daily_trend'] = "-"
    return thing
def write_display(things):
    """Render one 'NAME: price trend' line per asset on the PaPiRus
    e-ink display."""
    # NOTE(review): False presumably defers the screen refresh until
    # WriteAll() — confirm against the papirus library docs.
    text = PapirusTextPos(False)
    pos = 10  # vertical offset of the first line, in pixels
    for i in things:
        text.AddText(i['name'] + ": " + str(i['price']) + " " + i['daily_trend'], 10, pos)
        pos = pos + 25  # line spacing
    text.WriteAll()
def write_console(things):
    """Print one 'NAME: price trend' line per asset to stdout (fallback
    when no PaPiRus display is available)."""
    for entry in things:
        line = entry['name'] + ": " + str(entry['price']) + " " + entry['daily_trend']
        print(line)
if __name__ == "__main__":
    # Refresh prices for every tracked asset, then render to the e-ink
    # display if present, otherwise to the console.
    for i in things:
        if i['type'] == 'security':
            i['price'] = get_sec_price(i['name'])
            i['last_daily_price'] = get_sec_last_daily_close(i['name'])
            i = eval_trends(i)
        if i['type'] == 'currency':
            i['price'] = get_cur_price(i['name'])
            # Daily-close lookup disabled to reduce API calls (rate
            # limit); currencies therefore always evaluate as flat.
            i['last_daily_price'] = i['price']  # get_cur_last_daily_close(i['name'])
            i = eval_trends(i)
    if display:
        write_display(things)
    else:
        write_console(things)
|
Python
| 0
|
@@ -2505,32 +2505,99 @@
or i in things:%0A
+ text.AddText(i%5B'name'%5D + %22: %22 + str(i%5B'price'%5D), 10, pos)%0A#
text.Add
@@ -2756,32 +2756,83 @@
or i in things:%0A
+ print(i%5B'name'%5D + %22: %22 + str(i%5B'price'%5D))%0A#
print(i%5B
@@ -3017,32 +3017,33 @@
rice(i%5B'name'%5D)%0A
+#
i%5B'l
@@ -3090,32 +3090,33 @@
lose(i%5B'name'%5D)%0A
+#
i =
@@ -3209,32 +3209,33 @@
rice(i%5B'name'%5D)%0A
+#
i%5B'l
@@ -3303,16 +3303,17 @@
name'%5D)%0A
+#
|
1c9a16a0896cd39aca2b44c0ef5c4eb155d1dab7
|
Add a test for 2 framgnets case.
|
server/kcaa/manipulator_util_test.py
|
server/kcaa/manipulator_util_test.py
|
#!/usr/bin/env python
import pytest
import manipulator_util
class TestManipulatorManager(object):
    """Tests for ManipulatorManager's auto-manipulator scheduling."""

    def pytest_funcarg__manager(self, request):
        # Legacy pytest funcarg-style fixture: a manager with no client,
        # no manipulators, epoch 0.
        return manipulator_util.ManipulatorManager(None, {}, 0)

    def test_in_schedule_fragment(self):
        # Fragments are half-open intervals [start, end): 3599 is in,
        # 3600 is out.
        in_schedule_fragment = (
            manipulator_util.ManipulatorManager.in_schedule_fragment)
        assert in_schedule_fragment(0, [0, 3600])
        assert in_schedule_fragment(1800, [0, 3600])
        assert in_schedule_fragment(3599, [0, 3600])
        assert not in_schedule_fragment(3600, [0, 3600])
        assert not in_schedule_fragment(5400, [0, 3600])

    def test_are_auto_manipulator_scheduled_disabled(self, manager):
        # When scheduling is disabled, no time is considered scheduled.
        manager.set_auto_manipulator_schedules(False, [[0, 3600]])
        assert not manager.are_auto_manipulator_scheduled(0)

    def test_are_auto_manipulator_scheduled_one_fragment(self, manager):
        manager.set_auto_manipulator_schedules(True, [[0, 3600]])
        assert manager.are_auto_manipulator_scheduled(0)
        assert manager.are_auto_manipulator_scheduled(1800)
        assert manager.are_auto_manipulator_scheduled(3599)
        assert not manager.are_auto_manipulator_scheduled(3600)
        assert not manager.are_auto_manipulator_scheduled(5400)
def main():
    # Run the doctests of the module under test, then this file's tests
    # via pytest (mapping a compiled .pyc path back to the source file).
    import doctest
    doctest.testmod(manipulator_util)
    pytest.main(args=[__file__.replace('.pyc', '.py')])


if __name__ == '__main__':
    main()
|
Python
| 0.000001
|
@@ -1267,16 +1267,590 @@
(5400)%0A%0A
+ def test_are_auto_manipulator_scheduled_two_fragments(self, manager):%0A manager.set_auto_manipulator_schedules(True, %5B%5B0, 3600%5D,%0A %5B7200, 10800%5D%5D)%0A assert manager.are_auto_manipulator_scheduled(0)%0A assert not manager.are_auto_manipulator_scheduled(3600)%0A assert manager.are_auto_manipulator_scheduled(7200)%0A assert manager.are_auto_manipulator_scheduled(10799)%0A assert not manager.are_auto_manipulator_scheduled(10800)%0A assert manager.are_auto_manipulator_scheduled(0)%0A%0A
%0Adef mai
|
147d1afb9bbef3444396dce57be4b03f94666a10
|
Add simple calculation of query formulation times
|
session.py
|
session.py
|
from data_record import DataRecord
from has_actions import HasActions
class Session(DataRecord, HasActions):
    """One user's search session on one topic under one experimental
    condition.

    Tracks which documents were seen, viewed and marked relevant, and
    derives relevance counts and timing statistics from the recorded
    actions. Python 2 code: relies on the list-returning ``filter``,
    the builtin ``reduce`` and ``dict.has_key``.
    """

    # Filters selecting sessions by experimental condition; record ids
    # 6-9 correspond to the four delay conditions.
    no_delays_filter = lambda session: session.condition.record_id == str(6)
    query_delay_filter = lambda session: session.condition.record_id == str(7)
    document_delay_filter = lambda session: session.condition.record_id == str(8)
    combined_delay_filter = lambda session: session.condition.record_id == str(9)
    identity_filter = lambda session: True  # accepts every session

    @staticmethod
    def combine_filters( *filters ):
        # AND-combine any number of session filters into one callable.
        return lambda session: all([fil( session ) for fil in filters])

    def __init__(self, session_id, user, topic, condition):
        DataRecord.__init__( self, session_id )
        self.topic = topic
        self.user = user
        self.condition = condition
        # Document stores, each keyed by document record_id.
        self.seen_documents = {}
        self.viewed_documents = {}
        self.marked_relevant_documents = {}

    def add_seen_documents(self, *documents):
        for document in documents:
            self.seen_documents[ document.record_id ] = document

    def add_viewed_documents( self, *documents ):
        for document in documents:
            self.viewed_documents[ document.record_id ] = document

    def add_marked_relevant_documents( self, *documents ):
        for document in documents:
            self.marked_relevant_documents[ document.record_id ] = document

    def seen_highly_relevant_documents(self):
        # Seen documents judged highly relevant for this session's topic.
        return [document for document in self.seen_documents.values() if document.is_highly_relevant_for_topic( self.topic )]

    def seen_moderately_relevant_documents(self):
        return [document for document in self.seen_documents.values() if document.is_moderately_relevant_for_topic( self.topic )]

    def seen_non_relevant_documents(self):
        return [document for document in self.seen_documents.values() if not document.is_relevant_for_topic( self.topic )]

    def viewed_highly_relevant_documents(self):
        return [document for document in self.viewed_documents.values() if document.is_highly_relevant_for_topic( self.topic )]

    def viewed_moderately_relevant_documents(self):
        return [document for document in self.viewed_documents.values() if document.is_moderately_relevant_for_topic( self.topic )]

    def viewed_non_relevant_documents(self):
        return [document for document in self.viewed_documents.values() if not document.is_relevant_for_topic( self.topic )]

    def duration_in_seconds(self):
        # Wall-clock span between the first and last recorded action.
        first_timestamp = self.actions[0].timestamp
        last_timestamp = self.actions[-1].timestamp
        delta = last_timestamp - first_timestamp
        return delta.total_seconds()

    def actions_by_type( self, action_type ):
        # (index, action) pairs so callers can inspect the *next* action.
        return [(idx,action) for idx, action in enumerate(self.actions) if action.action_type == action_type]

    def action_duration_in_seconds_for( self, idx, action ):
        # Duration of an action = gap until the following action.
        # NOTE(review): raises IndexError when `action` is the last one.
        next_timestamp = self.actions[ idx + 1 ].timestamp
        current_timestamp = action.timestamp
        delta = next_timestamp - current_timestamp
        return delta.total_seconds()

    def average_document_reading_time_in_seconds(self):
        # Accumulate reading time per document (a document may be opened
        # several times), then average over distinct documents.
        read_actions = self.actions_by_type( 'DOC_MARKED_VIEWED' )
        read_times = {}
        for idx, action in read_actions:
            action_duration = self.action_duration_in_seconds_for( idx, action )
            document = action.document
            if read_times.has_key( document.record_id ):  # Python 2 idiom
                read_times[ document.record_id ] += action_duration
            else:
                read_times[ document.record_id ] = action_duration
        return sum(read_times.values()) / len(read_times)

    @classmethod
    def build_session_id( cls, user_id, topic_id ):
        # Composite record id, e.g. "12-305".
        return str( user_id ) + '-' + str( topic_id )

    @classmethod
    def amount_of_seen_highly_relevant_documents(cls, filter_func = identity_filter):
        # Total over all stored sessions matching filter_func.
        sessions = cls.get_store().values()
        return reduce( lambda acc, session: acc + len(session.seen_highly_relevant_documents()), filter(filter_func, sessions), 0 )

    @classmethod
    def amount_of_viewed_highly_relevant_documents(cls, filter_func = identity_filter):
        sessions = cls.get_store().values()
        return reduce( lambda acc, session: acc + len(session.viewed_highly_relevant_documents()), filter(filter_func, sessions), 0 )

    @classmethod
    def amount_of_seen_moderately_relevant_documents(cls, filter_func = identity_filter):
        sessions = cls.get_store().values()
        return reduce( lambda acc, session: acc + len(session.seen_moderately_relevant_documents()), filter(filter_func, sessions), 0 )

    @classmethod
    def amount_of_viewed_moderately_relevant_documents(cls, filter_func = identity_filter):
        sessions = cls.get_store().values()
        return reduce( lambda acc, session: acc + len(session.viewed_moderately_relevant_documents()), filter(filter_func, sessions), 0 )

    @classmethod
    def amount_of_seen_non_relevant_documents(cls, filter_func = identity_filter):
        sessions = cls.get_store().values()
        return reduce( lambda acc, session: acc + len(session.seen_non_relevant_documents()), filter(filter_func, sessions), 0 )

    @classmethod
    def amount_of_viewed_non_relevant_documents(cls, filter_func = identity_filter):
        sessions = cls.get_store().values()
        return reduce( lambda acc, session: acc + len(session.viewed_non_relevant_documents()), filter(filter_func, sessions), 0 )

    @classmethod
    def average_duration_in_seconds(cls, filter_func = identity_filter):
        # Mean session length over the filtered sessions.
        sessions = filter( filter_func, cls.get_store().values() )
        return reduce( lambda acc, session: acc + session.duration_in_seconds(), sessions, 0 ) / len(sessions)

    @classmethod
    def global_average_document_reading_time_in_seconds(cls, filter_func = identity_filter):
        # Mean of the per-session average reading times.
        sessions = filter( filter_func, cls.get_store().values() )
        return reduce( lambda acc, session: acc + session.average_document_reading_time_in_seconds(), sessions, 0 ) / len(sessions)
|
Python
| 0.000785
|
@@ -3422,24 +3422,322 @@
ead_times)%0A%0A
+ def average_query_formulation_time_in_seconds(self):%0A query_start_actions = self.actions_by_type( 'QUERY_FOCUS' )%0A querying_durations = %5Bself.action_duration_in_seconds_for( idx, action ) for idx, action in query_start_actions%5D%0A return sum(querying_durations) / len(querying_durations)%0A%0A
@classmeth
@@ -5999,28 +5999,327 @@
sions, 0 ) / len(sessions)%0A%0A
+ @classmethod%0A def global_average_query_formulation_time_in_seconds(cls, filter_func = identity_filter):%0A sessions = filter( filter_func, cls.get_store().values() )%0A return reduce( lambda acc, session: acc + session.average_query_formulation_time_in_seconds(), sessions, 0 ) / len(sessions)%0A
|
5e307aff39f448c6e57d7fa4b0195bfccfe7794f
|
Fix the version detection
|
blues/mongodb.py
|
blues/mongodb.py
|
"""
MongoDB Blueprint
=================
**Fabric environment:**
.. code-block:: yaml
blueprints:
- blues.mongodb
settings:
mongodb:
# bind: 0.0.0.0 # Set the bind address specifically (Default: 127.0.0.1)
replSet: webscale
keyfile: 'mongodb-keyfile'
admin:
user: admin # Superuser username
password: foobar123 # Superuser password
schemas:
some_schema_name: # The schema name
user: foo # Username to connect to schema
password: bar # Password to connect to schema (optional)
"""
import json
from fabric.decorators import task
from refabric.context_managers import sudo
from refabric.contrib import blueprints
from refabric.api import run, info
from . import debian
__all__ = ['start', 'stop', 'restart', 'setup', 'configure', 'setup_schemas',
'setup_admin']
blueprint = blueprints.get(__name__)
# Service-control tasks for the 'mongodb' system service.
start = debian.service_task('mongodb', 'start')
stop = debian.service_task('mongodb', 'stop')
restart = debian.service_task('mongodb', 'restart')
@task
def setup():
    """
    Install and configure mongodb
    """
    install()
    configure()
    # NOTE(review): slaves presumably receive users/schemas via
    # replication, so only provision them on non-slave hosts — confirm.
    if not blueprint.get('slave'):
        setup_admin()
        setup_schemas()
def install():
    # Install the distribution's mongodb package.
    with sudo():
        debian.apt_get('install', 'mongodb')
def get_version():
    """Return the MongoDB server version as a ``(major, minor)`` tuple.

    ``db.version()`` prints e.g. ``2.6.10``; only the first two
    components are kept. Returning a tuple matters: the previous
    ``map(int, ...)`` result is a *list* in Python 2, and list-vs-tuple
    comparisons (``version < (2, 4)`` in ``_ensure_user``) order by type
    name, so the check misfired regardless of the actual version.
    """
    r = _client_exec('db.version()', auth=False)
    return tuple(map(int, r.split('.'))[:2])
def upload_conf(auth=None):
    """Render and upload /etc/mongodb.conf.

    auth=None uses the blueprint's 'auth' setting (default True);
    passing True/False overrides it (used to bootstrap the admin user).
    NOTE(review): the return value is treated as "file changed" by
    callers — confirm against blueprint.upload's contract.
    """
    context = {
        'bind': blueprint.get('bind', '127.0.0.1'),
        'auth': blueprint.get('auth', True) if auth is None else auth,
    }
    return blueprint.upload('mongodb.conf', '/etc/mongodb.conf', context)
@task
def configure():
    """
    Configure mongodb
    """
    uploads = [
        upload_conf()
    ]

    keyfile = blueprint.get('keyfile')
    if keyfile is not None:
        # Replica-set keyfile must be readable only by the mongodb user.
        uploaded_file = blueprint.upload(keyfile, '/var/lib/mongodb/keyfile')
        uploads.append(uploaded_file)
        run('chmod 600 /var/lib/mongodb/keyfile')
        run('chown mongodb:nogroup /var/lib/mongodb/keyfile')

    # Restart only when at least one remote file actually changed.
    if any(uploads):
        restart()
@task
def setup_admin():
    """
    Creates a superuser account.
    """
    # Nothing to do when auth is off or this host is a replication slave.
    if not blueprint.get('auth', True) or blueprint.get('slave'):
        return

    # Temporarily run without auth so the very first user can be created.
    upload_conf(auth=False) and restart()

    admin = blueprint.get('admin', {})
    roles = [
        {'role': 'userAdminAnyDatabase', 'db': 'admin'},
        {'role': 'readWriteAnyDatabase', 'db': 'admin'},
        {'role': 'dbAdminAnyDatabase', 'db': 'admin'},
        {'role': 'clusterAdmin', 'db': 'admin'}
    ]

    # Restart mongod without the auth flag
    _ensure_user('admin', admin['user'], admin['password'], roles, auth=False)

    # Re-enable auth now that the superuser exists.
    upload_conf(auth=True) and restart()
@task
def setup_schemas():
    """
    Creates database schemas and grant user privileges.
    """
    # Slaves are skipped; see setup().
    if blueprint.get('slave'):
        return

    schemas = blueprint.get('schemas', {})
    for schema, config in schemas.iteritems():  # Python 2 dict iteration
        info('Setting up schema {}', schema)
        roles = [{'role': 'readWrite', 'db': schema}]
        _ensure_user(schema, config['user'], config['password'], roles)
def _add_user(schema, user, password, roles, auth=True):
    """Legacy (mongo < 2.6) user creation via removeUser/addUser.

    The pre-2.6 API takes plain role-name strings, so the role dicts are
    flattened to their 'role' values first.
    """
    roles = [r['role'] for r in roles]
    # Single quotes: the whole script is shell-quoted with double quotes
    # in _client_exec.
    roles = json.dumps(roles).replace('"', '\'')

    _client_exec("""
        use %(schema)s;
        db.removeUser('%(user)s');
        db.addUser({
            user: '%(user)s',
            pwd: '%(password)s',
            roles: %(roles)s
        })
    """, user=user, password=password, roles=roles, auth=auth, schema=schema)
def _ensure_user(schema, user, password, roles, auth=True):
    """Create or update a mongo user, dispatching on server version.

    <2.4: unsupported; 2.4-2.5: legacy addUser API; >=2.6: updateUser,
    falling back to createUser when the user does not exist yet.
    """
    info('Creating/updating user {}', user)

    # NOTE(review): these tuple comparisons only work if get_version()
    # returns a tuple — in Python 2 a list never orders against a tuple
    # by value. Verify get_version's return type.
    version = get_version()

    if version < (2, 4):
        raise NotImplementedError
    elif version < (2, 6):
        return _add_user(schema, user, password, roles, auth=auth)

    # Single quotes: the script is shell-quoted with double quotes.
    roles = json.dumps(roles).replace('"', '\'')

    r = _client_exec("""
        use %(schema)s;
        db.updateUser('%(user)s', {
            pwd: '%(password)s',
            roles: %(roles)s
        })
    """, user=user, password=password, roles=roles, auth=auth, schema=schema)

    # updateUser fails with "not found" for a new user: create instead.
    if 'not found' in r:
        _client_exec("""
            use %(schema)s;
            db.createUser({
                user: '%(user)s',
                pwd: '%(password)s',
                roles: %(roles)s
            })
        """, user=user, password=password, roles=roles, auth=auth, schema=schema
        )
def _client_exec(cmd, auth=True, **kwargs):
    """Pipe a javascript snippet through the mongo shell, return output.

    ``cmd`` is %-interpolated with ``kwargs``; admin credentials are
    appended when ``auth`` is true.
    """
    with sudo():
        # NOTE: 'schema' is extracted but never used here — database
        # selection happens via 'use ...' inside the script itself.
        schema = kwargs.get('schema')
        cmd = "echo \"%s;\" | mongo --quiet" % (cmd % kwargs)

        if auth:
            admin = blueprint.get('admin')
            extra = " -u \"%s\" -p \"%s\" --authenticationDatabase \"admin\""
            cmd += extra % (admin['user'], admin['password'])

        return run(cmd)
|
Python
| 0.000253
|
@@ -1441,16 +1441,22 @@
return
+tuple(
map(int,
@@ -1469,16 +1469,21 @@
it('.'))
+%5B:2%5D)
%0A%0A%0Adef u
|
e791616c161cb7bea222098ac90235b402e62ffa
|
fix saving with bootloader
|
boards/PUCKJS.py
|
boards/PUCKJS.py
|
#!/bin/false
# This file is part of Espruino, a JavaScript interpreter for Microcontrollers
#
# Copyright (C) 2013 Gordon Williams <gw@pur3.co.uk>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# ----------------------------------------------------------------------------------------
# This file contains information for a specific board - the available pins, and where LEDs,
# Buttons, and other in-built peripherals are. It is used to build documentation as well
# as various source and header files for Espruino.
# ----------------------------------------------------------------------------------------
import pinutils;
# Board description consumed by the Espruino build system.
info = {
 'name' : "PuckJS",
 'link' : [ "" ],
 'default_console' : "EV_SERIAL1",
 'default_console_tx' : "D28",
 'default_console_rx' : "D29",
 'default_console_baudrate' : "9600",
 # Number of variables can be WAY higher on this board
 'variables' : 2000, # How many variables are allocated for Espruino to use. RAM will be overflowed if this number is too high and code won't compile.
 'bootloader' : 1,
 'binary_name' : 'espruino_%v_puckjs.bin',
 'build' : {
   'defines' : [
     'USE_BLUETOOTH'
   ]
 }
};

chip = {
  'part' : "NRF52832",
  'family' : "NRF52",
  'package' : "QFN48",
  'ram' : 64,
  'flash' : 512,
  'speed' : 64,
  'usart' : 1,
  'spi' : 3,
  'i2c' : 2,
  'adc' : 1,
  'dac' : 0,
  # Where saved JS code lives in flash.
  # NOTE(review): with 'bootloader' enabled above, confirm the save
  # address does not overlap the flash pages the bootloader occupies.
  'saved_code' : {
    'address' : ((128 - 3) * 4096),
    'page_size' : 4096,
    'pages' : 3,
    'flash_available' : (512 - 124 - 12) # Softdevice uses 31 plages of flash. Each page is 4 kb.
  },
};

# On-board peripherals and the GPIOs they are wired to.
devices = {
  'LED1' : { 'pin' : 'D5' },
  'LED2' : { 'pin' : 'D4' },
  'LED3' : { 'pin' : 'D3' },
  'IR'   : { 'pin_anode' : 'D26', 'pin_cathode' : 'D25' },
  'BTN1' : { 'pin' : 'D0', 'pinstate' : 'IN_PULLDOWN' },
  'CAPSENSE' : { 'pin_rx' : 'D11', 'pin_tx' : 'D12' }
  # NFC D9/D10
};

# left-right, or top-bottom order
board = {
  'left' : [ 'PD28', 'PD29', 'PD30', 'PD31'],
  'right' : [ 'GND', '3V', 'D2', 'D1' ],
};
def get_pins():
  """Return the pin table with special-function annotations attached."""
  pins = pinutils.generate_pins(0,31) # 32 General Purpose I/O Pins.
  # Low-frequency crystal pins.
  pinutils.findpin(pins, "PD0", True)["functions"]["XL1"]=0;
  pinutils.findpin(pins, "PD1", True)["functions"]["XL2"]=0;
  # NFC antenna pins.
  pinutils.findpin(pins, "PD9", True)["functions"]["NFC1"]=0;
  pinutils.findpin(pins, "PD10", True)["functions"]["NFC2"]=0;
  # ADC-capable pins (AIN0-AIN7).
  pinutils.findpin(pins, "PD2", True)["functions"]["ADC1_IN0"]=0;
  pinutils.findpin(pins, "PD3", True)["functions"]["ADC1_IN1"]=0;
  pinutils.findpin(pins, "PD4", True)["functions"]["ADC1_IN2"]=0;
  pinutils.findpin(pins, "PD5", True)["functions"]["ADC1_IN3"]=0;
  pinutils.findpin(pins, "PD28", True)["functions"]["ADC1_IN4"]=0;
  pinutils.findpin(pins, "PD29", True)["functions"]["ADC1_IN5"]=0;
  pinutils.findpin(pins, "PD30", True)["functions"]["ADC1_IN6"]=0;
  pinutils.findpin(pins, "PD31", True)["functions"]["ADC1_IN7"]=0;

  #The boot/reset button will function as a reset button in normal operation. Pin reset on PD21 needs to be enabled on the nRF52832 device for this to work.
  return pins
|
Python
| 0.000001
|
@@ -1519,17 +1519,17 @@
' : ((12
-8
+2
- 3) *
@@ -1534,16 +1534,49 @@
* 4096),
+ # Bootloader takes pages 122-127
%0A 'pa
|
3bd383a15902d8367097a4348de64c929732767b
|
Fix Test
|
tests/NewsParser_Test.py
|
tests/NewsParser_Test.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: balicanta
# @Date: 2014-10-25 09:57:26
# @Last Modified by: bustta
# @Last Modified time: 2014-10-27 23:22:08
from NewsParser import NewsParser
from requests.utils import get_encodings_from_content
# Each fixture pairs a live article URL with substrings expected in the
# parsed fields.
# NOTE(review): the first fixture has no 'content' key and the last two
# have no 'author' key, yet test_parser() indexes all three keys for
# every fixture — those lookups raise KeyError as written.
test_fixtures = [
    {"url": "http://udn.com/NEWS/NATIONAL/NAT3/9017464.shtml",
     "title": "聯合報直擊", "author": "呂思逸"},
    {"url": "http://world.yam.com/post.php?id=2732",
     "title": "海潮人潮兇", "content": "這座遊人如織的水都"},
    {"url": "http://news.ltn.com.tw/news/business/breakingnews/1142153",
     "title": "魏家退出101", "content": "財政部次長吳當傑今天傍晚表示"}
]
def test_parser():
    """Parse each fixture URL and check the expected substrings.

    Only the fields a fixture actually specifies are checked: the
    original indexed 'author' and 'content' unconditionally, but some
    fixtures omit one of those keys, so a KeyError was raised before any
    assertion could run.
    """
    for test_fixture in test_fixtures:
        parser = NewsParser(test_fixture['url'])
        if 'title' in test_fixture:
            assert test_fixture['title'] in parser.getTitle().encode('utf-8')
        if 'author' in test_fixture:
            assert test_fixture['author'] in parser.getAuthor().encode('utf-8')
        if 'content' in test_fixture:
            assert test_fixture['content'] in parser.getContent().encode('utf-8')
|
Python
| 0.000001
|
@@ -119,11 +119,14 @@
b
-ust
+alican
ta%0A#
@@ -165,13 +165,13 @@
23:
-22:08
+44:57
%0A%0Afr
@@ -379,16 +379,39 @@
%22: %22%E5%91%82%E6%80%9D%E9%80%B8%22
+,%22content%22:%22%E6%98%AF%E7%94%B1%E9%99%B3%E8%80%81%E9%97%86%E6%89%B9%E4%BA%86%E6%A3%89%E8%8A%B1%E6%A3%92%22
%7D,%0A %7B
@@ -483,16 +483,29 @@
%22%E6%B5%B7%E6%BD%AE%E4%BA%BA%E6%BD%AE%E5%85%87%22,
+ %22author%22:%22%22,
%22conten
@@ -622,16 +622,29 @@
%E5%AE%B6%E9%80%80%E5%87%BA101%22,
+ %22author%22:%22%22,
%22conten
|
97478d2bb38b94a5effbbc74db3ae1a0360f9a19
|
remove vm.id usage in exeption message
|
vmpool/endpoint.py
|
vmpool/endpoint.py
|
# coding: utf-8
from core.utils import generator_wait_for
from core.logger import log_pool
from core.config import config
from core.exceptions import PlatformException, NoSuchEndpoint, \
CreationException
from vmpool.virtual_machines_pool import pool
from vmpool.platforms import Platforms
from vmpool.vmqueue import q
def get_vm_from_pool(endpoint_name):
    """Look up a vm in the pool by endpoint name.

    Raises NoSuchEndpoint when no vm with that name exists.
    """
    vm = pool.get_by_name(endpoint_name)
    if not vm:
        raise NoSuchEndpoint('No such endpoint: %s' % endpoint_name)
    log_pool.debug('Got vm with params: %s' % vm.info)
    return vm
def new_vm(desired_caps):
    """Generator that enqueues a vm request and yields progress.

    Yields the delayed request while queued, then the vm itself while it
    boots; the final yielded value is the ready vm. Raises
    PlatformException for an unknown platform and CreationException on
    missing platform or on either timeout.
    """
    platform = desired_caps.get("platform", None)

    # A globally configured platform overrides whatever the client asked for.
    if hasattr(config, "PLATFORM") and config.PLATFORM:
        log_pool.info(
            'Using %s. Desired platform %s has been ignored.' %
            (config.PLATFORM, platform)
        )
        platform = config.PLATFORM
        desired_caps["platform"] = platform

    if isinstance(platform, unicode):  # Python 2: normalise to a byte string
        platform = platform.encode('utf-8')

    if not platform:
        raise CreationException(
            'Platform parameter for new endpoint not found in dc'
        )
    if not Platforms.check_platform(platform):
        raise PlatformException('No such platform %s' % platform)

    delayed_vm = q.enqueue(desired_caps)
    yield delayed_vm

    # Phase 1: wait for the queue to assign an actual vm.
    for condition in generator_wait_for(
        lambda: delayed_vm.vm, timeout=config.GET_VM_TIMEOUT
    ):
        yield delayed_vm

    if not delayed_vm.vm:
        raise CreationException(
            "Timeout while waiting for vm with platform %s" % platform
        )

    yield delayed_vm.vm

    # Phase 2: wait for the assigned vm to report ready.
    for condition in generator_wait_for(
        lambda: delayed_vm.vm.ready, timeout=config.GET_VM_TIMEOUT
    ):
        yield delayed_vm.vm

    if not delayed_vm.vm.ready:
        raise CreationException(
            'Timeout while building vm %s (platform: %s)' %
            (delayed_vm.vm.id, platform)
        )

    log_pool.info('Got vm for request with params: %s' % delayed_vm.vm.info)
    yield delayed_vm.vm
def delete_vm(endpoint_name):
    """Remove the named endpoint from the pool.

    Preloaded vms are rebuilt (kept warm) instead of being destroyed.
    """
    vm = pool.get_by_name(endpoint_name)
    if not vm:
        log_pool.info("Vm %s not found in pool or vm is busy" % endpoint_name)
        return
    if vm.is_preloaded():
        vm.rebuild()
    else:
        vm.delete()
    log_pool.info("Vm %s has been deleted" % endpoint_name)
|
Python
| 0
|
@@ -1909,10 +1909,12 @@
.vm.
-id
+name
, pl
|
01bb6723b2bc7ab7a7fb6629e304f5ed42f40af4
|
Add GSM characters test case for a unicode message.
|
tests/clockwork_tests.py
|
tests/clockwork_tests.py
|
import unittest
import clockwork
import clockwork_exceptions
class ApiTests(unittest.TestCase):
    """Integration tests against the live Clockwork SMS API.

    NOTE(review): requires a real API key; with the placeholder below
    every test will fail at the network call.
    """

    api_key = "YOUR_API_KEY_HERE"

    def test_should_send_single_message(self):
        """Sending a single SMS with the minimum detail and no errors should work"""
        api = clockwork.API(self.api_key)
        sms = clockwork.SMS(to="441234567890", message="This is a test message")
        response = api.send(sms)
        self.assertTrue(response.success)

    def test_should_fail_with_no_message(self):
        """Sending a single SMS with no message should fail"""
        api = clockwork.API(self.api_key)
        sms = clockwork.SMS(to="441234567890", message="")
        response = api.send(sms)
        self.assertFalse(response.success)

    def test_should_fail_with_no_to(self):
        """Sending a single SMS with no message should fail"""
        api = clockwork.API(self.api_key)
        sms = clockwork.SMS(to="", message="This is a test message")
        response = api.send(sms)
        self.assertFalse(response.success)

    def test_should_send_multiple_messages(self):
        """Sending multiple sms messages should work"""
        api = clockwork.API(self.api_key)
        sms1 = clockwork.SMS(to="441234567890", message="This is a test message 1")
        sms2 = clockwork.SMS(to="441234567890", message="This is a test message 2")
        # Batch send returns one response per message.
        response = api.send([sms1,sms2])
        for r in response:
            self.assertTrue(r.success)

    def test_should_send_multiple_messages_with_erros(self):
        """Sending multiple sms messages, one of which has an invalid message should work"""
        api = clockwork.API(self.api_key)
        sms1 = clockwork.SMS(to="441234567890", message="This is a test message 1")
        sms2 = clockwork.SMS(to="441234567890", message="")
        # A bad message fails individually without failing the batch.
        response = api.send([sms1,sms2])
        self.assertTrue(response[0].success)
        self.assertFalse(response[1].success)

    def test_should_fail_with_invalid_key(self):
        # Authentication errors surface as ApiException rather than a
        # failed response.
        api = clockwork.API("this_key_is_wrong")
        sms = clockwork.SMS(to="441234567890", message="This is a test message 1")
        self.assertRaises(clockwork_exceptions.ApiException, api.send, sms)

    def test_should_be_able_to_get_balance(self):
        api = clockwork.API(self.api_key)
        balance = api.get_balance()
        self.assertEqual('PAYG',balance['account_type'])
if __name__ == "__main__":
    # Run the test suite when executed directly.
    unittest.main()
|
Python
| 0
|
@@ -1,12 +1,37 @@
+# -*- coding: utf-8 -*-%0A%0A
import unitt
@@ -438,32 +438,849 @@
ponse.success)%0A%0A
+%09def test_should_send_single_unicode_message(self):%0A%09%09%22%22%22Sending a single SMS with the full GSM character set (apart from ESC and form feed) should work%22%22%22%0A%09%09api = clockwork.API(self.api_key)%0A%09%09sms = clockwork.SMS(%0A to=%22441234567890%22,%0A%09%09 #Message table copied from http://www.clockworksms.com/doc/reference/faqs/gsm-character-set/%0A #Note, the %22/f%22 (form feed) character does not work as lxml prohibits it.%0A%09%09%09message= %09u'''@%C2%A3$%C2%A5%C3%A8%C3%A9%C3%B9%C3%AC%C3%B2%C3%87%5Cn%C3%98%C3%B8%5Cr%C3%85%C3%A5%CE%94_%CE%A6%CE%93%CE%9B%CE%A9%CE%A0%CE%A8%CE%A3%CE%98%CE%9E%C3%86%C3%A6%C3%9F%C3%89'''%0A%09%09%09%09 %09%09u''' !%E2%80%9C#%C2%A4%25&%E2%80%98()*+,-./'''%0A%09%09%09%09 u'''0123456789:;%3C=%3E?'''%0A%09%09%09%09 u'''%C2%A1ABCDEFGHIJKLMNO'''%0A%09%09%09%09 u'''PQRSTUVWXYZ%C3%84%C3%96%C3%91%C3%9C%C2%A7'''%0A%09%09%09%09 u'''%C2%BFabcdefghijklmno'''%0A%09%09%09%09 u'''pqrstuvwxyz%C3%A4%C3%B6%C3%B1%C3%BC%C3%A0'''%0A u'''%E2%82%AC%5B%5C%5D%5E%7B%7C%7D~'''%0A%09%09%09,long=True)%0A%09%09response = api.send(sms)%0A%09%09self.assertTrue(response.success)%0A%0A%0A
%09def test_should
|
5270f88f4c19b68f31092db8395adeb2e7028065
|
Use .succeeded when checking for an already existing settings.php file.
|
drupal/AdjustConfiguration.py
|
drupal/AdjustConfiguration.py
|
from fabric.api import *
from fabric.contrib.files import *
import Revert
# Adjust settings.php. Copy the relevant file based on the branch, delete the rest.
@task
@roles('app_all')
def adjust_settings_php(repo, branch, build, buildtype, alias, site):
# In some cases it seems jenkins loses write permissions to the site directory
# Let's make sure!
sudo("chmod -R 775 /var/www/%s_%s_%s/www/sites/%s" % (repo, branch, build, site))
# Check there is a settings.inc file, there are no cases where there should not be!
if run("stat /var/www/config/%s_%s.settings.inc" % (alias, branch)):
with settings(warn_only=True):
if run("stat /var/www/%s_%s_%s/www/sites/%s/settings.php" % (repo, branch, build, site)):
run("mv /var/www/%s_%s_%s/www/sites/%s/settings.php /var/www/%s_%s_%s/www/sites/%s/unused.settings.php" % (repo, branch, build, site, repo, branch, build, site))
if run("ln -s /var/www/config/%s_%s.settings.inc /var/www/%s_%s_%s/www/sites/%s/settings.php" % (alias, branch, repo, branch, build, site)).failed:
raise SystemExit("######## Couldn't symlink in settings.inc file! Aborting build.")
else:
raise SystemExit("######## Couldn't find any settings.inc! This site probably failed its initial build and needs fixing. Aborting early! TIP: Add a /var/www/config/%s_%s.settings.inc file manually and do a file_exists() check for /var/www/%s_%s_%s/www/sites/%s/%s.settings.php and if it exists, include it. Then symlink that to /var/www/%s_%s_%s/www/sites/%s/settings.php." % (alias, branch, repo, branch, build, site, buildtype, repo, branch, build, site))
with settings(warn_only=True):
# Let's make sure we're checking for $buildtype.settings.php.
# If so, we'll update the build number - if not, we'll add the check to the bottom of the file.
settings_file = "/var/www/config/%s_%s.settings.inc" % (alias, branch)
if run('grep "\$file = \'\/var\/www\/%s" %s' % (repo, settings_file)).return_code == 0:
print "===> %s already has a file_exists() check. We need to replace the build number so the newer %s.settings.php file is used." % (settings_file, buildtype)
replace_string = "/var/www/.+_.+_build_[0-9]+/.+\.settings\.php"
replace_with = "/var/www/%s_%s_%s/www/sites/%s/%s.settings.php" % (repo, branch, build, site, buildtype)
sed(settings_file, replace_string, replace_with, limit='', use_sudo=False, backup='.bak', flags="i", shell=False)
else:
append_string = """$file = '/var/www/%s_%s_%s/www/sites/%s/%s.settings.php';
if (file_exists($file)) {
include($file);
}""" % (repo, branch, build, site, buildtype)
append(settings_file, append_string, use_sudo=True)
print "===> %s did not have a file_exists() check, so it was appended to the bottom of the file." % settings_file
# Adjust shared files symlink
@task
@roles('app_all')
def adjust_files_symlink(repo, branch, build, alias, site):
    """Point the build's sites/<site>/files at the shared files dir so
    uploads survive across builds."""
    print "===> Setting the symlink for files"
    sudo("ln -s /var/www/shared/%s_%s_files/ /var/www/%s_%s_%s/www/sites/%s/files" % (alias, branch, repo, branch, build, site))
# If we have a drushrc.php file in the site that reflects this branch, copy that into place
@task
@roles('app_all')
def adjust_drushrc_php(repo, branch, build, site):
    """Activate a branch-specific drushrc.php, if the repo ships one.

    Best-effort: a missing or uncopyable file only logs a message and the
    build continues.
    """
    with settings(warn_only=True):
        print "===> Copying %s.drushrc.php to drushrc.php if it exists" % branch
        if run("stat /var/www/%s_%s_%s/www/sites/%s/%s.drushrc.php" % (repo, branch, build, site, branch)).failed:
            print "===> Couldn't find /var/www/%s_%s_%s/www/sites/%s/%s.drushrc.php, so moving on..." % (repo, branch, build, site, branch)
        else:
            if sudo("cp /var/www/%s_%s_%s/www/sites/%s/%s.drushrc.php /var/www/%s_%s_%s/www/sites/%s/drushrc.php" % (repo, branch, build, site, branch, repo, branch, build, site)).failed:
                print "####### Could not copy /var/www/%s_%s_%s/www/sites/%s/%s.drushrc.php to /var/www/%s_%s_%s/www/sites/%s/drushrc.php. Continuing with build, but perhaps have a look into why the file couldn't be copied." % (repo, branch, build, site, branch, repo, branch, build, site)
            else:
                print "===> Copied /var/www/%s_%s_%s/www/sites/%s/%s.drushrc.php to /var/www/%s_%s_%s/www/sites/%s/drushrc.php" % (repo, branch, build, site, branch, repo, branch, build, site)
|
Python
| 0
|
@@ -721,16 +721,26 @@
, site))
+.succeeded
:%0A
|
4548b24c17caf6149b741c7f8a8f743f4ff431b4
|
Remove partitions
|
2011/candy_splitting.py
|
2011/candy_splitting.py
|
#!/usr/bin/env python
from __future__ import print_function
from functools import reduce
def split_candies(candies):
    """Return the largest pile value Sean can keep, or 0 if impossible.

    Patrick "adds" with xor, so the two piles look equal to him iff
    xor(pile0) == xor(pile1). Since xor(pile0) ^ xor(pile1) equals the
    xor of all candies, that holds for EVERY 2-partition when the total
    xor is 0, and for none otherwise. When possible, the best split
    gives Patrick only the single smallest candy, so the answer is
    sum - min — computed in O(n) instead of enumerating all partitions
    (the previous implementation was exponential).
    """
    assert isinstance(candies, list)
    total_xor = reduce(lambda x, y: x ^ y, candies)
    if total_xor == 0:
        return sum(candies) - min(candies)
    return 0
def sorted_k_partitions(seq, k):
    """Return every unique partition of ``seq`` into exactly ``k`` parts.

    Each partition is a list of tuples. Within a partition the parts are
    in shortlex order (length first, then lexicographically); the list of
    partitions is ordered by part lengths and then lexicographically.
    """
    total = len(seq)
    parts = []  # parts built so far, as mutable lists

    def emit(i):
        # Recursively assign seq[i:] to existing parts or open new ones.
        if i >= total:
            yield [tuple(p) for p in parts]
            return
        # Reuse an existing part only while enough items remain to
        # populate the parts not yet opened (avoids duplicates/underfill).
        if total - i > k - len(parts):
            for part in parts:
                part.append(seq[i])
                for partition in emit(i + 1):
                    yield partition  # no 'yield from': keep Py2 compatible
                part.pop()
        if len(parts) < k:
            parts.append([seq[i]])
            for partition in emit(i + 1):
                yield partition
            parts.pop()

    raw = emit(0)
    # Shortlex-sort the parts inside each partition, then order the
    # partitions themselves.
    ordered = [sorted(ps, key=lambda p: (len(p), p)) for ps in raw]
    return sorted(ordered, key=lambda ps: (len(ps), ps))
if __name__ == '__main__':
    import os

    # Quick sanity check against two hand-written samples.
    samples = [
        [1,2,3,4,5],
        [3,5,6]
    ]
    for sample in samples:
        max_candy = split_candies(sample)
        if max_candy > 0:
            print(max_candy)
        else:
            print('NO')

    # Code-jam style I/O: line 0 is the case count, then each case is a
    # pair of lines (count, space-separated values) — only the value
    # lines (inputs[1::2]) are used.
    data_files = ['C-small-practice',
                  'C-large-practice']
    for f in data_files:
        with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               '{0}.in'.format(f)), 'r') as input_file:
            lines = input_file.readlines()
            input_count = int(lines[0].replace('\n' ,''))
            inputs = [line.replace('\n', '') for line in lines[1:]]
            test_cases = [[int(_) for _ in in_.split(' ')] for in_ in inputs[1::2]]

            i = 1  # 1-based case number for the "Case #i:" output lines
            with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                   '{0}.out'.format(f)), 'w') as output_file:
                for test_case in test_cases:
                    print(test_case)
                    max_candy = split_candies(test_case)
                    if max_candy > 0:
                        output_file.write('Case #{0}: {1}\n'.format(i, max_candy))
                    else:
                        output_file.write('Case #{0}: {1}\n'.format(i, 'NO'))
                    i += 1
|
Python
| 0.000011
|
@@ -153,352 +153,54 @@
st)%0A
-%0A
-partitions = sorted_k_partitions(candies, 2)%0A print(partitions)%0A%0A max_candy = 0%0A for partition in partitions:%0A xor0 = reduce(lambda x, y: x %5E y, partition%5B0%5D)%0A sum0 = sum(partition%5B0%5D)%0A xor1 = reduce(lambda x, y: x %5E y, partition%5B1%5D)%0A sum1 = sum(partition%5B1%5D)%0A print(xor0, xor1, sum0, sum1)%0A
+xor = reduce(lambda x, y: x %5E y, candies)%0A
@@ -209,17 +209,13 @@
xor
-0
==
-xor1
+0
:%0A
@@ -224,1611 +224,57 @@
-
- max_candy = max(max_candy, max(sum0, sum1))%0A%0A return max_candy%0A%0Adef sorted_k_partitions(seq, k):%0A %22%22%22Returns a list of all unique k-partitions of %60seq%60.%0A%0A Each partition is a list of parts, and each part is a tuple.%0A%0A The parts in each individual partition will be sorted in shortlex%0A order (i.e., by length first, then lexicographically).%0A%0A The overall list of partitions will then be sorted by the length%0A of their first part, the length of their second part, ...,%0A the length of their last part, and then lexicographically.%0A %22%22%22%0A n = len(seq)%0A groups = %5B%5D # a list of lists, currently empty%0A%0A def generate_partitions(i):%0A if i %3E= n:%0A yield list(map(tuple, groups))%0A else:%0A if n - i %3E k - len(groups):%0A for group in groups:%0A group.append(seq%5Bi%5D)%0A for item in generate_partitions(i + 1):%0A yield item # Python3: yield from generate_partitions(i + 1)%0A group.pop()%0A%0A if len(groups) %3C k:%0A groups.append(%5Bseq%5Bi%5D%5D)%0A for item in generate_partitions(i + 1):%0A yield item # Python3: yield from generate_partitions(i + 1)%0A groups.pop()%0A%0A result = generate_partitions(0)%0A%0A # Sort the parts in each partition in shortlex order%0A result = %5Bsorted(ps, key = lambda p: (len(p), p)) for ps in result%5D%0A # Sort partitions by the length of each part, then lexicographically.%0A result = sorted(result, key = lambda ps: (len(ps), ps)) # Python3: *map(len, ps)%0A%0A
+return sum(candies) - min(candies)%0A else:%0A
@@ -284,14 +284,9 @@
urn
-result
+0
%0A%0Aif
|
9ac43293dc133bf4b9b55d39fc6502b65c265843
|
Remove hardcoded value
|
tests/api/test_core.py
|
tests/api/test_core.py
|
from unittest.mock import Mock
import graphene
from django.utils import timezone
from tests.api.utils import (
_get_graphql_content_from_response, get_graphql_content)
from saleor.graphql.core.types import ReportingPeriod
from saleor.graphql.core.utils import clean_seo_fields, snake_to_camel_case
from saleor.graphql.product import types as product_types
from saleor.graphql.utils import get_database_id, reporting_period_to_date
def test_clean_seo_fields():
title = 'lady title'
description = 'fantasy description'
data = {'seo':
{'title': title,
'description': description}}
clean_seo_fields(data)
assert data['seo_title'] == title
assert data['seo_description'] == description
def test_user_error_field_name_for_related_object(
staff_api_client, permission_manage_products):
query = """
mutation {
categoryCreate(input: {name: "Test"}, parent: "123456") {
errors {
field
message
}
category {
id
}
}
}
"""
response = staff_api_client.post_graphql(
query, permissions=[permission_manage_products])
content = get_graphql_content(response)
data = content['data']['categoryCreate']['category']
assert data is None
error = content['data']['categoryCreate']['errors'][0]
assert error['field'] == 'parent'
def test_get_database_id(product):
info = Mock(
schema=Mock(
get_type=Mock(
return_value=Mock(graphene_type=product_types.Product))))
node_id = graphene.Node.to_global_id('Product', product.pk)
pk = get_database_id(info, node_id, product_types.Product)
assert int(pk) == product.pk
def test_snake_to_camel_case():
assert snake_to_camel_case('test_camel_case') == 'testCamelCase'
assert snake_to_camel_case('testCamel_case') == 'testCamelCase'
assert snake_to_camel_case(123) == 123
def test_mutation_returns_error_field_in_camel_case(
staff_api_client, variant, permission_manage_products):
# costPrice is snake case variable (cost_price) in the backend
query = """
mutation testCamel($id: ID!, $cost: Decimal) {
productVariantUpdate(id: $id,
input: {costPrice: $cost, trackInventory: false}) {
errors {
field
message
}
productVariant {
id
}
}
}
"""
variables = {
'id': graphene.Node.to_global_id('ProductVariant', variant.id),
'cost': 12.1234}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products])
content = get_graphql_content(response)
error = content['data']['productVariantUpdate']['errors'][0]
assert error['field'] == 'costPrice'
def test_reporting_period_to_date():
now = timezone.now()
start_date = reporting_period_to_date(ReportingPeriod.TODAY)
assert start_date.day == now.day
assert start_date.hour == 0
assert start_date.minute == 0
assert start_date.second == 0
assert start_date.microsecond == 0
start_date = reporting_period_to_date(ReportingPeriod.THIS_MONTH)
assert start_date.month == now.month
assert start_date.day == 1
assert start_date.hour == 0
assert start_date.minute == 0
assert start_date.second == 0
assert start_date.microsecond == 0
def test_require_pagination(api_client):
query = """
query {
products {
edges {
node {
name
}
}
}
}
"""
response = api_client.post_graphql(query)
content = _get_graphql_content_from_response(response)
assert 'errors' in content
assert content['errors'][0]['message'] == (
'You must provide a `first` or `last` value to properly paginate the '
'`products` connection.')
def test_total_count_query(api_client, product):
query = """
query {
products {
totalCount
}
}
"""
response = api_client.post_graphql(query)
content = get_graphql_content(response)
assert content['data']['products']['totalCount'] == 1
|
Python
| 0.023907
|
@@ -430,16 +430,58 @@
to_date%0A
+from saleor.product.models import Product%0A
%0A%0Adef te
@@ -4304,14 +4304,36 @@
lCount'%5D ==
-1
+Product.objects.count()
%0A
|
a90ba7efb5b7d2cef7aa86b1c741d5d582ae5574
|
Fix SDK log output setting
|
senlin/drivers/openstack/sdk.py
|
senlin/drivers/openstack/sdk.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
SDK Client
'''
import sys
import functools
from oslo_config import cfg
from oslo_log import log as logging
import six
from openstack import connection
from openstack import exceptions as sdk_exc
from openstack import profile
from openstack import utils
from oslo_serialization import jsonutils
from requests import exceptions as req_exc
from senlin.common import exception as senlin_exc
USER_AGENT = 'senlin'
exc = sdk_exc
LOG = logging.getLogger(__name__)
utils.enable_logging(debug=True, stream=sys.stdout)
def parse_exception(ex):
'''Parse exception code and yield useful information.'''
code = 500
if isinstance(ex, sdk_exc.HttpException):
# some exceptions don't contain status_code
if ex.http_status is not None:
code = ex.http_status
message = ex.message
data = {}
if ex.details is None and ex.response is not None:
data = ex.response.json()
else:
try:
data = jsonutils.loads(ex.details)
except Exception:
# Some exceptions don't have details record or
# are not in JSON format
pass
# try dig more into the exception record
# usually 'data' has two types of format :
# type1: {"forbidden": {"message": "error message", "code": 403}
# type2: {"code": 404, "error": { "message": "not found"}}
if data:
code = data.get('code', code)
message = data.get('message', message)
error = data.get('error', None)
if error:
code = data.get('code', code)
message = data['error'].get('message', message)
else:
for value in data.values():
code = value.get('code', code)
message = value.get('message', message)
elif isinstance(ex, sdk_exc.SDKException):
# Besides HttpException there are some other exceptions like
# ResourceTimeout can be raised from SDK, handle them here.
message = ex.message
elif isinstance(ex, req_exc.RequestException):
# Exceptions that are not captured by SDK
code = ex.errno
message = six.text_type(ex)
else:
# This could be a generic exception or something we don't understand
message = six.text_type(ex)
raise senlin_exc.InternalError(code=code, message=message)
def translate_exception(func):
"""Decorator for exception translation."""
@functools.wraps(func)
def invoke_with_catch(driver, *args, **kwargs):
try:
return func(driver, *args, **kwargs)
except Exception as ex:
LOG.exception(ex)
raise parse_exception(ex)
return invoke_with_catch
def create_connection(params=None):
if params is None:
params = {}
if params.get('token', None):
auth_plugin = 'token'
else:
auth_plugin = 'password'
prof = profile.Profile()
prof.set_version('identity', 'v3')
prof.set_version('messaging', 'v2')
if 'region_name' in params:
prof.set_region(prof.ALL, params['region_name'])
params.pop('region_name')
elif cfg.CONF.default_region_name:
prof.set_region(prof.ALL, cfg.CONF.default_region_name)
try:
conn = connection.Connection(profile=prof, user_agent=USER_AGENT,
auth_plugin=auth_plugin, **params)
except Exception as ex:
raise parse_exception(ex)
return conn
def authenticate(**kwargs):
'''Authenticate using openstack sdk based on user credential'''
conn = create_connection(kwargs)
access_info = {
'token': conn.session.get_token(),
'user_id': conn.session.get_user_id(),
'project_id': conn.session.get_project_id()
}
return access_info
|
Python
| 0.002074
|
@@ -796,24 +796,37 @@
import utils
+ as sdk_utils
%0Afrom oslo_s
@@ -1020,16 +1020,21 @@
name__)%0A
+%0Asdk_
utils.en
@@ -1056,11 +1056,12 @@
bug=
-Tru
+Fals
e, s
|
08125322609e97e868c5c712df9e35e4c556434d
|
Use enumerate() instead of managing an index variable.
|
httparchive.py
|
httparchive.py
|
#!/usr/bin/env python
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class HttpArchive(dict):
"""Dict with ArchivedHttpRequest keys and ArchivedHttpResponse values."""
pass
class ArchivedHttpRequest(object):
def __init__(self, command, host, path, request_body):
self.command = command
self.host = host
self.path = path
self.request_body = request_body
def __repr__(self):
return repr((self.command, self.host, self.path, self.request_body))
def __hash__(self):
return hash(self.__repr__())
def __eq__(self, other):
return self.__repr__() == other.__repr__()
class ArchivedHttpResponse(object):
def __init__(self, status, reason, headers, response_data):
self.status = status
self.reason = reason
self.headers = headers
self.response_data = response_data
def get_header(self, key):
for k, v in self.headers:
if key == k:
return v
return None
def set_header(self, key, value):
i = 0
for k, v in self.headers:
if key == k:
self.headers[i] = (key, value)
return
i = i + 1
self.headers.append((key, value))
def remove_header(self, key):
i = 0
for k, v in self.headers:
if key == k:
self.headers.pop(i)
return
i = i + 1
|
Python
| 0.999997
|
@@ -1518,38 +1518,43 @@
e):%0A
-i = 0%0A
for
+i, (
k, v
+)
in
+enumerate(
self.hea
@@ -1549,32 +1549,33 @@
ate(self.headers
+)
:%0A if key =
@@ -1633,32 +1633,16 @@
return%0A
- i = i + 1%0A
self
@@ -1708,26 +1708,16 @@
y):%0A
- i = 0%0A
for
k, v
@@ -1708,32 +1708,47 @@
y):%0A for
+i, (
k, v
+)
in
+enumerate(
self.headers
@@ -1739,32 +1739,33 @@
ate(self.headers
+)
:%0A if key =
@@ -1797,16 +1797,16 @@
.pop(i)%0A
+
@@ -1816,20 +1816,4 @@
urn%0A
- i = i + 1%0A
|
d5482b10a712863c36a59d8ce82f3958ec41e78b
|
Add CORS on /swagger.json
|
APITaxi/api/__init__.py
|
APITaxi/api/__init__.py
|
# -*- coding: utf-8 -*-
from flask.ext.restplus import apidoc, Api
from flask import Blueprint, render_template
api_blueprint = Blueprint('api', __name__)
api = Api(api_blueprint, doc=False, catch_all_404s=True,
title='API version 2.0')
ns_administrative = api.namespace('administrative',
description="Administrative APIs", path='/')
def init_app(app):
from . import hail, taxi, ads, drivers, zupc, profile, vehicle, documents
api.init_app(app, add_specs=False)
app.register_blueprint(api_blueprint)
app.register_blueprint(apidoc.apidoc)
@app.route('/swagger.json', endpoint='api.specs')
def swagger():
return render_template('swagger.json', host=app.config['SERVER_NAME']), 200,
{'Content-Type': 'application/json'}
|
Python
| 0.000001
|
@@ -104,16 +104,52 @@
template
+%0Afrom flask_cors import cross_origin
%0A%0Aapi_bl
@@ -657,16 +657,36 @@
specs')%0A
+ @cross_origin()%0A
def
|
d16b57f3edca478622b84f56dfee7b2eea1f7498
|
Add basic reporting
|
botbot/report.py
|
botbot/report.py
|
"""Generate a report about file errors"""
import os
import sys
import math
from pkg_resources import resource_exists, resource_filename
from jinja2 import Environment, FileSystemLoader
from . import problems
_DEFAULT_RES_PATH = os.path.join('resources', 'templates')
_GENERIC_REPORT_NAME = 'generic.txt'
_ENV_REPORT_NAME = 'env.txt'
class ReporterBase():
def __init__(self, chkr):
self.chkr = chkr
def write_status(self, barlen):
"""Write where we're at"""
done = self.chkr.status['checked']
total = self.chkr.status['files']
perc = done / total
filllen = math.ceil(perc * barlen)
print('[{0}] {1:.0%}\r'.format(filllen * '#' + (barlen - filllen) * '-', perc), end='')
if perc == 1:
print('\n', end='')
sys.stdout.flush()
def _get_template_filename(self, name):
"""Find the filename of a template. Can be a filename or just a name."""
parts = str(name).split('.')
if parts[len(parts) - 1] == 'txt':
return name
else:
return '.'.join(parts + ['txt'])
def _get_supporting_prob_info(self, probid):
return problems.every_problem.get(probid)
def _get_env(self, template):
tmppath = os.path.join(_DEFAULT_RES_PATH,
self._get_template_filename(template))
if resource_exists(__package__, tmppath):
return Environment(
loader=FileSystemLoader(resource_filename(__package__, _DEFAULT_RES_PATH)),
trim_blocks=True
)
else:
raise FileNotFoundError('No such template')
class OneshotReporter(ReporterBase):
"""Does one-off reports after one-off checks"""
def __init__(self, chkr, out=sys.stdout):
super().__init__(chkr)
self.out = out
def _should_print_report(self, filelist):
for values in filelist.values():
if values:
return True
return False
def write_report(self, fmt, shared, attr='problems'):
print(self.chkr.checked)
class DaemonReporter(ReporterBase):
"""Reports issues in daemon mode"""
def __init__(self, chkr):
super().__init__(chkr)
def write_report(self):
"""
Continuously report. (Note: this implementation is temporary until
email gets working.)
"""
#TODO: implement emailing!
queue = self.chkr.checked
while queue:
finfo = queue.pop()
print("{} -- {}".format(finfo['path'], ', '.join(finfo['problems'])))
class EnvReporter(ReporterBase):
"""Reports environment issues"""
def __init__(self, chkr, out=sys.stdout):
"""Constructor for the EnvReporter"""
self.out = out
self.chkr = chkr
def write_report(self):
"""Write a report on environment variables"""
env = self._get_env(_ENV_REPORT_NAME)
if self.chkr.problems:
tempgen = env.get_template(_ENV_REPORT_NAME).generate(
problist=[(self._get_supporting_prob_info(p[0]), p[1])
for p in self.chkr.problems]
)
if self.out != sys.stdout:
print('Writing report to {}.'.format(self.out))
out = open(self.out, mode='w')
else:
print('Report:')
out = sys.stdout
for line in tempgen:
print(line, file=out, end='')
print('\n', file=out, end='')
out.close()
else:
print('No problems here!')
|
Python
| 0
|
@@ -2061,24 +2061,26 @@
s'):%0A
+ #
print(self.
@@ -2092,16 +2092,301 @@
checked)
+%0A for user, probs in self.chkr.checked.items():%0A if probs:%0A print(user.pw_gecos)%0A for prob, files in probs.items():%0A print('%5Ct' + prob)%0A for f in files:%0A print('%5Ct%5Ct' + str(f))
%0A%0Aclass
|
6e525872537cd31a80cb791d6594a1f6800c61b4
|
add invers option, add args-parsing
|
i2c/PCF8574.py
|
i2c/PCF8574.py
|
#!/usr/bin/python
import sys
import smbus
import time
# Reads data from PCF8574 and prints the state of each port
def readPCF8574(busnumber,address):
address = int(address,16)
busnumber = int(1)
bus = smbus.SMBus(busnumber)
state = bus.read_byte(address);
for i in range(0,8):
port = "port " + str(i)
value = 1&(state>>7-i)
print str(port) + ': ' + str(value)
if len(sys.argv) != 3:
print "Usage: python PCF8574.py bus address"
exit(1)
bus = sys.argv[1]
address = sys.argv[2]
readPCF8574(bus,address)
|
Python
| 0.000002
|
@@ -46,16 +46,32 @@
ort time
+%0Aimport argparse
%0A%0A# Read
@@ -124,17 +124,16 @@
ch port%0A
-%0A
def read
@@ -210,17 +210,25 @@
r = int(
-1
+busnumber
)%0A bu
@@ -290,17 +290,16 @@
dress);%0A
-%0A
for
@@ -431,152 +431,813 @@
%0A%0A
-%0A%0Aif len(sys.argv) != 3:%0A print %22Usage: python PCF8574.py bus address%22%0A exit(1)%0A%0Abus = sys.argv%5B1%5D%0Aaddress = sys.argv%5B2%5D%0A%0AreadPCF8574(bus,
+# Reads data from PCF8574 and prints the inverted state of each port%0Adef readPCF8574_INV(busnumber,address):%0A address = int(address,16)%0A busnumber = int(busnumber)%0A bus = smbus.SMBus(busnumber)%0A state = 255 - bus.read_byte(address);%0A for i in range(0,8):%0A port = %22port %22 + str(i)%0A value = 1&(state%3E%3E(7-i))%0A print str(port) + ': ' + str(value)%0A%0A# parse arguments%0Aparser = argparse.ArgumentParser()%0Aparser.add_argument(%22-i%22,action='store_true', help=%22Invert the bit of in- and output%22)%0Aparser.add_argument('i2c_bus', help='Number of active i2c-bus (0 or 1)')%0Aparser.add_argument('i2c_address', help='address of PCF8574')%0Aargs = parser.parse_args()%0A%0A# run commands%0Aif args.i:%0A readPCF8574_INV(args.i2c_bus,args.i2c_address)%0Aelse:%0A readPCF8574(args.i2c_bus,args.i2c_
addr
|
9cc436d1abec0d4c31fd52b0f2ff1f30ab889fb1
|
Change Record blocks from list to tuple
|
ehrcorral/herd.py
|
ehrcorral/herd.py
|
# -*- coding: utf-8 -*-
"""Contains core classes and functions for defining populations and acting upon
them.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from __future__ import unicode_literals
import sys
from collections import namedtuple
import jellyfish
import metaphone
PROFILE_FIELDS = (
'forename',
'mid_forename',
'current_surname',
'birth_surname',
'suffix',
'address',
'sex',
'gender',
'ssn',
'birth_year',
'birth_month',
'birth_day',
'blood_type',
)
META_FIELDS = (
'person', # Unique to this individual, which can be changed if match found
'accession', # Unique number in entire herd to identify this record
)
PHONEMES = (
'soundex',
'nysiis',
'metaphone',
'dmetaphone',
)
compression_dispatch = {
'soundex': jellyfish.soundex,
'nysiis': jellyfish.nysiis,
'metaphone': jellyfish.metaphone,
'dmetaphone': metaphone.doublemetaphone
}
def compress(names, method):
"""Compresses surnames using different phonemic algorithms.
Args:
names (list): A list of names, typically surnames
method (str): A phonemic compression algorithm. Must be one of
:py:data::PHONEMES.
Returns:
A list of the compressions.
"""
if not isinstance(names, list):
ValueError("Expected a list of names, got a {}.".format(type(names)))
compressions = map(compression_dispatch[method], names)
# Double metaphone returns a list of tuples, so need to unpack it
if method == 'dmetaphone':
compressions = [compression for dmetaphone in compressions
for compression in dmetaphone if compression != '']
return compressions
class Profile(namedtuple('Profile', PROFILE_FIELDS)):
__slots__ = () # Prevent per-instance dictionaries to reduce memory
class Meta(namedtuple('Meta', META_FIELDS)):
__slots__ = ()
class Record(object):
"""A Record contains identifying information about a patient, as well as
generated phonemic and meta information.
"""
def __init__(self):
self.profile = None
self._meta = None
self._blocks = None
def __unicode__(self):
if self.profile is None:
return ''
else:
return str(self.profile._asdict())
def __str__(self):
return unicode(self).encode('utf-8')
def gen_blocks(self, blocking_method):
"""Generate and set the blocking codes for a given record.
Blocking codes are comprised of the phonemic compressions of the
profile surnames combined with the first letter of each forename.
Generated blocking codes are stored in self._blocks, and only contain
the unique set of blocking codes.
Args:
blocking_method (str): Which phonemic compression to use for the
generation of blocks. Must be one of :py:data::PHONEMES.
"""
blocks = []
profile = self.profile
surnames = [profile.current_surname, profile.birth_surname]
surnames = [surname for surname in surnames if surname != '']
bases = compress(surnames, blocking_method)
# Bases are now [PJTR, PHTR] - base phonemic compressions of surnames
forenames = [profile.forename, profile.mid_forename]
forenames = [forename for forename in forenames if forename != '']
# Append 1st letter of each forename to each surname compression
for base in bases:
for forename in forenames:
block = base + forename[0]
blocks.append(block.upper())
self._blocks = list(set(blocks))
class Herd(object):
"""A collection of :py:class:`.Record`s with methods for interacting with
and linking records in the herd.
"""
def __init__(self):
self._population = None
def __unicode__(self):
population = self._population
if population is None:
return str(())
elif len(population) >= 4:
return "({},\n {}\n ...,\n {},\n {})".format(
population[0],
population[1],
population[-2],
population[-1]
)
else:
return str(population)
def __str__(self):
return unicode(self).encode('utf-8')
@property
def size(self):
"""Returns the size of the Herd's population."""
population = self._population
if population is None:
return 0
else:
return len(population)
def populate(self, records):
"""Sets the Herd's sub-population.
Args:
records (list, tuple): A list or tuple of :py:class:`._Record`s
"""
if not isinstance(records, (tuple, list)):
raise ValueError("Expected a tuple or list.")
if isinstance(records, list):
records = tuple(records)
self._population = records
def explode(self, blocking='dmetaphone'):
"""Generates primary and exploded phonemic blocking codes for each
Record.
The primary blocking code uses the current surname and first forename
and exploded blocking codes use various combinations of birth surname,
first surname, and middle name.
"""
if blocking not in PHONEMES:
raise ValueError("Blocking must be be one of {}.".format(PHONEMES))
try:
for record in self._population:
record.gen_blocks(blocking)
except TypeError:
exc_type, trace = sys.exc_info()[:2]
raise TypeError("You must populate the Herd first."), None, trace
finally:
# Clear per https://docs.python.org/2/library/sys.html#sys.exc_info
sys.exc_info()
def gen_record(data):
"""Generate a :py:class:`.Record` which can be used to populate a
:py:class:`Herd`.
In addition to extracting the profile information for
Args:
data (dict): A dictionary containing at least one of fields in
:py:data::PROFILE_FIELDS.
Returns:
A object of class :py:class:`.Record`.
"""
fields = [data.get(field, '') for field in PROFILE_FIELDS]
profile = Profile._make(fields)
if len(profile.forename) < 1 or len(profile.current_surname) < 1:
raise ValueError("A forename and current surname must be supplied.")
record = Record()
record.profile = profile
return record
|
Python
| 0.000001
|
@@ -3704,20 +3704,21 @@
locks =
-list
+tuple
(set(blo
@@ -5037,23 +5037,22 @@
def
-explode
+corral
(self, b
@@ -5074,16 +5074,200 @@
hone'):%0A
+ if blocking not in PHONEMES:%0A raise ValueError(%22Blocking must be be one of %7B%7D.%22.format(PHONEMES))%0A self._explode(blocking)%0A%0A def _explode(self, blocking):%0A
@@ -5563,125 +5563,8 @@
%22%22%22%0A
- if blocking not in PHONEMES:%0A raise ValueError(%22Blocking must be be one of %7B%7D.%22.format(PHONEMES))%0A
|
7cca2fab9fe697fe0e31be0ea6dcd43e29028bfb
|
better example output
|
example/shapes.py
|
example/shapes.py
|
from rdc.etl.status.console import ConsoleStatus
from rdc.etl.transform.util import Log
from rdc.etl.transform.extract import Extract
from rdc.etl.harness.threaded2 import ThreadedHarness as ThreadedHarness2
from rdc.etl.harness.threaded import ThreadedHarness
def build_producer(name):
return Extract(({'producer': name, 'id': 1}, {'producer': name, 'id': 2}))
for Harness in ThreadedHarness, ThreadedHarness2:
print
print "-------------------------------"
print "With %r" % Harness
print
print
h = Harness()
h.status.append(ConsoleStatus())
p1 = build_producer('p1')
h.chain_add(p1, Log())
h()
print
print
|
Python
| 0.999999
|
@@ -1,52 +1,17 @@
-from rdc.etl.status.console import ConsoleStatus
+import pprint
%0Afro
@@ -326,16 +326,57 @@
: 2%7D))%0A%0A
+print '%3E%3E%3E Test of simple linear shape'%0A%0A
for Harn
@@ -430,156 +430,45 @@
rint
-%0A print %22-------------------------------%22%0A print %22With %25r%22 %25 Harness%0A print%0A print%0A h = Harness()%0A h.status.append(ConsoleStatus()
+ %22With %25r%22 %25 Harness%0A h = Harness(
)%0A
@@ -530,16 +530,17 @@
h()%0A
+%0A
prin
@@ -540,16 +540,60 @@
print
+ 'Summary:'%0A pprint.pprint(h._transforms)
%0A pri
@@ -594,10 +594,15 @@
print
+ '%5Cn'
%0A%0A
|
229a0db6574f75acf94cad6612dd39351fa6656a
|
Use absolute import. (Should this go into 2.5?)
|
Lib/test/test_cpickle.py
|
Lib/test/test_cpickle.py
|
import cPickle
import unittest
from cStringIO import StringIO
from pickletester import AbstractPickleTests, AbstractPickleModuleTests
from test import test_support
class cPickleTests(AbstractPickleTests, AbstractPickleModuleTests):
def setUp(self):
self.dumps = cPickle.dumps
self.loads = cPickle.loads
error = cPickle.BadPickleGet
module = cPickle
class cPicklePicklerTests(AbstractPickleTests):
def dumps(self, arg, proto=0):
f = StringIO()
p = cPickle.Pickler(f, proto)
p.dump(arg)
f.seek(0)
return f.read()
def loads(self, buf):
f = StringIO(buf)
p = cPickle.Unpickler(f)
return p.load()
error = cPickle.BadPickleGet
class cPickleListPicklerTests(AbstractPickleTests):
def dumps(self, arg, proto=0):
p = cPickle.Pickler(proto)
p.dump(arg)
return p.getvalue()
def loads(self, *args):
f = StringIO(args[0])
p = cPickle.Unpickler(f)
return p.load()
error = cPickle.BadPickleGet
class cPickleFastPicklerTests(AbstractPickleTests):
def dumps(self, arg, proto=0):
f = StringIO()
p = cPickle.Pickler(f, proto)
p.fast = 1
p.dump(arg)
f.seek(0)
return f.read()
def loads(self, *args):
f = StringIO(args[0])
p = cPickle.Unpickler(f)
return p.load()
error = cPickle.BadPickleGet
def test_recursive_list(self):
self.assertRaises(ValueError,
AbstractPickleTests.test_recursive_list,
self)
def test_recursive_inst(self):
self.assertRaises(ValueError,
AbstractPickleTests.test_recursive_inst,
self)
def test_recursive_dict(self):
self.assertRaises(ValueError,
AbstractPickleTests.test_recursive_dict,
self)
def test_recursive_multi(self):
self.assertRaises(ValueError,
AbstractPickleTests.test_recursive_multi,
self)
def test_nonrecursive_deep(self):
# If it's not cyclic, it should pickle OK even if the nesting
# depth exceeds PY_CPICKLE_FAST_LIMIT. That happens to be
# 50 today. Jack Jansen reported stack overflow on Mac OS 9
# at 64.
a = []
for i in range(60):
a = [a]
b = self.loads(self.dumps(a))
self.assertEqual(a, b)
def test_main():
test_support.run_unittest(
cPickleTests,
cPicklePicklerTests,
cPickleListPicklerTests,
cPickleFastPicklerTests
)
if __name__ == "__main__":
test_main()
|
Python
| 0
|
@@ -60,16 +60,21 @@
IO%0Afrom
+test.
picklete
|
c888e52788ec37641f97f761d2052902db20582a
|
Add missing dates
|
erpnext/accounts/dashboard.py
|
erpnext/accounts/dashboard.py
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
from itertools import groupby
from operator import itemgetter
import frappe
from frappe.utils import add_to_date
from erpnext.accounts.report.general_ledger.general_ledger import execute
def get(filters=None):
filters = frappe._dict({
"company": "Gadget Technologies Pvt. Ltd.",
"from_date": get_from_date_from_timespan(filters.get("timespan")),
"to_date": "2020-12-12",
"account": "Cash - GTPL",
"group_by": "Group by Voucher (Consolidated)"
})
report_columns, report_results = execute(filters=filters)
interesting_fields = ["posting_date", "balance"]
columns = [column for column in report_columns if column["fieldname"] in interesting_fields]
_results = []
for row in report_results[1:-2]:
_results.append([row[key] for key in interesting_fields])
grouped_results = groupby(_results, key=itemgetter(0))
results = [list(values)[-1] for key, values in grouped_results]
return {
"labels": [result[0] for result in results],
"datasets": [{
"name": "Cash - GTPL",
"values": [result[1] for result in results]
}]
}
def get_from_date_from_timespan(timespan):
days = months = years = 0
if "Last Week" == timespan:
days = -7
if "Last Month" == timespan:
months = -1
elif "Last Quarter" == timespan:
months = -3
elif "Last Year" == timespan:
years = -1
return add_to_date(None, years=years, months=months, days=days,
as_string=True, as_datetime=True)
|
Python
| 0.000043
|
@@ -273,17 +273,46 @@
_to_date
+, date_diff, getdate, nowdate
%0A
-
from erp
@@ -1148,24 +1148,86 @@
d_results%5D%0A%0A
+ results = add_missing_dates(results, from_date, to_date)%0A%0A
return %7B
@@ -1766,16 +1766,16 @@
s=days,%0A
-
@@ -1808,8 +1808,540 @@
e=True)%0A
+%0Adef add_missing_dates(incomplete_results, from_date, to_date):%0A dates = %5Br%5B0%5D for r in incomplete_results%5D%0A day_count = date_diff(to_date, from_date)%0A%0A results_dict = dict(incomplete_results)%0A last_date, last_balance = incomplete_results%5B0%5D%0A results = %5B%5D%0A for date in (add_to_date(getdate(from_date), days=n) for n in range(day_count + 1)):%0A if date in results_dict:%0A last_date = date%0A last_balance = results_dict%5Bdate%5D%0A results.append(%5Bdate, last_balance%5D)%0A return results%0A
|
dca8dce24e0bea671b52d456909c35e43c4f5929
|
move exchange endpoint into consumer urlspace
|
example/urls.py
|
example/urls.py
|
from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
from django.contrib import admin
from .views import ConsumerView, ConsumerExchangeView
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^$', TemplateView.as_view(template_name='example/home.html'), name='home'),
url(r'^exchange/', ConsumerExchangeView.as_view(), name='exchange'),
url(r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name': 'example/login.html'}),
url(r'^admin/', include(admin.site.urls)),
url(r'^o/', include('oauth2_provider.urls')),
url(r'^consumer/$', ConsumerView.as_view(), name="consumer"),
)
|
Python
| 0.000002
|
@@ -156,16 +156,19 @@
merView,
+
Consume
@@ -333,16 +333,25 @@
url(r'%5E
+consumer/
exchange
|
dcc472a6c8e15e7fc105277332681b38e40640df
|
Revert open_file_dialog example
|
examples/open_file_dialog.py
|
examples/open_file_dialog.py
|
import webview
import threading
"""
This example demonstrates creating an open file dialog.
"""
def open_file_dialog():
import time
time.sleep(5)
print(webview.create_file_dialog(webview.OPEN_DIALOG, allow_multiple=False))
if __name__ == '__main__':
t = threading.Thread(target=open_file_dialog)
t.start()
webview.create_window("Open file dialog example", "http://www.flowrl.com")
|
Python
| 0
|
@@ -226,12 +226,11 @@
ple=
-Fals
+Tru
e))%0A
@@ -403,8 +403,9 @@
.com%22)%0A%0A
+%0A
|
ef0e9f59ee1df18a5c37a559e78d0350d9e0a624
|
Use `import_by_path`/`import_string` instead of manually `__import__`ing things
|
enumfields/fields.py
|
enumfields/fields.py
|
from django.core.exceptions import ValidationError
from django.db import models
from enum import Enum
import six
from django.db.models.fields import NOT_PROVIDED
class EnumFieldMixin(six.with_metaclass(models.SubfieldBase)):
def __init__(self, enum, **options):
if isinstance(enum, six.string_types):
module_name, class_name = enum.rsplit('.', 1)
module = __import__(module_name, globals(), locals(), [class_name])
self.enum = getattr(module, class_name)
else:
self.enum = enum
if "choices" not in options:
options["choices"] = [(i, i.name) for i in self.enum] # choices for the TypedChoiceField
super(EnumFieldMixin, self).__init__(**options)
def to_python(self, value):
if value is None or value == '':
return None
for m in self.enum:
if value == m:
return value
if value == m.value or str(value) == str(m.value) or str(value) == str(m):
return m
raise ValidationError('%s is not a valid value for enum %s' % (value, self.enum))
def get_prep_value(self, value):
return None if value is None else value.value
def value_to_string(self, obj):
"""
This method is needed to support proper serialization. While its name is value_to_string()
the real meaning of the method is to convert the value to some serializable format.
Since most of the enum values are strings or integers we WILL NOT convert it to string
to enable integers to be serialized natively.
"""
value = self._get_val_from_obj(obj)
return value.value if value else None
def get_default(self):
if self.has_default():
if self.default is None:
return None
if isinstance(self.default, Enum):
return self.default
return self.enum(self.default)
return super(EnumFieldMixin, self).get_default()
def deconstruct(self):
name, path, args, kwargs = super(EnumFieldMixin, self).deconstruct()
kwargs['enum'] = self.enum
kwargs.pop('choices', None)
if 'default' in kwargs:
if hasattr(kwargs["default"], "value"):
kwargs["default"] = kwargs["default"].value
return name, path, args, kwargs
class EnumField(EnumFieldMixin, models.CharField):
def __init__(self, enum, *args, **kwargs):
kwargs.setdefault("max_length", 10)
super(EnumField, self).__init__(enum, **kwargs)
self.validators = []
class EnumIntegerField(EnumFieldMixin, models.IntegerField):
def get_prep_value(self, value):
if value is None:
return None
if isinstance(value, Enum):
return value.value
try:
return int(value)
except ValueError:
return self.to_python(value).value
# South compatibility stuff
def converter_func(enum_class):
return "'%s.%s'" % (enum_class.__module__, enum_class.__name__)
def enum_value(an_enum):
return an_enum.value
rules = [
(
[EnumFieldMixin],
[],
{
"enum": ["enum", {'is_django_function': True, "converter": converter_func}],
"default": ['default', {'default': NOT_PROVIDED, 'ignore_dynamics': True,
'converter': enum_value}]},
)
]
try:
from south.modelsinspector import add_introspection_rules
add_introspection_rules(rules, ["^enumfields\.fields"])
except ImportError:
pass
|
Python
| 0.000003
|
@@ -156,16 +156,176 @@
OVIDED%0A%0A
+try:%0A from django.utils.module_loading import import_string%0Aexcept ImportError:%0A from django.utils.module_loading import import_by_path as import_string%0A%0A
%0Aclass E
@@ -484,184 +484,38 @@
-module_name, class_name = enum.rsplit('.', 1)%0A module
+self.enum
=
-__
import_
-_(module_name, globals(), locals(), %5Bclass_name%5D)%0A self.enum = getattr(module, class_name
+string(enum
)%0A
@@ -2621,17 +2621,16 @@
= %5B%5D%0A%0A%0A
-%0A
class En
|
965236870ce5bf6dcbe9398b444b977c796b096e
|
set the right keyword to the close function
|
simphony_paraview/tests/test_show.py
|
simphony_paraview/tests/test_show.py
|
import unittest
from hypothesis import given
from paraview import servermanager
from paraview.simple import Disconnect
from simphony_paraview.show import show
from simphony_paraview.core.testing import cuds_containers
class TestShow(unittest.TestCase):
def setUp(self):
if servermanager.ActiveConnection is not None:
Disconnect()
self.closed = False
def tearDown(self):
if servermanager.ActiveConnection is not None:
raise RuntimeError('There is still an active connection')
@given(cuds_containers)
def test_valid_cuds_containers(self, setup):
# XXX This is a very basic test.
# given
cuds, kind = setup
def close(obj, event):
obj.TerminateApp()
show(cuds, close)
def test_unknown_container(self):
container = object()
with self.assertRaises(TypeError):
show(container)
|
Python
| 0.000021
|
@@ -775,16 +775,24 @@
w(cuds,
+testing=
close)%0A%0A
|
c0358584f2b5a05947ebb558c6d10293cc969a1a
|
Fix tests
|
tests/test_dependenpy.py
|
tests/test_dependenpy.py
|
# -*- coding: utf-8 -*-
"""Main test script."""
from dependenpy.cli import main
def test_main():
"""Main test method."""
main('dependenpy')
|
Python
| 0.000003
|
@@ -131,16 +131,24 @@
main(
+%5B'-lm',
'depende
@@ -151,10 +151,11 @@
endenpy'
+%5D
)%0A
|
437e01df9f8582256aaa5ab1a5cd4c0fc1aef373
|
Use self.course_id rather than hard code when removing lab assistants from enrollment tests
|
tests/test_enrollment.py
|
tests/test_enrollment.py
|
from tests import OkTestCase
from server.models import db, Enrollment, User
from server.forms import EnrollmentForm, BatchEnrollmentForm
from server.constants import STUDENT_ROLE, LAB_ASSISTANT_ROLE
class TestEnrollment(OkTestCase):
def setUp(self):
super().setUp()
self.studentA = {
'name': 'Frank Underwood',
'email': 'frank.underwood@whitehouse.gov',
'sid': '123456789',
'class_account': 'cs61a-fu',
'section': '101',
'role': STUDENT_ROLE
}
self.studentB = {
'name': 'Claire Underwood',
'email': 'claire.underwood@whitehouse.gov',
'sid': '987654321',
'class_account': 'cs61a-cu',
'section': '102',
'role': STUDENT_ROLE
}
self.studentB_alt = {
'name': 'Claire Hale Underwood',
'email': 'claire.underwood@whitehouse.gov',
'sid': '9876543210',
'class_account': 'cs61a-chu',
'section': '103',
'role': STUDENT_ROLE
}
self.lab_assistantA = {
'name': 'Ned Stark',
'email': 'eddard.stark@winterfell.com',
'sid': '152342343',
'section': '101',
'role': LAB_ASSISTANT_ROLE
}
self.lab_assistantB = {
'name': 'Robb Stark',
'email': 'robb.stark@winterfell.com',
'sid': '189693423',
'section': '102',
'role': LAB_ASSISTANT_ROLE
}
def test_create(self):
self.setup_course()
user = User(name=self.studentA['name'], email=self.studentA['email'])
db.session.add(user)
db.session.commit()
self.studentA['id'] = user.id
Enrollment.create(self.course.id, [self.studentA])
self.enrollment_matches_info(user, self.studentA)
def test_enroll_from_form(self):
self.setup_course()
Enrollment.enroll_from_form(self.course.id, make_enrollment_form(self.studentB))
user = User.lookup(self.studentB['email'])
self.studentB['id'] = user.id
self.enrollment_matches_info(user, self.studentB)
Enrollment.enroll_from_form(self.course.id, make_enrollment_form(self.lab_assistantA))
lab_assistant = User.lookup(self.lab_assistantA['email'])
self.lab_assistantA['id'] = lab_assistant.id
self.enrollment_matches_info(lab_assistant, self.lab_assistantA)
def test_enroll_from_csv(self):
self.setup_course()
template = "{email},{name},{sid},{class_account},{section}"
form = BatchEnrollmentForm()
form.csv.data = template.format(**self.studentA) + "\n" + template.format(**self.studentB)
Enrollment.enroll_from_csv(self.course.id, form)
userA = User.lookup(self.studentA['email'])
self.studentA['id'] = userA.id
self.enrollment_matches_info(userA, self.studentA)
userB = User.lookup(self.studentB['email'])
self.studentB['id'] = userB.id
self.enrollment_matches_info(userB, self.studentB)
def test_enroll_twice(self):
self.setup_course()
form = make_enrollment_form(self.studentB)
Enrollment.enroll_from_form(self.course.id, form)
user = User.lookup(self.studentB['email'])
self.studentB['id'] = user.id
self.enrollment_matches_info(user, self.studentB)
form = make_enrollment_form(self.studentB_alt)
Enrollment.enroll_from_form(self.course.id, form)
user_updated = User.lookup(self.studentB['email'])
self.studentB_alt['id'] = user_updated.id
assert user.id == user_updated.id
self.enrollment_matches_info(user, self.studentB_alt)
def enrollment_matches_info(self, user, info):
query = Enrollment.query.filter_by(
user=user,
course=self.course
)
assert query.count() == 1
enrollment = query[0]
assert enrollment.user.name == info.get('name')
assert enrollment.user.email == info.get('email')
assert enrollment.user_id == info.get('id')
assert enrollment.sid == info.get('sid')
assert enrollment.class_account == info.get('class_account')
assert enrollment.section == info.get('section')
assert enrollment.role == info.get('role')
def remove_lab_assistants(self):
[db.session.delete(e) for e in (Enrollment.query
.options(db.joinedload('user'))
.filter_by(role = LAB_ASSISTANT_ROLE,
course_id = 1)
.all()
)]
db.session.commit()
def test_lab_assistant_enroll_web(self):
self.setup_course()
self.remove_lab_assistants()
self.login(self.staff1.email)
response = self.client.get('/admin/course/{}/enrollment'.format(self.course.id))
self.assert200(response)
source = response.get_data().decode("utf-8")
self.assertTrue("<span> Student </span>" in source)
self.assertTrue("<span> Staff </span>" in source)
self.assertFalse("<span> Lab Assistant </span>" in source)
response = self.client.post('/admin/course/{}/enrollment'.format(self.course.id),
data=self.lab_assistantA, follow_redirects=True)
self.assert200(response)
source = response.get_data().decode("utf-8")
self.assertTrue("<span> Student </span>" in source)
self.assertTrue("<span> Staff </span>" in source)
self.assertTrue("<span> Lab Assistant </span>" in source)
response = self.client.get('/admin/course/{}/enrollment'.format(self.course.id))
self.assert200(response)
source = response.get_data().decode("utf-8")
self.assertTrue("<span> Student </span>" in source)
self.assertTrue("<span> Staff </span>" in source)
self.assertTrue("<span> Lab Assistant </span>" in source)
self.login(self.user1.email)
response = self.client.post('/admin/course/{}/enrollment'.format(self.course.id),
data=self.lab_assistantB)
self.assertRedirects(response, '/')
def make_enrollment_form(info):
form = EnrollmentForm()
form.name.data = info.get('name')
form.email.data = info.get('email')
form.sid.data = info.get('sid')
form.secondary.data = info.get('class_account')
form.section.data = info.get('section')
form.role.data = info.get('role')
return form
|
Python
| 0
|
@@ -4841,17 +4841,30 @@
se_id =
-1
+self.course_id
)%0A
|
f6debd39f929616ca72763682c25a52bc01b536b
|
Update test_filterbank.py
|
tests/test_filterbank.py
|
tests/test_filterbank.py
|
from blimpy import Filterbank, read_header, fix_header
import pylab as plt
import numpy as np
import os
from pprint import pprint
def test_voyager():
filename = '/workdata/bl/data/voyager_f1032192_t300_v2.fil'
fb = Filterbank(filename)
fb.info()
fb.plot_spectrum()
plt.show()
fb = Filterbank(filename, f_start=8420, f_stop=8420.5)
fb.info()
fb.plot_spectrum()
plt.show()
def test_voyager_extract():
filename = '/workdata/bl/data/voyager_f1032192_t300_v2.fil'
new_filename = 'voyager_ext.fil'
fb = Filterbank(filename, f_start=8420.1, f_stop=8420.3)
fb.info()
fb.plot_spectrum()
plt.show()
fb.write_to_filterbank(new_filename)
fb2 = Filterbank(new_filename)
fb2.info()
fb2.plot_spectrum()
plt.show()
os.remove(new_filename)
def test_voyager_fix_header():
filename = '/workdata/bl/data/voyager_f1032192_t300_v2.fil'
new_filename = 'voyager_ext.fil'
fb = Filterbank(filename, f_start=8420.1, f_stop=8420.3)
fb.write_to_filterbank(new_filename)
fb = Filterbank(new_filename)
filename = new_filename
assert read_header(filename)['ibeam'] == 1
fix_header(filename, 'ibeam', 7)
assert read_header(filename)['ibeam'] == 7
fix_header(filename, 'ibeam', 1)
assert read_header(filename)['ibeam'] == 1
fix_header(filename, 'ibeam', 13)
assert read_header(filename)['ibeam'] == 13
pprint(read_header(filename))
fix_header(filename, 'rawdatafile', './blc3_9bit_guppi_57386_VOYAGER1_0004.0000.raw')
assert read_header(filename)['rawdatafile'] == './blc3_9bit_guppi_57386_VOYAGER1_0004.0000.raw'
fix_header(filename, 'rawdatafile', './blc3_2bit_guppi_57386_VOYAGER1_0004.0000.raw')
assert read_header(filename)['rawdatafile'] == './blc3_2bit_guppi_57386_VOYAGER1_0004.0000.raw'
os.remove(new_filename)
def test_filterbank_gen():
""" Generate a blimpy from nothing """
filename = '/bldata/gbt_data/voyager_f1032192_t300_v2.fil'
fb0 = Filterbank(filename)
fb0.info()
fb = Filterbank(header_dict=fb0.header, data_array=fb0.data)
fb.info()
print "Writing to blimpy..."
fb.write_to_filterbank('test.fil')
print "Writing to hdf5..."
fb.write_to_hdf5('test.h5')
fb2 = Filterbank('test.h5')
fb2.info()
fb2.plot_spectrum()
plt.show()
os.remove('test.h5')
if __name__ == "__main__":
#test_voyager()
#test_voyager_extract()
#test_voyager_fix_header()
test_filterbank_gen()
|
Python
| 0.000001
|
@@ -2478,16 +2478,17 @@
r()%0A
+#
test_fil
@@ -2501,8 +2501,18 @@
k_gen()%0A
+ %0A %0A
|
dbf520bb4050c5e393a4de3be9c136fef1cd88f2
|
break test
|
tests/test_functional.py
|
tests/test_functional.py
|
# -*- coding: utf-8 -*-
"""Functional tests using WebTest.
See: http://webtest.readthedocs.org/
"""
import pytest
from flask import url_for
from foobar.user.models import User
from .factories import UserFactory
class TestLoggingIn:
def test_can_log_in_returns_200(self, user, testapp):
# Goes to homepage
res = testapp.get("/")
# Fills out login form in navbar
form = res.forms['loginForm']
form['username'] = user.username
form['password'] = 'myprecious'
# Submits
res = form.submit().follow()
assert res.status_code == 200
def test_sees_alert_on_log_out(self, user, testapp):
res = testapp.get("/")
# Fills out login form in navbar
form = res.forms['loginForm']
form['username'] = user.username
form['password'] = 'myprecious'
# Submits
res = form.submit().follow()
res = testapp.get(url_for('public.logout')).follow()
# sees alert
assert 'You are logged out.' in res
def test_sees_error_message_if_password_is_incorrect(self, user, testapp):
# Goes to homepage
res = testapp.get("/")
# Fills out login form, password incorrect
form = res.forms['loginForm']
form['username'] = user.username
form['password'] = 'wrong'
# Submits
res = form.submit()
# sees error
assert "Invalid password" in res
def test_sees_error_message_if_username_doesnt_exist(self, user, testapp):
# Goes to homepage
res = testapp.get("/")
# Fills out login form, password incorrect
form = res.forms['loginForm']
form['username'] = 'unknown'
form['password'] = 'myprecious'
# Submits
res = form.submit()
# sees error
assert "Unknown user" in res
class TestRegistering:
def test_can_register(self, user, testapp):
old_count = len(User.query.all())
# Goes to homepage
res = testapp.get("/")
# Clicks Create Account button
res = res.click("Create account")
# Fills out the form
form = res.forms["registerForm"]
form['username'] = 'foobar'
form['email'] = 'foo@bar.com'
form['password'] = 'secret'
form['confirm'] = 'secret'
# Submits
res = form.submit().follow()
assert res.status_code == 200
# A new user was created
assert len(User.query.all()) == old_count + 1
def test_sees_error_message_if_passwords_dont_match(self, user, testapp):
# Goes to registration page
res = testapp.get(url_for("public.register"))
# Fills out form, but passwords don't match
form = res.forms["registerForm"]
form['username'] = 'foobar'
form['email'] = 'foo@bar.com'
form['password'] = 'secret'
form['confirm'] = 'secrets'
# Submits
res = form.submit()
# sees error message
assert "Passwords must match" in res
def test_sees_error_message_if_user_already_registered(self, user, testapp):
user = UserFactory(active=True) # A registered user
user.save()
# Goes to registration page
res = testapp.get(url_for("public.register"))
# Fills out form, but username is already registered
form = res.forms["registerForm"]
form['username'] = user.username
form['email'] = 'foo@bar.com'
form['password'] = 'secret'
form['confirm'] = 'secret'
# Submits
res = form.submit()
# sees error
assert "Username already registered" in res
|
Python
| 0.000005
|
@@ -596,17 +596,17 @@
code ==
-2
+3
00%0A%0A
|
a6435a8713985464b8c37a438ac035d65f66b4cd
|
Add more user mapfiles and validate
|
tests/test_large_file.py
|
tests/test_large_file.py
|
import logging
import cProfile
from mappyfile.parser import Parser
from mappyfile.pprint import PrettyPrinter
from mappyfile.transformer import MapfileToDict
def output(fn):
"""
Parse, transform, and pretty print
the result
"""
p = Parser()
m = MapfileToDict()
ast = p.parse_file(fn)
# print(ast)
d = m.transform(ast)
# print(d)
pp = PrettyPrinter(indent=0, newlinechar=" ", quote="'")
pp.pprint(d)
def main():
fns = [r"D:\Temp\large_map1.txt", r"D:\Temp\large_map2.txt"]
for fn in fns:
pr = cProfile.Profile()
pr.enable()
output(fn)
pr.disable()
pr.print_stats(sort='time')
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
main()
print("Done!")
|
Python
| 0
|
@@ -19,17 +19,67 @@
ort
-cProfile%0A
+os%0Aimport cProfile%0Aimport glob%0Aimport json%0Aimport mappyfile
%0Afro
@@ -202,16 +202,58 @@
eToDict%0A
+from mappyfile.validator import Validator%0A
%0A%0Adef ou
@@ -347,16 +347,37 @@
Parser(
+expand_includes=False
)%0A m
@@ -393,16 +393,36 @@
ToDict()
+%0A v = Validator()
%0A%0A as
@@ -492,96 +492,392 @@
-# print
+assert(v.validate
(d)
+)%0A
%0A
-pp = PrettyP
+output_file = fn + %22.map%22%0A%0A try:%0A mappyfile.utils.w
ri
-n
te
-r(indent=0, newlinechar=%22 %22, quote=%22'%22)%0A pp.pprint
+(d, output_file)%0A except Exception:%0A logging.warning(json.dumps(d, indent=4))%0A logging.warning(%22%25s could not be successfully re-written%22, fn)%0A raise%0A%0A # now try reading it again%0A ast = p.parse_file(output_file)%0A d = m.transform(ast)%0A%0A assert(v.validate
(d)
+)
%0A%0A%0Ad
@@ -895,62 +895,144 @@
-fns = %5Br%22D:%5CTemp%5Clarge_map1.txt%22, r%22D:%5CTemp%5Clarge_map2
+sample_dir = os.path.join(os.path.dirname(__file__), %22mapfiles%22)%0A mapfiles = glob.glob(sample_dir + '/*.txt')%0A # mapfiles = %5B%22map4
.txt
@@ -1053,12 +1053,101 @@
in
-fns:
+mapfiles:%0A print(%22Processing %7B%7D%22.format(fn))%0A fn = os.path.join(sample_dir, fn)
%0A
@@ -1238,24 +1238,26 @@
le()%0A
+ #
pr.print_st
|
2a816cbb29488861fe8897a6af9359db254018c1
|
Fix up test_paraboloid accuracy
|
tests/test_paraboloid.py
|
tests/test_paraboloid.py
|
import jtrace
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
def test_properties():
import random
for i in range(100):
A = random.gauss(0.7, 0.8)
B = random.gauss(0.8, 1.2)
para = jtrace.Paraboloid(A, B)
assert para.A == A
assert para.B == B
def test_call():
import random
for i in range(100):
A = random.gauss(0.2, 0.3)
B = random.gauss(0.4, 0.2)
para = jtrace.Paraboloid(A, B)
for j in range(10):
x = random.gauss(0.0, 1.0)
y = random.gauss(0.0, 1.0)
assert isclose(para(x, y), A*(x*x + y*y)+B)
def test_intersect():
import random
for i in range(100):
A = random.gauss(0.2, 0.1)
B = random.gauss(0.4, 0.2)
para = jtrace.Paraboloid(A, B)
for j in range(10):
x = random.gauss(0.0, 1.0)
y = random.gauss(0.0, 1.0)
# If we shoot rays straight up, then it's easy to predict the
# intersection points.
r = jtrace.Ray(x, y, -1000, 0, 0, 1, 0)
isec = para.intersect(r)
assert isclose(isec.point.x, x)
assert isclose(isec.point.y, y)
assert isclose(isec.point.z, para(x, y))
# We can also check just for mutual consistency of the paraboloid,
# ray and intersection.
vx = random.gauss(0.0, 0.001)
vy = random.gauss(0.0, 0.001)
vz = 1.0
v = jtrace.Vec3(vx, vy, vz).UnitVec3()
r = jtrace.Ray(jtrace.Vec3(x, y, -1000), v, 0)
isec = para.intersect(r)
p1 = r(isec.t)
p2 = isec.point
assert isclose(p1.x, p2.x)
assert isclose(p1.y, p2.y)
assert isclose(p1.z, p2.z)
assert isclose(para(p1.x, p2.y), p1.z, abs_tol=1e-3)
|
Python
| 0.999279
|
@@ -781,21 +781,23 @@
gauss(0.
-2
+05
, 0.
+0
1)%0A
@@ -1632,18 +1632,16 @@
, y, -10
-00
), v, 0)
@@ -1912,11 +1912,99 @@
_tol=1e-
-3
+6)%0A%0A%0Aif __name__ == '__main__':%0A test_properties()%0A test_call()%0A test_intersect(
)%0A
|
5e642c912ff7be5424e78e3dfe356c9579a39320
|
fix typo in get_networks function
|
web_frontend/cloudscheduler/csv2/utils.py
|
web_frontend/cloudscheduler/csv2/utils.py
|
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
from sqlalchemy.ext.automap import automap_base
import config
'''
dev code
= db_session.query(Cloud).filter(Cloud.cloud_type=="openstack")
db_session.merge(new_flav)
db_session.commit()
'''
def get_quotas(filter=None):
engine = create_engine("mysql://" + config.db_user + ":" + config.db_password + "@" + config.db_host + ":" + str(config.db_port) + "/" + config.db_name)
Base.prepare(engine, reflect=True)
db_session = Session(engine)
Quota = Base.classes.cloud_quotas
quota_list = db_session.query(Quota)
return quota_list
def get_vms(filter=None):
engine = create_engine("mysql://" + config.db_user + ":" + config.db_password + "@" + config.db_host + ":" + str(config.db_port) + "/" + config.db_name)
Base.prepare(engine, reflect=True)
db_session = Session(engine)
VM = Base.classes.cloud_vm
vm_list = db_session.query(VM)
return vm_list
def get_flavors(filter=None):
engine = create_engine("mysql://" + config.db_user + ":" + config.db_password + "@" + config.db_host + ":" + str(config.db_port) + "/" + config.db_name)
Base.prepare(engine, reflect=True)
db_session = Session(engine)
Flavors = Base.classes.cloud_flavors
flavor_list = db_session.query(Flavors)
return flavor_list
def get_images(filter=None):
engine = create_engine("mysql://" + config.db_user + ":" + config.db_password + "@" + config.db_host + ":" + str(config.db_port) + "/" + config.db_name)
Base.prepare(engine, reflect=True)
db_session = Session(engine)
Images = Base.classes.cloud_images
image_list = db_session.query(Images)
return image_list
def get_networks(filter=None):
engine = create_engine("mysql://" + config.db_user + ":" + config.db_password + "@" + config.db_host + ":" + str(config.db_port) + "/" + config.db_name)
Base.prepare(engine, reflect=True)
db_session = Session(engine)
Networks = Base.classes.cloud_quotas
network_list = db_session.query(Networks)
return network_list
def get_groups(filter=None):
engine = create_engine("mysql://" + config.db_user + ":" + config.db_password + "@" + config.db_host + ":" + str(config.db_port) + "/" + config.db_name)
Base.prepare(engine, reflect=True)
db_session = Session(engine)
Groups = Base.classes.csv2_groups
group_list = db_session.query(Groups)
return group_list
# may be best to query the view instead of the resources table
def get_group_resources(filter=None):
engine = create_engine("mysql://" + config.db_user + ":" + config.db_password + "@" + config.db_host + ":" + str(config.db_port) + "/" + config.db_name)
Base.prepare(engine, reflect=True)
db_session = Session(engine)
GroupResources = Base.classes.csv2_group_resources
group_resources_list = db_session.query(GroupResources)
return group_resources_list
|
Python
| 0.000948
|
@@ -1981,29 +1981,31 @@
asses.cloud_
-quota
+network
s%0A networ
|
ddf311b4dc7c08f3f08516c702531053f8919720
|
Tidy imports
|
tests/test_validation.py
|
tests/test_validation.py
|
import json
from django.conf import settings
from django.test import TestCase, override_settings
from django_slack.exceptions import ChannelNotFound, MsgTooLong
from django_slack.backends import Backend
class TestOverride(TestCase):
def test_ok_result(self):
backend = Backend()
backend.validate('application/json', json.dumps({'ok': True}), {})
def test_msg_too_long_result(self):
# Arbitrarily chosen 'simple' error
backend = Backend()
with self.assertRaises(
MsgTooLong,
expected_regexp=r"MsgTooLong: msg_too_long",
):
backend.validate(
'application/json',
json.dumps({'ok': False, 'error': 'msg_too_long'}),
{},
)
def test_channel_not_found_result(self):
backend = Backend()
with self.assertRaises(
ChannelNotFound,
expected_regexp=r"ChannelNotFound: channel 'bad-channel' could not be found",
):
backend.validate(
'application/json',
json.dumps({'ok': False, 'error': 'channel_not_found'}),
{'channel': 'bad-channel'},
)
|
Python
| 0
|
@@ -9,40 +9,8 @@
son%0A
-from django.conf import settings
%0Afro
@@ -42,27 +42,8 @@
Case
-, override_settings
%0A%0Afr
|
1ee39cd3174b487038b62a3a6a66bac46571775a
|
Test that symlinks are properly created in bin_dir
|
tests/test_virtualenv.py
|
tests/test_virtualenv.py
|
import virtualenv
import optparse
from mock import patch, Mock
def test_version():
"""Should have a version string"""
assert virtualenv.virtualenv_version, "Should have version"
@patch('os.path.exists')
def test_resolve_interpreter_with_absolute_path(mock_exists):
"""Should return absolute path if given and exists"""
mock_exists.return_value = True
virtualenv.is_executable = Mock(return_value=True)
exe = virtualenv.resolve_interpreter("/usr/bin/python42")
assert exe == "/usr/bin/python42", "Absolute path should return as is"
mock_exists.assert_called_with("/usr/bin/python42")
virtualenv.is_executable.assert_called_with("/usr/bin/python42")
@patch('os.path.exists')
def test_resolve_intepreter_with_nonexistant_interpreter(mock_exists):
"""Should exit when with absolute path if not exists"""
mock_exists.return_value = False
try:
virtualenv.resolve_interpreter("/usr/bin/python42")
assert False, "Should raise exception"
except SystemExit:
pass
mock_exists.assert_called_with("/usr/bin/python42")
@patch('os.path.exists')
def test_resolve_intepreter_with_invalid_interpreter(mock_exists):
"""Should exit when with absolute path if not exists"""
mock_exists.return_value = True
virtualenv.is_executable = Mock(return_value=False)
try:
virtualenv.resolve_interpreter("/usr/bin/python42")
assert False, "Should raise exception"
except SystemExit:
pass
mock_exists.assert_called_with("/usr/bin/python42")
virtualenv.is_executable.assert_called_with("/usr/bin/python42")
def test_activate_after_future_statements():
"""Should insert activation line after last future statement"""
script = [
'#!/usr/bin/env python',
'from __future__ import with_statement',
'from __future__ import print_function',
'print("Hello, world!")'
]
assert virtualenv.relative_script(script) == [
'#!/usr/bin/env python',
'from __future__ import with_statement',
'from __future__ import print_function',
'',
"import os; activate_this=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'activate_this.py'); execfile(activate_this, dict(__file__=activate_this)); del os, activate_this",
'',
'print("Hello, world!")'
]
def test_cop_update_defaults_with_store_false():
"""store_false options need reverted logic"""
class MyConfigOptionParser(virtualenv.ConfigOptionParser):
def __init__(self, *args, **kwargs):
self.config = virtualenv.ConfigParser.RawConfigParser()
self.files = []
optparse.OptionParser.__init__(self, *args, **kwargs)
def get_environ_vars(self, prefix='VIRTUALENV_'):
yield ("no_site_packages", "1")
cop = MyConfigOptionParser()
cop.add_option(
'--no-site-packages',
dest='system_site_packages',
action='store_false',
help="Don't give access to the global site-packages dir to the "
"virtual environment (default)")
defaults = {}
cop.update_defaults(defaults)
assert defaults == {'system_site_packages': 0}
|
Python
| 0.000001
|
@@ -27,16 +27,67 @@
ptparse%0A
+import os%0Aimport shutil%0Aimport sys%0Aimport tempfile%0A
from moc
@@ -3249,8 +3249,952 @@
es': 0%7D%0A
+%0Adef test_install_python_symlinks():%0A %22%22%22Should create the right symlinks in bin_dir%22%22%22%0A tmp_virtualenv = tempfile.mkdtemp()%0A try:%0A home_dir, lib_dir, inc_dir, bin_dir = %5C%0A virtualenv.path_locations(tmp_virtualenv)%0A virtualenv.install_python(home_dir, lib_dir, inc_dir, bin_dir, False,%0A False)%0A%0A py_exe_no_version = 'python'%0A py_exe_version_major = 'python%25s' %25 sys.version_info%5B0%5D%0A py_exe_version_major_minor = 'python%25s.%25s' %25 (%0A sys.version_info%5B0%5D, sys.version_info%5B1%5D)%0A required_executables = %5B py_exe_no_version, py_exe_version_major,%0A py_exe_version_major_minor %5D%0A%0A for pth in required_executables:%0A assert os.path.exists(os.path.join(bin_dir, pth)), (%22%25s should %22%0A %22exist in bin_dir%22 %25 pth)%0A finally:%0A shutil.rmtree(tmp_virtualenv)%0A
|
0b1f6d7c5ff91bb6b1d0902c5cc5fcc1ab927c4e
|
Test that none on cli merged into the watchmaker config
|
tests/test_watchmaker.py
|
tests/test_watchmaker.py
|
# -*- coding: utf-8 -*-
"""Watchmaker main test module."""
from __future__ import (absolute_import, division, print_function,
unicode_literals, with_statement)
import os
import pytest
import yaml
import watchmaker
from watchmaker import static
@pytest.fixture
def watchmaker_arguments():
"""Return default watchmaker arguments."""
watchmaker_arguments = {
'salt_states': 'highstate',
'no_reboot': False,
'admin_groups': None,
'computer_name': None,
'admin_users': None,
'log_level':
'debug',
'ou_path': None,
'config_path': None,
'environment': None,
'extra_arguments': [],
'log_dir': u'/var/log/watchmaker'
}
return watchmaker_arguments
@pytest.fixture
def watchmaker_client(watchmaker_arguments):
"""Return watchmaker client with defaults."""
return watchmaker.Client(watchmaker_arguments)
@pytest.fixture
def default_config():
"""Return default configuration for watchmaker."""
config_path = os.path.join(static.__path__[0], 'config.yaml')
with open(config_path, 'r') as stream:
return yaml.safe_load(stream)
def test_main():
"""Placeholder for tests."""
assert watchmaker.__version__ == watchmaker.__version__
def test_none_arguments():
"""Check string 'None' conversion to None."""
raw_arguments = {
'admin_groups': 'None',
'admin_users': 'None',
'computer_name': 'None',
'salt_states': 'None',
'ou_path': 'None'
}
watchmaker_arguments = watchmaker.Arguments(**dict(**raw_arguments))
assert watchmaker_arguments.admin_groups is None
assert watchmaker_arguments.admin_users is None
assert watchmaker_arguments.computer_name is None
assert watchmaker_arguments.salt_states is None
assert watchmaker_arguments.ou_path is None
def test_argument_default_value():
"""Ensure argument default value is `Arguments.DEFAULT_VALUE`."""
raw_arguments = {}
check_val = watchmaker.Arguments.DEFAULT_VALUE
watchmaker_arguments = watchmaker.Arguments(**dict(**raw_arguments))
assert watchmaker_arguments.admin_groups == check_val
assert watchmaker_arguments.admin_users == check_val
assert watchmaker_arguments.computer_name == check_val
assert watchmaker_arguments.salt_states == check_val
assert watchmaker_arguments.ou_path == check_val
def test_extra_arguments_string():
"""Test string in extra_arguments loads correctly."""
# setup
raw_arguments = {
"extra_arguments": [
'--foo',
'bar'
]
}
check_val = {"foo": "bar"}
watchmaker_arguments = watchmaker.Arguments(**dict(**raw_arguments))
# test
watchmaker_client = watchmaker.Client(watchmaker_arguments)
# assertions
assert watchmaker_client.worker_args == check_val
def test_extra_arguments_equal_separator():
"""Test equal separator in extra_arguments loads correctly."""
# setup
raw_arguments = {
"extra_arguments": [
'--foo=bar',
]
}
check_val = {"foo": "bar"}
watchmaker_arguments = watchmaker.Arguments(**dict(**raw_arguments))
# test
watchmaker_client = watchmaker.Client(watchmaker_arguments)
# assertions
assert watchmaker_client.worker_args == check_val
def test_extra_arguments_quoted_string():
"""Test quoted string in extra_arguments loads correctly."""
# setup
raw_arguments = {
"extra_arguments": [
'--foo',
'"bar"'
]
}
check_val = {"foo": "bar"}
watchmaker_arguments = watchmaker.Arguments(**dict(**raw_arguments))
# test
watchmaker_client = watchmaker.Client(watchmaker_arguments)
# assertions
assert watchmaker_client.worker_args == check_val
def test_extra_arguments_list():
"""Test list in extra_arguments loads correctly."""
# setup
raw_arguments = {
"extra_arguments": [
'--foo',
'["bar"]'
]
}
check_val = {"foo": ["bar"]}
watchmaker_arguments = watchmaker.Arguments(**dict(**raw_arguments))
# test
watchmaker_client = watchmaker.Client(watchmaker_arguments)
# assertions
assert watchmaker_client.worker_args == check_val
def test_extra_arguments_map():
    """Test map in extra_arguments loads correctly."""
    # A JSON-style object literal should be parsed into a dict, and the
    # hyphenated flag name normalized to snake_case.
    raw_arguments = {
        "extra_arguments": ["--user-formulas", '{"foo-formula": "https://url"}']
    }
    expected = {"user_formulas": {"foo-formula": "https://url"}}

    arguments = watchmaker.Arguments(**raw_arguments)
    client = watchmaker.Client(arguments)

    assert client.worker_args == expected
|
Python
| 0
|
@@ -1887,16 +1887,204 @@
s None%0A%0A
+ watchmaker_client = watchmaker.Client(watchmaker_arguments)%0A%0A assert 'salt_states' in watchmaker_client.worker_args%0A assert watchmaker_client.worker_args%5B'salt_states'%5D is None%0A%0A
%0Adef tes
|
50b6c9a9e55a22dc1893fcaf6f8800015992d41d
|
Make import more specific
|
iatidq/util.py
|
iatidq/util.py
|
# IATI Data Quality, tools for Data QA on IATI-formatted publications
# by Mark Brough, Martin Keegan, Ben Webb and Jennifer Smith
#
# Copyright (C) 2013 Publish What You Fund
#
# This programme is free software; you may redistribute and/or modify
# it under the terms of the GNU Affero General Public License v3.0
import contextlib
import os
import urllib2
import json
from flask import request, current_app
import traceback
import collections
download_headers = {'User-Agent': "PWYF/Aid Transparency Tracker"}
@contextlib.contextmanager
def report_error(success, failure):
    """Context manager that reports the outcome of its body (Python 2).

    Prints *success* when the body completes without raising; prints
    *failure* followed by the exception otherwise.  Either message may be
    None to stay silent for that outcome.  NOTE: exceptions are swallowed,
    never re-raised — callers cannot observe failure programmatically.
    """
    try:
        yield
        if success is not None:
            print success
    except Exception, e:
        if failure is not None:
            print failure, e
        #print traceback.print_exc()
    finally:
        pass
def ensure_download_dir(directory):
    """Create *directory* (including parents) if it does not exist yet."""
    if os.path.exists(directory):
        return
    # report_error swallows any OSError and just prints the message.
    with report_error(None, "Couldn't create directory"):
        os.makedirs(directory)
def download_file(url, path):
    """Download *url* into local file *path* using the module User-Agent."""
    # Note: the local file is opened (and thus created) before the URL is
    # fetched, matching the original behavior on download failure.
    with open(path, 'w') as local_file:
        request = urllib2.Request(url, headers=download_headers)
        remote = urllib2.urlopen(request)
        local_file.write(remote.read())
        remote.close()
class JSONEncoder(json.JSONEncoder):
    """json.JSONEncoder that serializes datetime objects as ISO-8601 strings."""

    def default(self, obj):
        # BUG FIX: the module header never imports `datetime`, so referencing
        # it here raised NameError the first time a datetime was encoded.
        # Import locally to keep the fix self-contained.
        import datetime
        if isinstance(obj, datetime.datetime):
            return obj.isoformat()
        return json.JSONEncoder.default(self, obj)
def jsonify(*args, **kwargs):
    """Build a JSON response like flask.jsonify, but datetime-aware.

    Pretty-prints (indent=2) for normal requests and emits compact JSON
    for XHR requests.  NOTE(review): `request.is_xhr` was removed in
    newer Flask releases — confirm the pinned Flask version supports it.
    """
    payload = json.dumps(
        dict(*args, **kwargs),
        indent=None if request.is_xhr else 2,
        cls=JSONEncoder,
    )
    return current_app.response_class(payload, mimetype='application/json')
def resort_sqlalchemy_indicator(data):
    """Return *data* as an OrderedDict sorted by each value's
    indicator["indicator_order"].

    Fixed to use sorted(key=...) instead of the Python-2-only
    sorted(cmp=...)/cmp() pair; the resulting order is identical because
    the old comparator compared the very same field.
    """
    ordered = sorted(
        data.items(),
        key=lambda item: item[1]['indicator']["indicator_order"])
    return collections.OrderedDict(ordered)
def resort_dict_indicator(data):
    """Return *data* as an OrderedDict sorted by each value's
    indicator['indicator_order'].

    Fixed to use sorted(key=...) instead of the Python-2-only
    sorted(cmp=...)/cmp() pair; ordering is unchanged.
    """
    ordered = sorted(
        data.items(),
        key=lambda item: item[1]['indicator']['indicator_order'])
    return collections.OrderedDict(ordered)
def resort_indicator_tests(data):
    """Return *data* as an OrderedDict sorted by each value's
    "indicator_order" field.

    Fixed to use sorted(key=...) instead of the Python-2-only
    sorted(cmp=...)/cmp() pair; ordering is unchanged.
    """
    ordered = sorted(
        data.items(),
        key=lambda item: item[1]["indicator_order"])
    return collections.OrderedDict(ordered)
|
Python
| 0
|
@@ -317,16 +317,32 @@
e v3.0%0A%0A
+from contextlib
import c
@@ -347,19 +347,23 @@
context
-lib
+manager
%0Aimport
@@ -541,19 +541,8 @@
%7D%0A%0A@
-contextlib.
cont
|
a8389e913b417dc37e23f9cfc1f52ab63802c8a4
|
movie title encode to support multiple language
|
demo/indexMlTmdb.py
|
demo/indexMlTmdb.py
|
import json
def enrich(movie):
    """Add search-oriented derived fields to *movie* in place."""
    if 'title' not in movie:
        return
    # Sentinel token lets queries anchor on the start of the title.
    movie['title_sent'] = 'SENTINEL_BEGIN ' + movie['title']
def reindex(es, analysisSettings={}, mappingSettings={}, movieDict={}, index='tmdb'):
    """Drop and recreate *index*, then bulk-load every movie in *movieDict*."""
    import elasticsearch.helpers

    settings = {
        "settings": {
            "number_of_shards": 1,
            "index": {
                "analysis": analysisSettings,
            }}}
    if mappingSettings:
        settings['mappings'] = mappingSettings  # C

    # Rebuild the index from scratch; ignore "missing index" errors.
    es.indices.delete(index, ignore=[400, 404])
    es.indices.create(index, body=settings)

    def generate_actions(movies):
        # Yield one bulk-API action per movie, enriching each on the way.
        for movie_id, movie in movies.items():
            if movie.get('release_date') == "":
                del movie['release_date']
            enrich(movie)
            yield {"_index": index,  # E
                   "_type": "movie",
                   "_id": movie_id,
                   "_source": movie}
            if 'title' in movie:
                print("%s added to %s" % (movie['title'], index))

    elasticsearch.helpers.bulk(es, generate_actions(movieDict))
if __name__ == "__main__":
    # Entry point: load the TMDB movie dump and rebuild the search index.
    from utils import Elasticsearch
    from sys import argv  # NOTE(review): imported but unused
    es = Elasticsearch(timeout=30)  # generous timeout for bulk indexing
    movieDict = json.loads(open('tmdb.json').read())
    reindex(es, movieDict=movieDict)
|
Python
| 0.999999
|
@@ -1085,16 +1085,32 @@
'title'%5D
+.encode('utf-8')
, index)
|
351cbae9cd3695002e613162981c13a06a19d803
|
Handle contents files in iso-8859-1 encoding
|
dep11/iconfinder.py
|
dep11/iconfinder.py
|
#!/usr/bin/env python
#
# Copyright (c) 2014-2015 Matthias Klumpp <mak@debian.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program.
import os
import gzip
import re
from dep11.component import IconSize
from dep11.utils import read_packages_dict_from_file
class AbstractIconFinder:
    '''
    An icon-finder finds an icon in the archive, if it has not yet
    been found in the analyzed package already.

    AbstractIconFinder is a dummy class, not implementing the
    methods needed to find an icon.
    '''

    def __init__(self, suite_name, archive_component):
        pass

    def find_icons(self, pkgname, icon_str, icon_sizes, binid=-1):
        # Dummy implementation: never finds anything.
        return None

    def set_allowed_icon_extensions(self, exts):
        pass


def _decode_contents_line(line):
    '''
    Decode one raw line of a Contents file.

    BUG FIX: Contents files are usually UTF-8, but some archives still
    ship entries in ISO-8859-1; previously a single non-UTF-8 byte made
    parsing crash with UnicodeDecodeError.  Fall back to ISO-8859-1
    (which can decode any byte sequence) instead.
    '''
    try:
        return str(line, 'utf-8')
    except UnicodeDecodeError:
        return str(line, 'iso-8859-1')


class ContentsListIconFinder(AbstractIconFinder):
    '''
    An implementation of an IconFinder, using a Contents-<arch>.gz file
    present in Debian archive mirrors to find icons.
    '''

    def __init__(self, suite_name, archive_component, arch_name, archive_mirror_dir, pkgdict=None):
        self._suite_name = suite_name
        self._component = archive_component
        self._mirror_dir = archive_mirror_dir

        contents_basename = "Contents-%s.gz" % (arch_name)
        contents_fname = os.path.join(archive_mirror_dir, "dists", suite_name, archive_component, contents_basename)

        # Ubuntu does not place the Contents file in a component-specific directory,
        # so fall back to the global one.
        if not os.path.isfile(contents_fname):
            path = os.path.join(archive_mirror_dir, "dists", suite_name, contents_basename)
            if os.path.isfile(path):
                contents_fname = path

        # load and preprocess insanely large file.
        # we don't show mercy to memory here, we just want this to be fast.
        self._contents_data = list()
        f = gzip.open(contents_fname, 'r')
        for line in f:
            line = _decode_contents_line(line)
            if line.startswith("usr/share/icons/hicolor/") or line.startswith("usr/share/pixmaps/"):
                self._contents_data.append(line)
        f.close()

        self._packages_dict = pkgdict
        if not self._packages_dict:
            self._packages_dict = read_packages_dict_from_file(archive_mirror_dir, suite_name, archive_component, arch_name)

    def _query_icon(self, size, icon):
        '''
        Find icon files in the archive which match a size.

        Returns a {'icon_fname': ..., 'deb_fname': ...} dict for the first
        match whose package is known, or None.
        '''
        if not self._contents_data:
            return None

        valid = None
        if size:
            # BUG FIX: the previous pattern ended in '[\.png|\.svg|\.svgz]',
            # a character CLASS matching any single one of those characters
            # rather than a real extension; use an alternation instead.
            # NOTE(review): *icon* is still interpolated unescaped, so regex
            # metacharacters in icon names keep their special meaning.
            valid = re.compile('^usr/share/icons/hicolor/' + size + '/.*' + icon + r'\.(png|svg|svgz)')
        else:
            # BUG FIX: the '.' before 'png' was unescaped and matched any char.
            valid = re.compile('^usr/share/pixmaps/' + icon + r'\.png')

        res = list()
        for line in self._contents_data:
            if valid.match(line):
                res.append(line)

        for line in res:
            line = line.strip(' \t\n\r')
            if not " " in line:
                continue
            # Contents line format: "<path>  <section>/<package>"
            parts = line.split(" ", 1)
            path = parts[0].strip()
            group_pkg = parts[1].strip()
            if not "/" in group_pkg:
                continue
            pkgname = group_pkg.split("/", 1)[1].strip()

            pkg = self._packages_dict.get(pkgname)
            if not pkg:
                continue

            deb_fname = os.path.join(self._mirror_dir, pkg['filename'])
            return {'icon_fname': path, 'deb_fname': deb_fname}

        return None

    def find_icons(self, package, icon, sizes, binid):
        '''
        Tries to find the best possible icon available
        '''
        size_map_flist = dict()

        for size in sizes:
            flist = self._query_icon(str(size), icon)
            if flist:
                size_map_flist[size] = flist

        if not IconSize(64) in size_map_flist:
            # see if we can find a scalable vector graphic as icon
            # we assume "64x64" as size here, and resize the vector
            # graphic later.
            flist = self._query_icon("scalable", icon)
            if flist:
                size_map_flist[IconSize(64)] = flist
            else:
                # some software doesn't store icons in sized XDG directories.
                # catch these here, and assume that the size is 64x64
                flist = self._query_icon(None, icon)
                if flist:
                    size_map_flist[IconSize(64)] = flist

        return size_map_flist

    def set_allowed_icon_extensions(self, exts):
        self._allowed_exts = exts
|
Python
| 0
|
@@ -1291,16 +1291,144 @@
pass%0A%0A%0A
+def _decode_contents_line(line):%0A try:%0A return str(line, 'utf-8')%0A except:%0A return str(line, 'iso-8859-1')%0A%0A
class Co
@@ -2608,33 +2608,42 @@
line =
-str(line, 'utf-8'
+_decode_contents_line(line
)%0A
|
e4c92b7d8cdd808b2415c2edf11576a87264f7f3
|
Remove context_stack_on_request_context()
|
frasco/ctx.py
|
frasco/ctx.py
|
from flask import has_request_context, _request_ctx_stack
from frasco.utils import unknown_value
from werkzeug.local import LocalProxy, LocalStack
from contextlib import contextmanager
import functools
class ContextStack(LocalStack):
    """A werkzeug LocalStack with default values and nesting control.

    - *top*: value reported by ``top`` when the stack is empty.
    - *default_item*: value (or zero-arg factory) pushed when ``push()``
      is called without an explicit item.
    - *allow_nested*: when False, pushing onto a non-empty stack raises.
    - *ignore_nested*: when True, nested pushes reuse the current top
      instead of installing a new item.
    """

    def __init__(self, top=None, default_item=None, allow_nested=True, ignore_nested=False):
        super(ContextStack, self).__init__()
        self.default_top = top
        self.default_item = default_item
        self.allow_nested = allow_nested
        self.ignore_nested = ignore_nested

    @property
    def stack(self):
        # May return a fresh empty list when nothing was ever pushed.
        return getattr(self._local, 'stack', None) or []

    @property
    def is_stacked(self):
        return bool(self.stack)

    def push(self, item=unknown_value):
        """Push *item* and return it.

        Without an item, pushes default_item (calling it if callable).
        Raises RuntimeError when nesting is disallowed.
        """
        if self.is_stacked and not self.allow_nested:
            raise RuntimeError('Context does not support nesting')
        if self.is_stacked and self.ignore_nested:
            item = self.top
        elif item is unknown_value:
            if callable(self.default_item):
                item = self.default_item()
            else:
                item = self.default_item
        super(ContextStack, self).push(item)
        return item

    def replace(self, item):
        """Swap the current top for *item* without changing stack depth.

        NOTE(review): when the stack is empty, ``self.stack`` is a fresh
        list, so the appended item is silently discarded — confirm this
        is intended.
        """
        stack = self.stack
        if stack:
            stack.pop()
        stack.append(item)
        return item

    @property
    def top(self):
        # Fall back to default_top when the stack is empty or unset.
        try:
            return self._local.stack[-1]
        except (AttributeError, IndexError):
            return self.default_top

    @contextmanager
    def ctx(self, item=unknown_value, **kwargs):
        """Context manager: push on entry, pop on exit (even on error)."""
        item = self.push(item, **kwargs)
        try:
            yield item
        finally:
            self.pop()

    def __call__(self, *args, **kwargs):
        # Calling the stack is shorthand for entering a ctx().
        return self.ctx(*args, **kwargs)

    def make_proxy(self):
        # LocalStack.__call__ returns a LocalProxy bound to the current top.
        return super(ContextStack, self).__call__()
def context_stack_on_request_context(name, cls=ContextStack):
    """Expose a per-request context stack as a lazy proxy.

    The stack is created on first access inside a request and cached on
    the request context object under attribute *name*.  Outside a request
    context the proxy resolves to None.
    """
    def _lookup():
        if has_request_context() and not hasattr(_request_ctx_stack.top, name):
            setattr(_request_ctx_stack.top, name, cls())
        return getattr(_request_ctx_stack.top, name, None)

    return LocalProxy(_lookup)
# Sentinel returned by call() when execution has been deferred.
delayed_result = object()


class DelayedCallsContext(ContextStack):
    """Context stack that queues calls while stacked and flushes them when
    the outermost context exits."""

    def __init__(self):
        super(DelayedCallsContext, self).__init__(default_item=list,
                                                  ignore_nested=True)

    def call(self, func, args, kwargs):
        """Queue (func, args, kwargs) if a context is active, otherwise
        invoke immediately."""
        if self.top is None:
            return func(*args, **kwargs)
        self.top.append((func, args, kwargs))
        return delayed_result

    def pop(self, drop_calls=False):
        """Pop the context; run queued calls once the outermost one exits."""
        queued = super(DelayedCallsContext, self).pop()
        if drop_calls or self.is_stacked:
            return
        for func, args, kwargs in queued:
            func(*args, **kwargs)

    def proxy(self, func):
        """Wrap *func* so calls go through call(); the original stays
        reachable as .call_now."""
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            return self.call(func, args, kwargs)
        wrapper.call_now = func
        return wrapper
class FlagContextStack(ContextStack):
    """A ContextStack holding a boolean flag, with support for values that
    are consumed by a single consume_once() call."""

    def __init__(self, flag=False):
        # Default top is *flag*; pushing without a value pushes its negation.
        super(FlagContextStack, self).__init__(flag, not flag)
        # Parallel stack recording whether each pushed value is one-shot.
        self.once_stack = ContextStack()

    def push(self, item=unknown_value, once=False):
        self.once_stack.push(once)
        return super(FlagContextStack, self).push(item)

    def pop(self):
        self.once_stack.pop()
        return super(FlagContextStack, self).pop()

    def once(self, value=unknown_value):
        """Enter a context whose value is consumed by one consume_once().

        BUG FIX: *value* was previously ignored — unknown_value was always
        passed to ctx(), so a caller-supplied value never took effect.
        """
        return self.ctx(value, once=True)

    def consume_once(self):
        """Return the current top; if it was pushed as one-shot, revert the
        visible top to the value underneath (the marker is cleared so it is
        only consumed once)."""
        top = self.top
        if self.once_stack.top:
            self.once_stack.replace(False)
            self.replace(self.stack[-2] if len(self.stack) > 1 else self.default_top)
        return top

    def once_consumer(self, func):
        """Decorator: consume the one-shot value before calling *func*."""
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            self.consume_once()
            return func(*args, **kwargs)
        return wrapper

    def active(self):
        # Like `top`, but consumes a pending one-shot value.
        if self.once_stack.top:
            return self.consume_once()
        return self.top
|
Python
| 0.000001
|
@@ -1851,326 +1851,8 @@
)%0A%0A%0A
-def context_stack_on_request_context(name, cls=ContextStack):%0A def _get_object():%0A if has_request_context() and not hasattr(_request_ctx_stack.top, name):%0A setattr(_request_ctx_stack.top, name, cls())%0A return getattr(_request_ctx_stack.top, name, None)%0A return LocalProxy(_get_object)%0A%0A%0A
dela
|
1c116355e91ebed668620f8f84d9d4331de4adab
|
include first 3 sentences only
|
cogs/wiki.py
|
cogs/wiki.py
|
import discord
from discord.ext import commands
from bs4 import BeautifulSoup
from urllib.parse import quote_plus
from dateutil.parser import isoparse
from utils import aiohttp_wrap as aw
class Wiki(commands.Cog):
    """Discord cog providing a Wikipedia lookup command."""

    # REST endpoint returning a page summary (follows redirects).
    SUMMARY_URI = "https://en.wikipedia.org/api/rest_v1/page/summary/{}?redirect=true"
    # Opensearch endpoint returning the single closest title match.
    SEARCH_URI = "http://en.wikipedia.org/w/api.php?action=opensearch&format=json&search={}&limit=1&namespace=0"
    HEADERS = {
        "user-agent": "qtbot/1.0 - A friendly discord bot (https://github.com/Naught0/qtbot)"
    }

    def __init__(self, bot):
        self.bot = bot
        # Shared aiohttp session owned by the bot.
        self.session = bot.aio_session

    @commands.command(name="wiki", aliases=["wi"])
    async def wiki_search(self, ctx, *, query=None):
        """ Get the closest matching Wikipedia article for a given query """
        # NOTE(review): query defaults to None; quote_plus(None) would raise —
        # presumably the command framework always supplies a value. Confirm.
        formatted_query = quote_plus(query)
        # Get wiki page
        wiki_info = await aw.aio_get_json(
            self.session,
            self.SEARCH_URI.format(formatted_query),
            headers=self.HEADERS,
        )
        # No result found
        if not wiki_info[1]:
            return await ctx.error(f"Sorry, I couldn't find anything for `{query}`.")
        # Get summary (underscores instead of spaces, kept unescaped in the path)
        article_title = quote_plus(wiki_info[1][0].replace(" ", "_"), safe="_")
        article_summary = await aw.aio_get_json(
            self.session, self.SUMMARY_URI.format(article_title), headers=self.HEADERS
        )
        # Get wiki image by scraping the article's og:image meta tag
        article_html = await aw.aio_get_text(
            self.session, article_summary["content_urls"]["desktop"]["page"]
        )
        soup = BeautifulSoup(article_html)
        article_image = soup.head.find(attrs={"property": "og:image"})
        # Create embed
        em = discord.Embed(
            title=article_summary["titles"]["display"], color=discord.Color.blurple()
        )
        em.description = article_summary["extract"]
        em.url = article_summary["content_urls"]["desktop"]["page"]
        # Fall back to a generic Wikipedia logo when the page has no og:image.
        em.set_thumbnail(
            url="https://lh5.ggpht.com/1Erjb8gyF0RCc9uhnlfUdbU603IgMm-G-Y3aJuFcfQpno0N4HQIVkTZERCTo65Iz2II=w300"
            if article_image is None
            else article_image.attrs["content"]
        )
        em.set_footer(text="last edited")
        em.timestamp = isoparse(article_summary["timestamp"])
        await ctx.send(embed=em)
def setup(bot):
    # discord.py extension entry point: register the Wiki cog on the bot.
    bot.add_cog(Wiki(bot))
|
Python
| 0.000072
|
@@ -1871,16 +1871,25 @@
ption =
+' '.join(
article_
@@ -1906,16 +1906,33 @@
xtract%22%5D
+.split('. ')%5B:3%5D)
%0A
|
4f8429e9cd17f207ef429bdf21508cfac4200c4c
|
improve display
|
examples/admin.py
|
examples/admin.py
|
# -*- coding: utf-8 -*-
#
# django-granadilla
# Copyright (C) 2009 Bolloré telecom
# See AUTHORS file for a full list of contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.contrib import admin
from examples.models import LdapGroup, LdapUser
class LdapGroupAdmin(admin.ModelAdmin):
exclude = ['dn', 'usernames']
list_display = ['name', 'gid']
search_fields = ['name']
class LdapUserAdmin(admin.ModelAdmin):
exclude = ['dn', 'password', 'photo']
list_display = ['username', 'uid']
search_fields = ['first_name', 'last_name', 'full_name', 'username']
admin.site.register(LdapGroup, LdapGroupAdmin)
admin.site.register(LdapUser, LdapUserAdmin)
|
Python
| 0.000001
|
@@ -1104,16 +1104,52 @@
ername',
+ 'first_name', 'last_name', 'email',
'uid'%5D%0A
|
f978372d8345bd0ab1c9dc1a9739d0d5852a1780
|
add FIXMEs / TODOs
|
exportiphoto.py
|
exportiphoto.py
|
#!/usr/bin/env python
__version__ = "0.5"
import datetime
import os
import re
import shutil
import stat
import sys
from optparse import OptionParser
from xml.dom.minidom import parse, Node
def main(albumDataXml, targetDir, copyImg=True, useEvents=True):
    """Copy images referenced by an iPhoto AlbumData.xml into targetDir.

    One sub-directory is created per roll/event (default) or per album
    (useEvents=False).  With copyImg=False, performs a dry run that only
    prints what would be copied.  Unchanged files (similar mtime or same
    size) are skipped.
    """
    print "Parsing..."
    try:
        albumDataDom = parse(albumDataXml)
    except IOError, why:
        return error("Can't parse Album Data: %s" % why[1])
    topMostDict = albumDataDom.documentElement.getElementsByTagName('dict')[0]
    if not topMostDict:
        return error("Album Data doesn't appear to be in the right format.")
    masterImageListDict = getValue(topMostDict, "Master Image List")
    if useEvents:
        targetLists = getValue(topMostDict, "List of Rolls")
        useDate = True
    else:
        targetLists = getValue(topMostDict, "List of Albums")
        useDate = False
    # walk through all the rolls (events) / albums
    for folderDict in findChildren(targetLists, 'dict'):
        if useEvents:
            folderName = getElementText(getValue(folderDict, "RollName"))
            print "\n\nProcessing Roll: %s" % (folderName)
        else:
            folderName = getElementText(getValue(folderDict, "AlbumName"))
            # The top-level "Photos" album duplicates everything; skip it.
            if folderName == 'Photos':
                continue
            print "\n\nProcessing Album: %s" % (folderName)
        if useDate:
            # Roll dates are Apple timestamps (seconds since 2001-01-01).
            appleTime = getElementText(
                getValue(folderDict, "RollDateAsTimerInterval")
            )
            rollTime = getAppleTime(appleTime)
            date = '%(year)d-%(month)02d-%(day)02d' % {
                'year': rollTime.year,
                'month': rollTime.month,
                'day': rollTime.day
            }
        else:
            date = ''
        #walk through all the images in this roll/event/album
        imageIdArray = getValue(folderDict, "KeyList")
        for imageIdElement in findChildren(imageIdArray, 'string'):
            imageId = getElementText(imageIdElement)
            imageDict = getValue(masterImageListDict, imageId)
            mFilePath = getElementText(getValue(imageDict, "ImagePath"))
#            oFilePath = getElementText(getValue(imageDict, "OriginalPath"))
            mStat = os.stat(mFilePath)
            basename = os.path.basename(mFilePath)
            # Rolls named like "Jan 1, 2001" are pure dates; avoid
            # duplicating the date in the output directory name.
            if useDate and re.match(
                "[A-Z][a-z]{2} [0-9]{1,2}, [0-9]{4}", folderName
            ):
                outputPath = date
            elif useDate:
                outputPath = date + " " + folderName
            else:
                outputPath = folderName
            targetFileDir = targetDir + "/" + outputPath
            if not os.path.exists(targetFileDir):
                print "Creating directory: %s" % targetFileDir
                if copyImg:
                    try:
                        os.makedirs(targetFileDir)
                    except OSError, why:
                        error("Can't create directory: %s" % why[1])
            tFilePath = targetFileDir + "/" + basename
            # skip unchanged files (mtime within 10s OR identical size)
            if os.path.exists(tFilePath):
                tStat = os.stat(tFilePath)
                if abs(tStat[stat.ST_MTIME] - mStat[stat.ST_MTIME]) <= 10 or \
                   tStat[stat.ST_SIZE] == mStat[stat.ST_SIZE]:
                    sys.stdout.write(".")
                    continue
            msg = "copying from:%s to:%s" % (mFilePath, tFilePath)
            if copyImg:
                print msg
                # copy2 preserves metadata, keeping the mtime check valid.
                shutil.copy2(mFilePath, tFilePath)
            else:
                print "test - %s" % (msg)
    albumDataDom.unlink()
def findChildren(parent, name):
    """Return parent's direct DOM children whose nodeName equals *name*."""
    return [child for child in parent.childNodes if child.nodeName == name]
def getElementText(element):
    """Return the value of *element*'s first child node (its text).

    Returns None for a missing element or one with no children.
    """
    if element is None or not element.childNodes:
        return None
    return element.childNodes[0].nodeValue
def getValue(parent, keyName):
    """Return the plist value element for *keyName* under *parent*.

    In a plist dict, each <key> is followed by its value element; skip
    any non-element nodes (whitespace text) between them.  When the key
    is missing, error() is called, which terminates the program.
    """
    for key in findChildren(parent, "key"):
        if getElementText(key) != keyName:
            continue
        sibling = key.nextSibling
        while sibling is not None and sibling.nodeType != Node.ELEMENT_NODE:
            sibling = sibling.nextSibling
        return sibling
    error("Can't find %s in Album Data." % keyName)
# Seconds between the Unix epoch and Apple's reference date (2001-01-01).
APPLE_BASE = 978307200  # 2001/1/1

def getAppleTime(value):
    "Converts a numeric Apple time stamp into a date and time"
    return datetime.datetime.fromtimestamp(float(value) + APPLE_BASE)
def error(msg):
    """Report a fatal error on stderr and exit with status 1."""
    message = "ERROR: " + msg + "\n"
    sys.stderr.write(message)
    sys.exit(1)
if __name__ == '__main__':
    # CLI entry point: parse options, then hand off to main().
    usage = """Usage: %prog [options] <AlbumData.xml> <destination dir>"""
    version = """exportiphoto version %s""" % __version__
    option_parser = OptionParser(usage=usage, version=version)
    option_parser.set_defaults(test=False, albums=False)
    option_parser.add_option("-t", "--test",
                             action="store_true", dest="test",
                             help="don't copy images; dry run"
    )
    option_parser.add_option("-a", "--albums",
                             action="store_true", dest="albums",
                             help="use albums instead of events"
    )
    (options, args) = option_parser.parse_args()
    if len(args) != 2:
        option_parser.error(
            "Please specify an iPhoto library and a destination."
        )
    # Flags are inverted: --test disables copying, --albums disables events.
    main(args[0], args[1], not options.test, not options.albums)
|
Python
| 0
|
@@ -187,16 +187,78 @@
, Node%0A%0A
+# FIXME: use SAX so we don't have to load XML all into memory%0A
%0Adef mai
@@ -1818,17 +1818,18 @@
#
-w
+ W
alk thro
@@ -3120,17 +3120,17 @@
#
-s
+S
kip unch
@@ -3522,32 +3522,32 @@
if copyImg:%0A
-
@@ -3556,16 +3556,81 @@
int msg%0A
+ # TODO: try findertools.copy and macostools.copy%0A
|
cbc8632a74f32415b2819b678340b6e4f0944dba
|
Use build_context factory
|
tests/unit/tools/list.py
|
tests/unit/tools/list.py
|
# encoding: UTF-8
import unittest
from tml.tools.list import List
from tml.tools.template import Template
from tests.mock import Client
from tml import Context
class list(unittest.TestCase):
    """Tests for tml.tools.list.List rendering."""
    # NOTE(review): this class shadows the builtin ``list``; a rename
    # (e.g. ListTest) would avoid confusion.

    def setUp(self):
        # Translation context built from canned client data, Russian locale.
        self.context = Context(client = Client.read_all(), locale = 'ru')

    def test_render(self):
        # Default / custom / "last" separators, plus element limits.
        self.assertEquals('a, b, c', List(['a','b','c']).render(self.context), 'Just list')
        self.assertEquals('a;b;c', List(['a','b','c'], separator = ';').render(self.context), 'Custom separator')
        self.assertEquals('a, b and c', List(['a','b','c'], last_separator = 'and').render(self.context), 'Last separator')
        self.assertEquals('a, b', List(['a','b','c'], limit = 2).render(self.context), 'Limit')
        self.assertEquals('a and b', List(['a','b','c'], limit = 2, last_separator = 'and').render(self.context), 'Limit')
        self.assertEquals('a', List(['a'], limit = 2, last_separator = 'and').render(self.context), 'One element')

    def test_tpl(self):
        # Per-item template wrapping, then template with a grammatical case.
        list = List(['a','b','c'], tpl = Template('<b>{$0}</b>'))
        self.assertEquals('<b>a</b>, <b>b</b>, <b>c</b>', list.render(self.context), 'Apply template')
        list = List([{'name':'Вася','gender':'male'},{'name':'Андрей','gender':'male'},{'name':'Семен','gender':'male'}], tpl = Template('{$0::dat}'), last_separator = u'и')
        self.assertEquals(u'Васе, Андрею и Семену', list.render(self.context), 'Apply context')
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
Python
| 0.000001
|
@@ -145,17 +145,23 @@
import
-C
+build_c
ontext%0A%0A
@@ -167,18 +167,22 @@
%0A%0Aclass
-li
+ListTe
st(unitt
@@ -241,17 +241,23 @@
ntext =
-C
+build_c
ontext(c
|
c6c79df5dc0cae569e24e86e92a44421d855c815
|
Remove some unused functions
|
entropy/engine.py
|
entropy/engine.py
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2013 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from concurrent.futures import ThreadPoolExecutor
import datetime
import logging
import os
import croniter
import pause
from entropy import utils
LOG = logging.getLogger(__name__)
class Engine(object):
    """Runs audit scripts on cron schedules and repair scripts on demand.

    Script definitions are loaded from YAML config files; a watchdog
    thread re-reads them when the config directory changes.  Work is
    dispatched on a shared ThreadPoolExecutor.
    """

    def __init__(self, name, **cfg_data):
        # constants
        # TODO(praneshp): Hardcode for now, could/should be cmdline input
        self.max_workers = 8
        self.audit_type = 'audit'
        self.repair_type = 'repair'
        # engine variables
        self.name = name
        self.audit_cfg = cfg_data['audit_cfg']
        self.repair_cfg = cfg_data['repair_cfg']
        # TODO(praneshp): Assuming cfg files are in 1 dir. Change later
        self.cfg_dir = os.path.dirname(self.audit_cfg)
        self.log_file = cfg_data['log_file']
        # One pool shared by audit schedulers and repair scripts.
        self.executor = ThreadPoolExecutor(max_workers=self.max_workers)
        self.running_audits = []
        self.running_repairs = []
        self.futures = []
        LOG.info('Creating engine obj')
        # NOTE: blocks on the watchdog thread (see start_scheduler).
        self.start_scheduler()

    def start_scheduler(self):
        """Start the config watchdog and all configured scripts, then block
        until the watchdog thread exits."""
        # Start watchdog thread, which will detect any new audit/react scripts
        # TODO(praneshp): Look into how to do this with threadpoolexecutor?
        watchdog_thread = self.start_watchdog(self.cfg_dir) # noqa
        # Start react and audit scripts.
        self.futures.append(self.start_scripts('repair'))
        self.futures.append(self.start_scripts('audit'))
        watchdog_thread.join()

    def register_audit(self):
        # Placeholder: not implemented yet.
        pass

    def register_repair(self):
        # Placeholder: not implemented yet.
        pass

    # TODO(praneshp): For now, only addition of scripts. Take care of
    # deletion later
    def audit_modified(self):
        """Watchdog callback: audit config changed; start any new audits."""
        LOG.warning('Audit configuration changed')
        self.futures.append(self.start_scripts('audit'))

    def repair_modified(self):
        """Watchdog callback: repair config changed; start any new repairs."""
        LOG.warning('Repair configuration changed')
        self.futures.append(self.start_scripts('repair'))

    def start_watchdog(self, dir_to_watch):
        """Watch *dir_to_watch*, dispatching the per-file callbacks below on
        change. Returns the watchdog thread."""
        event_fn = {self.audit_cfg: self.audit_modified,
                    self.repair_cfg: self.repair_modified}
        LOG.info(event_fn)
        return utils.watch_dir_for_change(dir_to_watch, event_fn)

    def start_scripts(self, script_type):
        """Start every *script_type* ('audit' or 'repair') script from its
        YAML config that is not already running.

        Returns a list of futures, or None for an unknown type.
        """
        if script_type == 'audit':
            running_scripts = self.running_audits
            setup_func = self.setup_audit
            cfg = self.audit_cfg
        elif script_type == 'repair':
            running_scripts = self.running_repairs
            setup_func = self.setup_react
            cfg = self.repair_cfg
        else:
            LOG.error('Unknown script type %s', script_type)
            return
        scripts = utils.load_yaml(cfg)
        futures = []
        for script in scripts:
            if script['name'] not in running_scripts:
                futures.append(setup_func(script))
        LOG.warning('Running %s scripts %s', script_type,
                    ', '.join(running_scripts))
        return futures

    def setup_react(self, script):
        """Import the repair module named in *script*'s config and submit its
        main() to the executor. Returns the future (None when no module is
        found)."""
        LOG.warning('Setting up reactor %s', script['name'])
        # Pick out relevant info
        # NOTE(review): .next() is Python-2-only; next(...) would be portable.
        data = dict(utils.load_yaml(script['conf']).next())
        react_script = data['script']
        available_modules = utils.find_module(react_script, ['repair'])
        LOG.info('Found these modules: %s', available_modules)
        if not available_modules:
            LOG.error('No module to load')
        else:
            imported_module = utils.import_module(available_modules[0])
            kwargs = data
            kwargs['conf'] = script['conf']
            # add this job to list of running audits
            self.running_repairs.append(script['name'])
            future = self.executor.submit(imported_module.main, **kwargs)
            return future

    def setup_audit(self, script):
        """Read the audit script's config and schedule start_audit() on the
        executor. Returns the future."""
        LOG.warning('Setting up audit script %s', script['name'])
        # Now pick out relevant info
        # NOTE(review): .next() is Python-2-only; next(...) would be portable.
        data = dict(utils.load_yaml(script['conf']).next())
        # stuff for the message queue
        mq_args = {'mq_host': data['mq_host'],
                   'mq_port': data['mq_port'],
                   'mq_user': data['mq_user'],
                   'mq_password': data['mq_password']}
        # general stuff for the audit module
        # TODO(praneshp): later, fix to send only one copy of mq_args
        kwargs = data
        kwargs['mq_args'] = mq_args
        # add this job to list of running audits
        self.running_audits.append(script['name'])
        # start a process for this audit script
        future = self.executor.submit(self.start_audit, **kwargs)
        return future

    def start_audit(self, **kwargs):
        """Run run_audit() forever on the script's cron schedule.

        Blocking loop — intended to run inside the executor.
        """
        LOG.info("Starting audit for %s", kwargs['name'])
        now = datetime.datetime.now()
        schedule = kwargs['schedule']
        cron = croniter.croniter(schedule, now)
        next_iteration = cron.get_next(datetime.datetime)
        while True:
            LOG.warning('Next call at %s', next_iteration)
            # Sleep until the next cron slot, then fire the audit.
            pause.until(next_iteration)
            Engine.run_audit(**kwargs)
            next_iteration = cron.get_next(datetime.datetime)

    @staticmethod
    def run_audit(**kwargs):
        """Import the audit module named in kwargs['module'] and have it
        publish its message."""
        # Put a message on the mq
        #TODO(praneshp): this should be the path with register-audit
        #TODO(praneshp): The whole logic in this function should be in
        # try except blocks
        available_modules = utils.find_module(kwargs['module'], ['audit'])
        LOG.info('Found these modules: %s', available_modules)
        if not available_modules:
            LOG.error('No module to load')
        else:
            imported_module = utils.import_module(available_modules[0])
            audit_obj = imported_module.Audit()
            try:
                audit_obj.send_message(**kwargs)
            except Exception as e:
                LOG.error(e)
|
Python
| 0.000024
|
@@ -2106,97 +2106,8 @@
()%0A%0A
- def register_audit(self):%0A pass%0A%0A def register_repair(self):%0A pass%0A%0A
@@ -2193,17 +2193,16 @@
n later%0A
-%0A
def
|
d75fc1e49076aeb17a4d956fd02e091f195aabf3
|
Remove extra bracket for indexing causing error
|
example/initialize.py
|
example/initialize.py
|
#!/usr/bin/env python
"""An example script to initialize audio lists and speaker configurations.
Usage: initialize.py SOURCE TARGET SAMPLING_RATE
Options:
-h, --help Show the help
"""
from __future__ import division # , unicode_literals
from __future__ import absolute_import, print_function
import os
import shutil
import sys
import six
from docopt import docopt
sys.path.append(os.path.join(os.path.dirname(__file__), "src")) # isort:skip
from src import initialize_speaker # isort:skip # pylint: disable=C0413
def create_configure(dest, base, exist_ok=False):
    """Create the configuration file *dest* by copying the template *base*.

    Parameters
    ----------
    dest : str or path-like
        Path of the configuration file being created.
    base : str or path-like
        Path of the template configuration file.
    exist_ok : bool
        When False, raise if *dest* already exists (IOError on Python 2,
        FileExistsError on Python 3 — both catchable as IOError).

    Raises
    ------
    IOError (Python 2) or FileExistsError (Python 3 or later)
        If `exist_ok` is False and *dest* already exists.
    """
    if os.path.exists(dest) and not exist_ok:
        raise (IOError if six.PY2 else FileExistsError)(
            "The configuration file {} already exists.".format(dest))

    print("Generate {}".format(dest), file=sys.stderr)
    shutil.copy(base, dest)
def create_list(dest, wav_dir, exist_ok=False):
    """Create the audio list file *dest* from the .wav files in *wav_dir*.

    Each line is "<basename of dest>/<wav file name>"; non-.wav files are
    ignored.

    Parameters
    ----------
    dest : str or path-like
        Path of the list file being created.
    wav_dir : str or path-like
        Directory containing the audio files.
    exist_ok : bool
        When False, raise if *dest* already exists (IOError on Python 2,
        FileExistsError on Python 3 — both catchable as IOError).

    Raises
    ------
    IOError (Python 2) or FileExistsError (Python 3 or later)
        If `exist_ok` is False and *dest* already exists.
    """
    if os.path.exists(dest) and not exist_ok:
        raise (IOError if six.PY2 else FileExistsError)(
            "The list file {} already exists.".format(dest))

    print("Generate {}".format(dest))
    label = os.path.basename(dest)
    with open(dest, "w") as list_file:
        for file_name in os.listdir(wav_dir):
            if os.path.splitext(file_name)[1] == ".wav":
                print(os.path.join(label, file_name), file=list_file)
# File-name suffixes for generated audio lists ("_train.list" / "_eval.list").
LIST_EXTENSION = ".list"
USES = ("train", "eval")
LIST_SUFFIXES = {
    use: "_" + use + LIST_EXTENSION for use in USES}
YML_EXTENSION = ".yml"

# Directory layout of the example project, relative to this script.
EXAMPLE_ROOT_DIR = os.path.dirname(__file__)
CONF_DIR = os.path.join(EXAMPLE_ROOT_DIR, "conf")
DATA_DIR = os.path.join(EXAMPLE_ROOT_DIR, "data")
LIST_DIR = os.path.join(EXAMPLE_ROOT_DIR, "list")
WAV_DIR = os.path.join(DATA_DIR, "wav")
if __name__ == "__main__":
    # Parse CLI arguments (SOURCE, TARGET, SAMPLING_RATE — see module docstring).
    args = docopt(__doc__)  # pylint: disable=invalid-name
    LABELS = {label: args[label.upper()] for label in ("source", "target")}
    SOURCE_TARGET_PAIR = LABELS["source"] + "-" + LABELS["target"]
    PAIR_DIR = os.path.join(DATA_DIR, "pair",
                            SOURCE_TARGET_PAIR)
    # Per-speaker train/eval list paths, keyed by "source"/"target".
    LIST_FILES = {
        speaker_part: {
            use: os.path.join(LIST_DIR, speaker_label + LIST_SUFFIXES[use])
            for use in USES}
        for speaker_part, speaker_label in LABELS.items()}
    # Per-speaker YAML config path, keyed by "source"/"target".
    SPEAKER_CONF_FILES = {
        part: os.path.join(
            CONF_DIR, "speaker", label + YML_EXTENSION)
        for part, label in LABELS.items()}
    PAIR_CONF_FILE = os.path.join(
        CONF_DIR, "pair", SOURCE_TARGET_PAIR + YML_EXTENSION)
    SAMPLING_RATE = args["SAMPLING_RATE"]
    print("""\
##############################################################
### 1. create initial list files ###
##############################################################""")
    # create list files for both the speakers
    for use in USES:
        for part, speaker in LABELS.items():
            create_list(LIST_FILES[part][use], os.path.join(WAV_DIR, speaker))
    print("# Please modify train and eval list files, if you want. #")
    print("""\
##############################################################
### 2. create configure files ###
##############################################################""")
    # create speaker-dependent configure file
    for part, speaker in LABELS.items():
        create_configure(
            # BUG FIX: SPEAKER_CONF_FILES[part] is already the path string;
            # the extra [use] index (a leaked loop variable from step 1)
            # raised a TypeError.
            SPEAKER_CONF_FILES[part],
            os.path.join(
                CONF_DIR, "default",
                "speaker_default_{}{}".format(
                    SAMPLING_RATE, YML_EXTENSION)))
    # create pair-dependent configure file
    create_configure(PAIR_CONF_FILE, os.path.join(
        CONF_DIR, "default", "pair_default.yml"))
    print("""\
##############################################################
### 3. create figures to define F0 range ###
##############################################################""")
    # get F0 range in each speaker
    for part, speaker in LABELS.items():
        initialize_speaker.main(
            speaker, LIST_FILES[part]["train"], WAV_DIR, os.path.join(CONF_DIR, "figure"))
    print("# Please modify f0 range values in speaker-dependent YAML files based on the figure #")
|
Python
| 0
|
@@ -4698,21 +4698,16 @@
ES%5Bpart%5D
-%5Buse%5D
,%0A
|
dbce79102efa8fee233af95939f1ff0b9d060b00
|
Update example workflow to show you can use classes
|
examples/basic.py
|
examples/basic.py
|
import time
from simpleflow import (
activity,
Workflow,
futures,
)
@activity.with_attributes(task_list='quickstart', version='example')
def increment(x):
return x + 1
@activity.with_attributes(task_list='quickstart', version='example')
def double(x):
return x * 2
@activity.with_attributes(task_list='quickstart', version='example')
def delay(t, x):
time.sleep(t)
return x
class BasicWorkflow(Workflow):
name = 'basic'
version = 'example'
task_list = 'example'
def run(self, x, t=30):
y = self.submit(increment, x)
yy = self.submit(delay, t, y)
z = self.submit(double, y)
print '({x} + 1) * 2 = {result}'.format(
x=x,
result=z.result)
futures.wait(yy, z)
return z.result
|
Python
| 0
|
@@ -284,16 +284,153 @@
x * 2%0A%0A
+# A simpleflow activity can be any callable, so a function works, but a class%0A# will also work given the processing happens in __init__()
%0A@activi
@@ -495,25 +495,63 @@
e')%0A
-def delay(
+class Delay(object):%0A def __init__(self,
t, x):%0A
+
@@ -564,16 +564,20 @@
leep(t)%0A
+
retu
@@ -775,17 +775,17 @@
.submit(
-d
+D
elay, t,
|
9308152c67bc2ad2150a76e7897c8fd2568bf590
|
Bump version: 0.0.4 -> 0.0.5
|
conanfile.py
|
conanfile.py
|
from conans import ConanFile
from conans.tools import download, unzip
import os
VERSION = "0.0.4"
class IWYUCTargetCmakeConan(ConanFile):
name = "iwyu-target-cmake"
version = os.environ.get("CONAN_VERSION_OVERRIDE", VERSION)
generators = "cmake"
requires = ("cmake-include-guard/master@smspillaz/cmake-include-guard",
"tooling-find-pkg-util/master@smspillaz/tooling-find-pkg-util",
"tooling-cmake-util/master@smspillaz/tooling-cmake-util")
url = "http://github.com/polysquare/iwyu-target-cmake"
license = "MIT"
options = {
"dev": [True, False]
}
default_options = "dev=False"
def requirements(self):
if self.options.dev:
self.requires("cmake-module-common/master@smspillaz/cmake-module-common")
def source(self):
zip_name = "iwyu-target-cmake.zip"
download("https://github.com/polysquare/"
"iwyu-target-cmake/archive/{version}.zip"
"".format(version="v" + VERSION),
zip_name)
unzip(zip_name)
os.unlink(zip_name)
def package(self):
self.copy(pattern="Find*.cmake",
dst="",
src="iwyu-target-cmake-" + VERSION,
keep_path=True)
self.copy(pattern="*.cmake",
dst="cmake/iwyu-target-cmake",
src="iwyu-target-cmake-" + VERSION,
keep_path=True)
|
Python
| 0
|
@@ -93,9 +93,9 @@
0.0.
-4
+5
%22%0A%0A%0A
|
948d85e7d30fb7f8e4dff4b01f759b971420e9aa
|
Set OpenSSL:shared=False
|
conanfile.py
|
conanfile.py
|
from conans import ConanFile, CMake
import os
import shutil
class ImageFlowConan(ConanFile):
name = "imageflow"
version = "0.1.0"
license = "AGPLv3"
settings = "os", "compiler", "build_type", "arch"
requires = "littlecms/2.7@lasote/stable", "libpng/1.6.21@lasote/stable", "libjpeg-turbo/1.4.2@imazen/testing" , "giflib/5.1.3@lasote/stable"
options = {"shared": [True, False]}
generators = "cmake"
default_options = "shared=False", "libjpeg-turbo:shared=False", "libpng:shared=False", \
"zlib:shared=False", "libcurl:shared=False", "OpenSSL:shared=True", \
"OpenSSL:no_electric_fence=True", \
"imageflow:shared=True"
exports = "lib/*", "tests/*", "CMakeLists.txt", "imageflow.h", "imageflow_advanced.h"
def config(self):
if self.settings.os != "Windows": # giflib/littlecms must be shared on windows?
self.options["giflib"].shared = False
self.options["littlecms"].shared = False
#TODO: Drop libcurl dependency when we port visual tests to Rust
self.requires("libcurl/7.47.1@lasote/stable")
self.requires("OpenSSL/1.0.2i@lasote/stable")
if self.settings.os == "Macos":
self.options["libcurl"].darwin_ssl = False
self.options["libcurl"].custom_cacert = True
if self.scope.build_tests:
self.requires("catch/1.3.0@TyRoXx/stable", dev=True)
if self.settings.os != "Windows": # Not supported in windows
self.requires("theft/0.2.0@lasote/stable", dev=True)
def imports(self):
self.copy("*.so", dst="bin", src="bin") # From bin to bin
self.copy("*.dll", dst="bin", src="bin") # From bin to bin
self.copy("*.dylib*", dst="bin", src="lib") # From lib to bin
self.copy("*cacert.pem", dst="bin") # Allows use libcurl with https without problems - except on darwin
self.copy("*cacert.pem", dst=".") # Allows use libcurl with https without problems
self.copy("*.a", dst=".") # Copy all static libs to use in cargo build.
def clean_cmake_cache(self, build_dir):
def on_build_dir(x):
return os.path.join(build_dir, x)
try:
shutil.rmtree(on_build_dir("CMakeFiles"))
os.remove(on_build_dir("CMakeCache.txt"))
os.remove(on_build_dir("cmake_install.cmake"))
os.remove(on_build_dir("Makefile"))
except:
pass
def build(self):
self.output.warn('build_tests=%s debug_build=%s coverage=%s profiling=%s shared=%s' % (self.scope.build_tests, self.scope.debug_build, self.scope.coverage, self.scope.profiling, self.options.shared))
build_dir = os.path.join(self.conanfile_directory, "build")
if not os.path.exists(build_dir):
os.mkdir(build_dir)
else:
self.clean_cmake_cache(build_dir)
os.chdir(build_dir)
cmake = CMake(self.settings)
cmake_settings = ""
if self.scope.dev and self.scope.coverage:
cmake_settings += " -DCOVERAGE=ON"
if self.scope.dev and self.scope.debug_build:
cmake_settings += " -DDEBUG_BUILD=ON"
if self.scope.dev and self.scope.build_tests:
cmake_settings += " -DENABLE_TEST=ON"
if self.scope.dev and self.scope.profiling:
cmake_settings += " -DSKIP_LIBRARY=ON -DENABLE_TEST=OFF -DENABLE_PROFILING=ON"
cmake_settings += " -DBUILD_SHARED_LIBS=ON" if self.options.shared else " -DBUILD_SHARED_LIBS=OFF"
cmake_command = 'cmake "%s" %s %s' % (self.conanfile_directory, cmake.command_line, cmake_settings)
cmake_build_command = 'cmake --build . %s' % cmake.build_config
cmake_valgrind = "-D ExperimentalMemCheck" if self.scope.valgrind else ""
cmake_test_command = 'ctest -V -C Release %s' % cmake_valgrind
self.output.warn(cmake_command)
self.run(cmake_command)
self.output.warn(cmake_build_command)
self.run(cmake_build_command)
if self.scope.dev and self.scope.build_tests:
if self.scope.skip_test_run:
self.output.warn("Skipping tests; skip_test_run=False (perhaps for later valgrind use?)")
self.output.warn("Would have run %s" % cmake_test_command)
else:
self.output.warn(cmake_test_command)
self.run(cmake_test_command)
else:
self.output.warn("Skipping tests; build_tests=False")
def package(self):
self.copy("imageflow.h", dst="include", src="", keep_path=False)
self.copy("imageflow_advanced.h", dst="include", src="", keep_path=False)
self.copy("*.h", dst="include", src="lib", keep_path=True)
self.copy("*.so*", dst="lib", src="build/", keep_path=False)
self.copy("*.a", dst="lib", src="build", keep_path=False)
self.copy("*.lib", dst="lib", src="build", keep_path=False)
self.copy("*.dll", dst="bin", src="build", keep_path=False)
def package_info(self):
self.cpp_info.libs = ['imageflow']
|
Python
| 0.999997
|
@@ -583,19 +583,20 @@
:shared=
-Tru
+Fals
e%22, %5C%0A
|
09ab8f6290e3c5bf33e01857d11b124444a4c990
|
add sendaddr support to isotp
|
examples/isotp.py
|
examples/isotp.py
|
DEBUG = False
def msg(x):
if DEBUG:
print "S:",x.encode("hex")
if len(x) <= 7:
ret = chr(len(x)) + x
else:
assert False
return ret.ljust(8, "\x00")
def isotp_send(panda, x, addr, bus=0):
if len(x) <= 7:
panda.can_send(addr, msg(x), bus)
else:
ss = chr(0x10 + (len(x)>>8)) + chr(len(x)&0xFF) + x[0:6]
x = x[6:]
idx = 1
sends = []
while len(x) > 0:
sends.append(((chr(0x20 + (idx&0xF)) + x[0:7]).ljust(8, "\x00")))
x = x[7:]
idx += 1
# actually send
panda.can_send(addr, ss, bus)
rr = recv(panda, 1, addr+8, bus)[0]
panda.can_send_many([(addr, None, s, 0) for s in sends])
kmsgs = []
def recv(panda, cnt, addr, nbus):
global kmsgs
ret = []
while len(ret) < cnt:
kmsgs += panda.can_recv()
nmsgs = []
for ids, ts, dat, bus in kmsgs:
if ids == addr and bus == nbus and len(ret) < cnt:
ret.append(dat)
else:
pass
kmsgs = nmsgs
return map(str, ret)
def isotp_recv(panda, addr, bus=0):
msg = recv(panda, 1, addr, bus)[0]
if ord(msg[0])&0xf0 == 0x10:
# first
tlen = ((ord(msg[0]) & 0xf) << 8) | ord(msg[1])
dat = msg[2:]
# 0 block size?
CONTINUE = "\x30" + "\x00"*7
panda.can_send(addr-8, CONTINUE, bus)
idx = 1
for mm in recv(panda, (tlen-len(dat) + 7)/8, addr, bus):
assert ord(mm[0]) == (0x20 | idx)
dat += mm[1:]
idx += 1
elif ord(msg[0])&0xf0 == 0x00:
# single
tlen = ord(msg[0]) & 0xf
dat = msg[1:]
else:
assert False
dat = dat[0:tlen]
if DEBUG:
print "R:",dat.encode("hex")
return dat
|
Python
| 0
|
@@ -1004,24 +1004,39 @@
addr, bus=0
+, sendaddr=None
):%0A msg = r
@@ -1064,16 +1064,67 @@
us)%5B0%5D%0A%0A
+ if sendaddr is None:%0A sendaddr = addr-8%0A %0A%0A
if ord
@@ -1302,22 +1302,24 @@
an_send(
+send
addr
--8
, CONTIN
|
ba1186c47e5f3466faeea9f2d5bf96948d5f7183
|
Add --strict flag to raise exception on undefined variables
|
confuzzle.py
|
confuzzle.py
|
import sys
import argparse
import yaml
from jinja2 import Template
def render(template_string, context_dict):
template = Template(template_string)
return template.render(**context_dict)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('template', nargs='?', type=argparse.FileType('r'), default=sys.stdin, help="Config file template. If not supplied, stdin is used")
parser.add_argument('config', type=argparse.FileType('r'), help="YAML data file to read")
parser.add_argument('--out', '-o', dest='out', type=argparse.FileType('w'), default=sys.stdout, help="Output file to write. If not supplied, stdout is used")
args = parser.parse_args()
context_dict = yaml.load(args.config.read())
template_string = args.template.read()
rendered = render(template_string, context_dict)
args.out.write(rendered)
if __name__ == "__main__":
main()
|
Python
| 0
|
@@ -36,35 +36,21 @@
aml%0A
-from jinja2 import Template
+import jinja2
%0A%0A%0Ad
@@ -88,16 +88,30 @@
ext_dict
+, strict=False
):%0A t
@@ -120,16 +120,23 @@
plate =
+jinja2.
Template
@@ -153,16 +153,95 @@
string)%0A
+ if strict:%0A template.environment.undefined = jinja2.StrictUndefined%0A
retu
@@ -740,16 +740,153 @@
s used%22)
+%0A parser.add_argument('--strict', dest='strict', action='store_true', default=False, help=%22Raise an exception on undefined variables%22)
%0A%0A ar
@@ -1055,16 +1055,29 @@
ext_dict
+, args.strict
)%0A ar
|
e7eab3521d80e83a5a360c6b196cd0b1f914ca08
|
add URL encoding to meme params
|
examples/leona.py
|
examples/leona.py
|
import sys
import json
import random
import requests
from fbchat.models import ThreadType, MessageReaction
from googletrans import Translator
from bot import Bot
import strings
# THREAD_ID is only used for readding admins. Leona will work in any chat she's
# added to.
THREAD_ID = "1261018777320723"
class LeonaBot(Bot):
def __init__(self, email, password, name, admins=[], protected=[],
*args, **kwargs):
super(LeonaBot, self).__init__(email=email, password=password,
name=name, admins=admins,
protected=protected, *args, **kwargs)
self.add_message_handler("say", self.say_cmd, admin=True)
self.add_message_handler("protect", self.protect_cmd, admin=True)
self.add_message_handler("unprotect", self.unprotect_cmd, admin=True)
self.add_message_handler("meme", self.meme_cmd)
self.add_message_handler("readd", self.readd_cmd, admin=True)
self.add_message_handler("trump", self.trump_cmd)
self.add_message_handler("love", self.love_cmd)
self.add_message_handler("translate", self.translate_cmd)
self.translator = Translator()
def translate_cmd(self, msg):
"""Translate any text to English."""
text = msg['message'][10:].strip()
result = "Translation: " + self.translator.translate(text).text
self.sendMessage(result, msg['thread_id'], msg['thread_type'])
def love_cmd(self, msg):
"""I'll heart react your message."""
self.reactToMessage(msg['mid'], MessageReaction.LOVE)
def say_cmd(self, msg):
"""Repeat the words following 'say'."""
self.sendMessage(msg['message'][4:].strip(), msg['thread_id'],
msg['thread_type'])
def protect_cmd(self, msg):
"""Readd the specified person if they are removed."""
protect_name = msg['message'][8:].strip('@').strip()
protect_id = self.protect_user(protect_name)
if protect_id:
self.sendMessage("{} ({}) added to protected users list.".format(
protect_name, protect_id),
msg['thread_id'], msg['thread_type'])
else:
self.sendMessage("I couldn't find a user with that name.",
msg['thread_id'], msg['thread_type'])
def unprotect_cmd(self, msg):
"""Stop readding the specified person."""
uprotect_name = msg['message'][10:].strip('@').strip()
uprotect_id = self.unprotect_user(uprotect_name)
if uprotect_id:
self.sendMessage("{} removed from protected users list.".format(
uprotect_name),
msg['thread_id'], msg['thread_type'])
else:
self.sendMessage("I couldn't find a user with that name.",
msg['thread_id'], msg['thread_type'])
def meme_cmd(self, msg):
"""Generate a meme. Use 'meme help' command for more info."""
def clean(s):
"""Clean text before sending it to memegen."""
s = '_'.join(s.split())
s = s.replace('?', '~q').replace('%', '~p').replace('#', '~h')
return s
meme_data = msg['message'].lower().split(' ')
if meme_data == ["meme", "help"]:
self.sendMessage(strings.meme_help,
msg['thread_id'], msg['thread_type'])
else:
meme_type = meme_data[1]
meme_text = [t.strip()
for t in ' '.join(meme_data[2:]).split('/', 1)]
top_text = clean(meme_text[0])
if len(meme_text) > 1:
bottom_text = clean(meme_text[1])
img_url = "https://memegen.link/{}/{}/{}.jpg".format(
meme_type, top_text, bottom_text)
else:
img_url = "https://memegen.link/{}/{}.jpg".format(
meme_type, top_text)
self.sendRemoteImage(img_url, "",
msg['thread_id'], msg['thread_type'])
def readd_cmd(self, msg):
"""I'll readd you to the group chat."""
if THREAD_ID:
self.sendMessage("Readding you to the chat.",
msg['thread_id'], msg['thread_type'])
self.addUsersToGroup(msg['author_id'], THREAD_ID)
else:
self.sendMessage("Error: Thread ID not specified in the code.",
msg['thread_id'], msg['thread_type'])
sys.stderr.write(
"Error: You must specify thread ID in the code.\n")
def trump_cmd(self, msg):
"""Search for Trump quotes."""
params = {'query': msg['message'].split(' ', 1)[1]}
r = requests.get("https://api.tronalddump.io/search/quote",
params=params)
data = json.loads(r.content)
count = data['count']
if count < 1:
self.sendMessage("I couldn't find a quote with that keyword.",
msg['thread_id'], msg['thread_type'])
return
n = random.randint(0, count - 1)
quote = data['_embedded']['quotes'][n]['value']
self.sendMessage(quote, msg['thread_id'], msg['thread_type'])
def protect_user(self, user_name):
users = self.fetchAllUsers()
users = filter(lambda u: u.name == user_name, users)
if users:
uid = users[0].uid
if uid not in self.protected:
self.protected.append(uid)
return uid
else:
return None
def unprotect_user(self, user_name):
users = self.fetchAllUsers()
users = filter(lambda u: u.name == user_name, users)
if users:
uid = users[0].uid
if uid in self.protected:
self.protected.remove(uid)
return uid
else:
return None
def onPersonRemoved(self, removed_id, author_id, thread_id, **kwargs):
if (removed_id != self.uid and
author_id != self.uid and
removed_id != author_id and
removed_id in self.protected):
# and author_id not in self.admins):
self.addUsersToGroup(removed_id, thread_id=thread_id)
|
Python
| 0
|
@@ -29,16 +29,30 @@
t random
+%0Aimport urllib
%0A%0Aimport
@@ -3219,16 +3219,72 @@
, '~h')%0A
+ s = s.replace('/', '~s').replace('%22', %22''%22)%0A
@@ -3710,16 +3710,71 @@
ext%5B0%5D)%0A
+ top_text = urllib.quote(top_text, safe=%22%22)%0A
@@ -3850,16 +3850,81 @@
ext%5B1%5D)%0A
+ bottom_text = urllib.quote(bottom_text, safe=%22%22)%0A
|
bfd1e90365446fe1a7c1e5ae710dbf497cc405fb
|
Fix test with newline problems in Windows
|
utest/writer/test_filewriters.py
|
utest/writer/test_filewriters.py
|
from __future__ import with_statement
import unittest
from StringIO import StringIO
from robot.parsing import TestCaseFile
from robot.parsing.model import TestCaseTable
from robot.utils.asserts import assert_equals
from robot.utils import ET, ETSource
def create_test_case_file():
data = TestCaseFile(source='foo.txt')
table = TestCaseTable(data)
data.testcase_table = table
table.set_header(['test case', 'some', 'and other'])
test = table.add('A test')
test.add_step(['A kw', 'an arg'])
return data
class _WriterTestCase(unittest.TestCase):
def _test_rows_are_not_split_if_there_are_headers(self, format='txt'):
output = self._add_long_step_and_save(format)
assert_equals(len(output.splitlines()), 4)
def _add_long_step_and_save(self, format):
data = create_test_case_file()
data.testcase_table.tests[0].add_step(['A kw', '1', '2', '3', '4', '6', '7', '8'])
output = StringIO()
data.save(format=format, output=output)
return output.getvalue().strip()
class TestSpaceSeparatedWriter(_WriterTestCase):
def test_end_of_line_whitespace_is_removed(self):
output = StringIO()
create_test_case_file().save(output=output)
expected = '''
*** test case *** some and other
A test
A kw an arg'''.strip()
for exp, act in zip(expected.splitlines(), output.getvalue().splitlines()):
assert_equals(repr(exp), repr(act))
def test_rows_are_not_split_if_there_are_headers(self):
self._test_rows_are_not_split_if_there_are_headers()
def test_configuring_number_of_separating_spaces(self):
output = StringIO()
create_test_case_file().save(output=output, txt_separating_spaces=8)
expected = '''\
*** test case *** some and other
A test
A kw an arg'''.strip()
actual = output.getvalue().strip()
assert_equals(repr(expected), repr(actual))
class TestTsvWriter(_WriterTestCase):
def test_rows_are_not_split_if_there_are_headers(self):
try:
import csv
except ImportError:
pass # csv not available on IronPython 2.7
else:
self._test_rows_are_not_split_if_there_are_headers('tsv')
class TestHtmlWriter(_WriterTestCase):
def test_rows_are_not_split_if_there_are_headers(self):
output = self._add_long_step_and_save('html')
with ETSource('\n'.join(output.splitlines()[1:])) as source:
tree = ET.parse(source)
lines = tree.findall('body/table/tr')
assert_equals(len(lines), 4)
for l in lines:
cols = l.findall('td') or l.findall('th')
assert_equals(len(cols), 9)
if __name__ == '__main__':
unittest.main()
|
Python
| 0.000001
|
@@ -1973,21 +1973,16 @@
als(
-repr(
expected
), r
@@ -1981,22 +1981,41 @@
cted
-), repr(actual
+.splitlines(), actual.splitlines(
))%0A%0A
|
237b1f889b060019f61c79ee1dd49a0c6993fa46
|
fix rest fixture
|
tests/rest_fixtures.py
|
tests/rest_fixtures.py
|
import json
import aiohttp
import pytest
from aiohttp import web
from yarl import URL
from aiohttp_admin.utils import jsonify
class RestClientError(Exception):
"""Base exception class for RESTClient"""
@property
def status_code(self):
return self.args[0]
class PlainRestError(RestClientError):
"""Answer is not JSON, for example for 500 Internal Server Error"""
@property
def error_text(self):
return self.args[1]
class JsonRestError(RestClientError):
"""Answer is JSON error report"""
@property
def error_json(self):
return self.args[1]
class AdminRESTClient:
def __init__(self, url, *, admin_prefix=None, headers=None, loop):
self._loop = loop
self._url = URL(url)
self._admin_prefix = admin_prefix or 'admin'
self._session = aiohttp.ClientSession(loop=loop)
self._headers = headers or {}
RestClientError = RestClientError
JsonRestError = JsonRestError
@property
def base_url(self):
return self._url
@property
def admin_prefix(self):
return self._admin_prefix
async def request(self, method, path, data=None, params=None,
headers=None, json_dumps=True, token=None,
**kwargs):
url = self._url / path
if json_dumps and (data is not None):
data = jsonify(data).encode('utf-8')
h = self._headers.copy()
if headers:
h.update(headers)
if token:
h.update({"Authorization": token})
resp = await self._session.request(method, str(url),
params=params, data=data,
headers=h, **kwargs)
return resp
async def handle_response(self, resp):
body = await resp.read()
if resp.status in (200, 201):
jsoned = await resp.json()
return jsoned
elif resp.status == 500:
raise PlainRestError(body.decode('utf-8'))
else:
try:
jsoned = await resp.json(encoding='utf-8')
except ValueError:
raise PlainRestError(body.decode('utf-8'))
else:
raise JsonRestError(resp.status, jsoned)
def close(self):
# TODO: make coroutine
if self._session:
self._session.close()
def set_token(self, token):
self._headers["Authorization"] = token
async def create(self, resource, data, **kw):
url = '{}/{}'.format(self._admin_prefix, resource)
resp = await self.request("POST", url, data=data, *kw)
answer = await self.handle_response(resp)
return answer
async def detail(self, resource, entity_id, **kw):
path = '{}/{}/{}'.format(self._admin_prefix, resource, entity_id)
resp = await self.request("GET", path, **kw)
answer = await self.handle_response(resp)
return answer
async def list(self, resource, page=1, per_page=30, sort_field=None,
sort_dir=None, filters=None, **kw):
url = '{}/{}'.format(self._admin_prefix, resource)
f = json.dumps(filters or {})
query = {'_page': page, '_perPage': per_page, '_filters': f}
sort_field and query.update({'_sortField': sort_field})
sort_dir and query.update({'_sortDir': sort_dir})
resp = await self.request("GET", url, params=query, **kw)
answer = await self.handle_response(resp)
return answer
async def update(self, resource, entity_id, data, **kw):
path = '{}/{}/{}'.format(self._admin_prefix, resource, entity_id)
resp = await self.request("PUT", path, data=data, **kw)
answer = await self.handle_response(resp)
return answer
async def delete(self, resource, entity_id, **kw):
path = '{}/{}/{}'.format(self._admin_prefix, resource, entity_id)
resp = await self.request("DELETE", path, **kw)
answer = await self.handle_response(resp)
return answer
async def token(self, username, password):
path = '{}/{}'.format(self._admin_prefix, 'token')
data = dict(username=username, password=password)
resp = await self.request("POST", path, data=data)
token = resp.headers.get('X-Token')
await self.handle_response(resp)
return token
async def destroy_token(self, token):
path = '{}/{}'.format(self._admin_prefix, 'logout')
h = {'Authorization': token}
resp = await self.request("DELETE", path, headers=h)
await self.handle_response(resp)
return token
@pytest.yield_fixture
def create_server(loop, unused_port):
app = handler = srv = None
async def create(*, debug=False, ssl_ctx=None, proto='http'):
nonlocal app, handler, srv
app = web.Application(loop=loop)
port = unused_port()
handler = app.make_handler(debug=debug, keep_alive_on=False)
srv = await loop.create_server(handler, '127.0.0.1', port, ssl=ssl_ctx)
if ssl_ctx:
proto += 's'
url = "{}://127.0.0.1:{}".format(proto, port)
return app, url
yield create
async def finish():
if app is None:
return
await handler.finish_connections()
await app.finish()
srv.close()
await srv.wait_closed()
loop.run_until_complete(finish())
@pytest.yield_fixture
def create_app_and_client(create_server, loop):
client = None
async def maker(*, server_params=None, client_params=None):
nonlocal client
if server_params is None:
server_params = {}
server_params.setdefault('debug', False)
server_params.setdefault('ssl_ctx', None)
app, url = await create_server(**server_params)
if client_params is None:
client_params = {}
client = AdminRESTClient(url, **client_params, loop=loop)
return app, client
yield maker
if client is not None:
client.close()
|
Python
| 0
|
@@ -2654,16 +2654,17 @@
a=data,
+*
*kw)%0A
|
b336e83a63722b3a3e4d3f1779686149d5cef8d1
|
Add compatibility for Python 2
|
setuptools/tests/test_setopt.py
|
setuptools/tests/test_setopt.py
|
# coding: utf-8
from __future__ import unicode_literals
import io
import six
from setuptools.command import setopt
from setuptools.extern.six.moves import configparser
class TestEdit:
@staticmethod
def parse_config(filename):
parser = configparser.ConfigParser()
with io.open(filename, encoding='utf-8') as reader:
(parser.read_file if six.PY3 else parser.readfp)(reader)
return parser
@staticmethod
def write_text(file, content):
with io.open(file, 'wb') as strm:
strm.write(content.encode('utf-8'))
def test_utf8_encoding_retained(self, tmpdir):
"""
When editing a file, non-ASCII characters encoded in
UTF-8 should be retained.
"""
config = tmpdir.join('setup.cfg')
self.write_text(config, '[names]\njaraco=йарацо')
setopt.edit_config(str(config), dict(names=dict(other='yes')))
parser = self.parse_config(str(config))
assert parser['names']['jaraco'] == 'йарацо'
assert parser['names']['other'] == 'yes'
|
Python
| 0.00002
|
@@ -810,22 +810,27 @@
te_text(
+str(
config
+)
, '%5Bname
@@ -993,26 +993,30 @@
rser
-%5B
+.get(
'names'
-%5D%5B
+,
'jaraco'
%5D ==
@@ -1011,17 +1011,17 @@
'jaraco'
-%5D
+)
== '%D0%B9%D0%B0%D1%80
@@ -1050,26 +1050,30 @@
rser
-%5B
+.get(
'names'
-%5D%5B
+,
'other'
-%5D
+)
==
|
982641bd913466d8ff45352b85bac8e9e1112ea1
|
Remove workaround not needed since before intake 0.5.2.
|
databroker/in_memory.py
|
databroker/in_memory.py
|
import copy
import event_model
import intake
import intake.catalog
import intake.catalog.local
import intake.source.base
from mongoquery import Query
from .core import parse_handler_registry, discover_handlers, Entry
from .v2 import Broker
class SafeLocalCatalogEntry(intake.catalog.local.LocalCatalogEntry):
# For compat with intake 0.5.1.
# Not necessary after https://github.com/intake/intake/pull/362
# is released.
def describe(self):
return copy.deepcopy(super().describe())
class BlueskyInMemoryCatalog(Broker):
name = 'bluesky-run-catalog' # noqa
def __init__(self, handler_registry=None, root_map=None, query=None,
**kwargs):
"""
This Catalog is backed by Python collections in memory.
Subclasses should define a ``_load`` method (same as any intake
Catalog) that calls this class's ``upsert`` method (which is particular
to this class).
Parameters
----------
handler_registry : dict, optional
Maps each asset spec to a handler class or a string specifying the
module name and class name, as in (for example)
``{'SOME_SPEC': 'module.submodule.class_name'}``. If None, the
result of ``databroker.core.discover_handlers()`` is used.
root_map : dict, optional
Maps resource root paths to different paths.
query : dict, optional
Mongo query that filters entries' RunStart documents
**kwargs :
Additional keyword arguments are passed through to the base class,
Catalog.
"""
self._query = query or {}
if handler_registry is None:
handler_registry = discover_handlers()
parsed_handler_registry = parse_handler_registry(handler_registry)
self.filler = event_model.Filler(
parsed_handler_registry, root_map=root_map, inplace=True)
self._uid_to_run_start_doc = {}
super().__init__(**kwargs)
def upsert(self, start_doc, stop_doc, gen_func, gen_args, gen_kwargs):
if not Query(self._query).match(start_doc):
return
uid = start_doc['uid']
self._uid_to_run_start_doc[uid] = start_doc
entry = Entry(
name=start_doc['uid'],
description={}, # TODO
driver='databroker.core.BlueskyRunFromGenerator',
direct_access='forbid',
args={'gen_func': gen_func,
'gen_args': gen_args,
'gen_kwargs': gen_kwargs,
'filler': self.filler},
cache=None, # ???
parameters=[],
metadata={'start': start_doc, 'stop': stop_doc},
catalog_dir=None,
getenv=True,
getshell=True,
catalog=self)
self._entries[uid] = entry
def search(self, query):
"""
Return a new Catalog with a subset of the entries in this Catalog.
Parameters
----------
query : dict
"""
if self._query:
query = {'$and': [self._query, query]}
cat = type(self)(
query=query,
handler_registry=self.filler.handler_registry,
root_map=self.filler.root_map,
name='search results',
getenv=self.getenv,
getshell=self.getshell,
auth=self.auth,
metadata=(self.metadata or {}).copy(),
storage_options=self.storage_options)
for key, entry in self._entries.items():
args = entry._captured_init_kwargs['args']
cat.upsert(args['gen_func'],
args['gen_args'],
args['gen_kwargs'])
return cat
def __getitem__(self, name):
# If this came from a client, we might be getting '-1'.
try:
N = int(name)
except (ValueError, TypeError):
if name in self._uid_to_run_start_doc:
uid = name
else:
# Try looking up by *partial* uid.
matches = []
for uid, run_start_doc in list(self._uid_to_run_start_doc.items()):
if uid.startswith(name):
matches.append(uid)
if not matches:
raise KeyError(name)
elif len(matches) > 1:
match_list = '\n'.join(matches)
raise ValueError(
f"Multiple matches to partial uid {name!r}:\n"
f"{match_list}")
else:
uid, = matches
else:
# Sort in reverse chronological order (most recent first).
time_sorted = sorted(self._uid_to_run_start_doc.values(),
key=lambda doc: -doc['time'])
if N < 0:
# Interpret negative N as "the Nth from last entry".
if -N > len(time_sorted):
raise IndexError(
f"Catalog only contains {len(time_sorted)} "
f"runs.")
uid = time_sorted[-N - 1]['uid']
else:
# Interpret positive N as
# "most recent entry with scan_id == N".
for run_start_doc in time_sorted:
if run_start_doc.get('scan_id') == N:
uid = run_start_doc['uid']
break
else:
raise KeyError(f"No run with scan_id={N}")
entry = self._entries[uid]
# The user has requested one specific Entry. In order to give them a
# more useful object, 'get' the Entry for them. Note that if they are
# expecting an Entry and try to call ``()`` or ``.get()``, that will
# still work because BlueskyRun supports those methods and will just
# return itself.
return entry.get() # an instance of BlueskyRun
def __len__(self):
return len(self._uid_to_run_start_doc)
|
Python
| 0
|
@@ -241,275 +241,8 @@
r%0A%0A%0A
-class SafeLocalCatalogEntry(intake.catalog.local.LocalCatalogEntry):%0A # For compat with intake 0.5.1.%0A # Not necessary after https://github.com/intake/intake/pull/362%0A # is released.%0A def describe(self):%0A return copy.deepcopy(super().describe())%0A%0A%0A
clas
|
70ccca895892fc81eb07c4d0b4b7cefe17554b77
|
Fix typo
|
src/checker/plugin/links_finder_plugin.py
|
src/checker/plugin/links_finder_plugin.py
|
from bs4 import BeautifulSoup
from yapsy.IPlugin import IPlugin
from requests.exceptions import InvalidSchema
from requests.exceptions import ConnectionError
from requests.exceptions import MissingSchema
import requests
import urlparse
import urllib
import marisa_trie
class LinksFinder(IPlugin):
def __init__(self):
self.database = None
self.types = None
self.trie = None
def setDb(self, DB):
self.database = DB
def setTypes(self, types):
self.types = types
self.trie = marisa_trie.Trie(types)
def check(self, transactionId, content):
""" Najde tagy <a>, <link>, vybere atribut href, ulozi jako odkazy,
stahne obsah jako dalsi transakci.
"""
soup = BeautifulSoup(content, 'html.parser')
uri = self.database.getUri(transactionId)
self.make_links_absolute(soup, uri,'a')
links = soup.find_all('a')
self.check_links(links, "Link to ", transactionId, 'href')
self.make_links_absolute(soup, uri, 'link')
links2 = soup.find_all('link')
self.check_links(links2, "Linked resource: ", transactionId, 'href')
self.make_sources_absolute(soup, uri, 'img')
images = soup.find_all('img')
self.check_links(images, "Image: ", transactionId, 'src')
return
def getId(self):
return "linksFinder"
def getLink(self, url, reqId, srcId):
try:
print "Inspecting "+url
r = requests.head(url)
if r.status_code != 200:
self.database.setDefect(srcId, "badlink", 0, url)
if 'content-type' in r.headers.keys():
ct = r.headers['content-type']
else:
ct = ''
if self.getMaxPrefix(ct) in self.types:
print "Downloading "+url
r = requests.get(url)
self.database.setResponse(reqId, r.status_code, ct, r.text.encode("utf-8").strip()[:65535])
else: print "Content type not accepted: "+ct
except InvalidSchema:
print "Invalid schema"
except ConnectionError:
print "Connection error"
except MissingSchema:
print "Missing schema"
def make_links_absolute(self, soup, url, tag):
print "Make links absolute: "+url
for tag in soup.findAll(tag, href=True):
if 'href' in tag.attrs:
tag['href'] = urlparse.urljoin(url, tag['href'])
def make_sources_absolute(self, soup, url, tag):
for tag in soup.findAll(tag):
tag['src'] = urlparse.urljoin(url, tag['src'])
def check_links(self, links, logMsg, transactionId, tag):
for link in links:
url = link.get(tag)
if url is not None:
urlNoAnchor = url.split('#')[0]
reqId = self.database.setLink(transactionId, urllib.quote(urlNoAnchor.encode('utf-8'())
print logMsg+str(url)
if reqId != -1:
self.getLink(url, reqId, transactionId)
def getMaxPrefix(self, ctype):
prefList = self.trie.prefixes(unicode(ctype, encoding="utf-8"))
if len(prefList) > 0:
return prefList[-1]
else: return ctype
|
Python
| 0.999999
|
@@ -2904,17 +2904,17 @@
('utf-8'
-(
+)
))%0A
|
71f67f02dd26e29002ced50298b245c6114ece3b
|
Update mathfunctions.py
|
Python/Math/mathfunctions.py
|
Python/Math/mathfunctions.py
|
# File with the functions which will be used in math script
# Number to the power of
def po (number, pof):
b = number
for _ in range(pof - 1):
b = int(b) * int(number)
return b
# Factors of a number
def factors (number):
current, ao, nums = 0, 0, []
while current < number:
ao = ao + 1
current = number % ao
if current == 0:
nums.append(ao)
return nums
# Sqare root of number
def sqroot (number):
fac, f = factors (number), ''
for x in fac:
a = x * x
if a == number:
return (x)
f = True
if f != True:
return "No Square Root Found"
# THIS CAN SERIOUSLY BE DONE BETTER WITH CREATING OTHER FUCNTIONS, BUT LEAVING IT HERE FOR NOW...
def lineareq(numbers):
ai = numbers[3]
bi = numbers[1] * -1
ci = numbers[2] * -1
di = numbers[0]
# Calculate the Determinent of the inverse
de = ai * di - bi * ci
# Calculate the final answer, for easy eye viewing
xo = ai * numbers[4]
xoo = bi * numbers[5]
ans1 = xo + xoo
xo = ci * numbers[4]
xoo = di * numbers[5]
ans2 = xo + xoo
# Finish Equation
ans1 = ans1 / de
ans2 = ans2 / de
return ans1, ans2
|
Python
| 0.000003
|
@@ -656,24 +656,715 @@
Found%22%0A
+%0A%0A# Linear Patern Solver %0Adef lseq (ls1, ls2, ls3, ls4):%0A if int(ls2) - int(ls1) == int(ls4) - int(ls3):%0A lsd1 = int(ls2) - int(ls1) # common difference%0A lsc = int(lsd1) - int(ls1) # constant e.g. Tn = xn + c%0A lsc = int(lsc) * -1%0A if lsd1 == 1: # added to change Tn = 1n to Tn = n%0A return(%22Tn = %25sn+%22 %25 (lsd1) + (%22%25s%22 %25 (lsc)))%0A elif lsc == 0: # added to prevent problem where 0 is neither '+' or '-'. So a sequence: 1;2;3;4 -%3E Tn = n0%0A return(%22Tn = %25sn%22 %25 (lsd1))%0A else:%0A return(%22Tn = %25sn+%22 %25 (lsd1) + (%22%25s%22 %25 (lsc)))%0A%0A elif ls2 - ls1 != ls4 - ls3:%0A return(%22This is not a Linear Equation!%22)%0A
%0A %0A# THIS
|
4befff8ccefad76d0d846e73b499e7229df9482d
|
Configure django-dbbackup
|
fat/settings.py
|
fat/settings.py
|
"""
Django settings for fat project.
Generated by 'django-admin startproject' using Django 1.9.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
URL_SRC = "https://github.com/softwaresaved/fat"
VERSION = "0.4.1"
FELLOWS_MANAGEMENT_EMAIL = "fellows-management@software.ac.uk"
SETTINGS_EXPORT = [
'URL_SRC',
'VERSION',
'FELLOWS_MANAGEMENT_EMAIL',
]
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_iy7)5@ids_q5m(b4!q$-)ie)&-943zx37$+9-9b#988^*f-+4'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_countries',
'crispy_forms',
'social.apps.django_app.default',
'fat',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'fat.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django_settings_export.settings_export',
'social.apps.django_app.context_processors.backends',
'social.apps.django_app.context_processors.login_redirect',
],
},
},
]
WSGI_APPLICATION = 'fat.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTHENTICATION_BACKENDS = (
'social.backends.github.GithubOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
SOCIAL_AUTH_GITHUB_KEY = ''
SOCIAL_AUTH_GITHUB_SECRET = ''
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = False
USE_TZ = False
DATE_FORMAT = "Y-m-d"
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
# Stored files
# https://docs.djangoproject.com/en/1.9/ref/settings/#media-url
MEDIA_URL = '/upload/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'upload')
# Authentication system
# https://docs.djangoproject.com/en/1.9/topics/auth/default/
LOGIN_URL = '/login/' # The URL where requests are redirected for login, especially when using the login_required() decorator.
LOGIN_REDIRECT_URL = '/dashboard/'
# Email
# Email backend for development (print on console)
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Email backend for development (save on file)
# EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
# EMAIL_FILE_PATH = '/tmp/fat-emails'
# Default email address to use for various automated correspondence from the site manager(s).
DEFAULT_FROM_EMAIL = 'no-reply@software.ac.uk'
# The email address that error messages come from.
SERVER_EMAIL = 'no-reply@software.ac.uk'
# A list of all the people who get code error notifications.
ADMINS = [
('admin', 'admin@software.ac.uk'),
]
# Subject-line prefix for email messages sent
EMAIL_SUBJECT_PREFIX = "Software Sustaibability Institute's Claimedship Administration Tool"
|
Python
| 0.000003
|
@@ -1361,24 +1361,40 @@
p.default',%0A
+ 'dbbackup',%0A
'fat',%0A%5D
@@ -5110,8 +5110,177 @@
n Tool%22%0A
+%0A# Backup%0ADBBACKUP_STORAGE = 'django.core.files.storage.FileSystemStorage'%0ADBBACKUP_STORAGE_OPTIONS = %7B%0A 'location': os.path.join(BASE_DIR, 'backups'),%0A %7D%0A
|
634199544667f0a78711e540de493d1dbffae795
|
update index views
|
develop/settings.py
|
develop/settings.py
|
"""
Django settings for develop project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ermn@t3e6)2lwtgca9nfyxf$h6b9fpo%(!h%mtgt7tyy2ut6m*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'news.apps.NewsConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'develop.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'develop.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'develop_usta',
'USER': 'develop_user',
'PASSWORD': '7HRdMcOvx6toidiEPFTKCAE8gNdA6C04',
'HOST': 'localhost',
'PORT': '',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'az'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = 'static'
# STATICFILES_DIRS = [
# os.path.join(BASE_DIR, "static"),
# '/home/munis/Documents/unicopycenter/static',
# ]
MEDIA_URL = '/media/'
MEDIA_ROOT = 'media'
|
Python
| 0
|
@@ -809,12 +809,11 @@
G =
-Fals
+Tru
e%0A%0AA
|
1d3eb0bafd46f3e9cfb7d6395ad1a100052ff821
|
Clean up parameter types (#52527)
|
lib/ansible/plugins/doc_fragments/online.py
|
lib/ansible/plugins/doc_fragments/online.py
|
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
# Standard documentation fragment
DOCUMENTATION = '''
options:
api_token:
description:
- Online OAuth token.
aliases: ['oauth_token']
api_url:
description:
- Online API URL
default: 'https://api.online.net'
aliases: ['base_url']
api_timeout:
description:
- HTTP timeout to Online API in seconds.
default: 30
aliases: ['timeout']
validate_certs:
description:
- Validate SSL certs of the Online API.
default: yes
type: bool
notes:
- Also see the API documentation on U(https://console.online.net/en/api/)
- If C(api_token) is not set within the module, the following
environment variables can be used in decreasing order of precedence
C(ONLINE_TOKEN), C(ONLINE_API_KEY), C(ONLINE_OAUTH_TOKEN), C(ONLINE_API_TOKEN)
- If one wants to use a different C(api_url) one can also set the C(ONLINE_API_URL)
environment variable.
'''
|
Python
| 0
|
@@ -17,16 +17,17 @@
f-8 -*-%0A
+%0A
# GNU Ge
@@ -205,16 +205,17 @@
ATION =
+r
'''%0Aopti
@@ -277,16 +277,30 @@
token.%0A
+ type: str%0A
alia
@@ -305,17 +305,17 @@
iases: %5B
-'
+
oauth_to
@@ -317,17 +317,17 @@
th_token
-'
+
%5D%0A api_
@@ -371,16 +371,30 @@
API URL%0A
+ type: str%0A
defa
@@ -441,17 +441,17 @@
s: %5B
-'
+
base_url
'%5D%0A
@@ -446,17 +446,17 @@
base_url
-'
+
%5D%0A api_
@@ -528,16 +528,30 @@
econds.%0A
+ type: int%0A
defa
@@ -576,17 +576,17 @@
s: %5B
-'
+
timeout
-'
+
%5D%0A
@@ -664,16 +664,31 @@
ne API.%0A
+ type: bool%0A
defa
@@ -696,31 +696,16 @@
lt: yes%0A
- type: bool%0A
notes:%0A
|
eb3f93ac64953eacecdd48e2cb8d5ca80554a95b
|
Update search-for-a-range.py
|
Python/search-for-a-range.py
|
Python/search-for-a-range.py
|
# Time: O(logn)
# Space: O(1)
#
# Given a sorted array of integers, find the starting and ending position of a given target value.
#
# Your algorithm's runtime complexity must be in the order of O(log n).
#
# If the target is not found in the array, return [-1, -1].
#
# For example,
# Given [5, 7, 7, 8, 8, 10] and target value 8,
# return [3, 4].
#
class Solution(object):
def searchRange(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
# Find the first index where target <= nums[idx]
left = self.binarySearch(lambda x, y: x <= y, nums, target)
if left >= len(nums) or nums[left] != target:
return [-1, -1]
# Find the first index where target < nums[idx]
right = self.binarySearch(lambda x, y: x < y, nums, target)
return [left, right - 1]
def binarySearch(self, compare, nums, target):
left, right = 0, len(nums)
while left < right:
mid = left + (right - left) / 2
if compare(target, nums[mid]):
right = mid
else:
left = mid + 1
return left
def binarySearch2(self, compare, nums, target):
left, right = 0, len(nums) - 1
while left <= right:
mid = left + (right - left) / 2
if compare(target, nums[mid]):
right = mid - 1
else:
left = mid + 1
return left
def binarySearch3(self, compare, nums, target):
left, right = -1, len(nums)
while right - left > 1:
mid = left + (right - left) / 2
if compare(target, nums[mid]):
right = mid
else:
left = mid
return right
if __name__ == "__main__":
print Solution().searchRange([2, 2], 3)
print Solution().searchRange([5, 7, 7, 8, 8, 10], 8)
|
Python
| 0
|
@@ -625,17 +625,17 @@
x, y: x
-%3C
+%3E
= y, num
@@ -832,17 +832,17 @@
x, y: x
-%3C
+%3E
y, nums
@@ -1066,32 +1066,24 @@
if compare(
-target,
nums%5Bmid%5D):%0A
@@ -1071,32 +1071,40 @@
ompare(nums%5Bmid%5D
+, target
):%0A
@@ -1371,32 +1371,24 @@
if compare(
-target,
nums%5Bmid%5D):%0A
@@ -1376,32 +1376,40 @@
ompare(nums%5Bmid%5D
+, target
):%0A
@@ -1602,25 +1602,25 @@
hile
- right -
left
-%3E
++
1
+ %3C right
:%0A
@@ -1688,16 +1688,8 @@
are(
-target,
nums
@@ -1693,16 +1693,24 @@
ums%5Bmid%5D
+, target
):%0A
|
8831fb698e6ce4c263b1b3f02eba09744b46d64b
|
Remove unused variable (via yapf)
|
basis_set_exchange/curate/readers/cfour.py
|
basis_set_exchange/curate/readers/cfour.py
|
from ... import lut
from ..skel import create_skel
def read_cfour(basis_lines, fname):
'''Reads gbasis-formatted file data and converts it to a dictionary with the
usual BSE fields
Note that the gbasis format does not store all the fields we
have, so some fields are left blank
'''
skipchars = '!#'
basis_lines = [l for l in basis_lines if l and not l[0] in skipchars]
bs_data = create_skel('component')
i = 0
bs_name = None
while i < len(basis_lines):
line = basis_lines[i]
lsplt = line.split(':')
elementsym = lsplt[0]
if bs_name is None:
bs_name = lsplt[1]
elif lsplt[1] != bs_name:
raise RuntimeError("Multiple basis sets in a file")
element_Z = lut.element_Z_from_sym(elementsym)
element_Z = str(element_Z)
if not element_Z in bs_data['elements']:
bs_data['elements'][element_Z] = {}
element_data = bs_data['elements'][element_Z]
if not 'electron_shells' in element_data:
element_data['electron_shells'] = []
i += 2 # Skip comment line
nshell = int(basis_lines[i].strip())
i += 1
# Read in the AM, ngeneral, and nprim for each shell
# This is in a block just after nshell
all_am = [int(x.strip()) for x in basis_lines[i].split()]
i += 1
all_ngen = [int(x.strip()) for x in basis_lines[i].split()]
i += 1
all_nprim = [int(x.strip()) for x in basis_lines[i].split()]
i += 1
assert len(all_am) == nshell
assert len(all_ngen) == nshell
assert len(all_nprim) == nshell
for shell_idx in range(nshell):
shell_am = [all_am[shell_idx]]
ngen = all_ngen[shell_idx]
nprim = all_nprim[shell_idx]
if max(shell_am) <= 1:
func_type = 'gto'
else:
func_type = 'gto_spherical'
shell = {'function_type': func_type, 'region': '', 'angular_momentum': shell_am}
exponents = []
coefficients = []
# Read in exponents block
while len(exponents) < nprim:
line = basis_lines[i].replace('D', 'E')
line = line.replace('d', 'E')
lsplt = line.split()
exponents.extend([x.strip() for x in line.split()])
i += 1
# Read in all coefficients
for prim in range(nprim):
coef_tmp = []
while len(coef_tmp) < ngen:
line = basis_lines[i].replace('D', 'E')
line = line.replace('d', 'E')
lsplt = line.split()
coef_tmp.extend([x.strip() for x in line.split()])
i += 1
coefficients.append(coef_tmp)
shell['exponents'] = exponents
# We need to transpose the coefficient matrix
# (we store a matrix with primitives being the column index and
# general contraction being the row index)
shell['coefficients'] = list(map(list, zip(*coefficients)))
element_data['electron_shells'].append(shell)
return bs_data
|
Python
| 0
|
@@ -2306,45 +2306,8 @@
E')%0A
- lsplt = line.split()%0A
@@ -2659,49 +2659,8 @@
E')%0A
- lsplt = line.split()%0A
|
946b3867f464d96e85056b60d94593346a39cc51
|
add map to tweet list
|
index.py
|
index.py
|
import os
import time
import TwitterAPI
import src.art.fluid
import src.art.gas
# Configuration
twitterAPI = TwitterAPI.TwitterAPI(
consumer_key=os.environ["CONSUMER_KEY"],
consumer_secret=os.environ["CONSUMER_SECRET"],
access_token_key=os.environ["ACCESS_TOKEN_KEY"],
access_token_secret=os.environ["ACCESS_TOKEN_SECRET"]
)
# Generate
types = [src.art.fluid, src.art.gas]
totalTypes = len(types)
current = 0
while True:
print("\x1b[36mIce\x1b[0m Crafting Post 💡")
seedText = types[current].generate()
f = open("art.png", "rb")
twitterAPI.request("statuses/update_with_media", {
"status": seedText
}, {
"media[]": f.read()
})
f.close()
print("\x1b[36mIce\x1b[0m Success \"" + seedText + "\" ✨\n")
current = (current + 1) % totalTypes
time.sleep(1020)
|
Python
| 0.000002
|
@@ -72,16 +72,35 @@
.art.gas
+%0Aimport src.art.map
%0A%0A# Conf
@@ -401,16 +401,29 @@
.art.gas
+, src.art.map
%5D%0AtotalT
|
8db77ca35504dcfcb42dbc072e3dc0e7690f9b41
|
concat the strings properly for the combined first body chunk
|
feather/http.py
|
feather/http.py
|
import BaseHTTPServer
import httplib
import itertools
import socket
import traceback
import urlparse
from feather import connections, requests
__all__ = ["InputFile", "HTTPError", "HTTPRequest", "HTTPRequestHandler",
"HTTPConnection"]
responses = BaseHTTPServer.BaseHTTPRequestHandler.responses
class HTTPRequest(object):
'''a straightforward attribute holder that supports the following names:
* method
* version
* scheme
* host
* path
* querystring
* headers
* content
'''
__slots__ = [
"method",
"version",
"scheme",
"host",
"path",
"querystring",
"headers",
"content"]
def __init__(self, **kwargs):
for name in self.__slots__:
setattr(self, name, kwargs.get(name, None))
class HTTPError(Exception):
pass
class InputFile(socket._fileobject):
"a file object that doesn't attempt to read past a specified length"
def __init__(self, sock, length, mode='rb', bufsize=-1, close=False):
self.length = length
super(InputFile, self).__init__(sock, mode, bufsize, close)
def read(self, size=-1):
size = min(size, self.length)
if size < 0: size = self.length
rc = super(InputFile, self).read(size)
self.length -= max(self.length, len(rc))
return rc
def readlines(self):
text = self.read()
if text[-1] == "\n":
text = text[:-1]
return map(self._line_mapper, text.split("\n"))
@staticmethod
def _line_mapper(l):
return l + '\n'
def _strip_first(iterable):
iterator = iter(iterable)
try:
first = iterator.next()
except StopIteration:
first = ''
return first, iterator
class HTTPRequestHandler(requests.RequestHandler):
TRACEBACK_DEBUG = False
head_string = '''HTTP/%(http_version)s %(code)d %(status)s
%(headers)s
'''.replace('\n', '\r\n').replace('\r\n ', '\r\n')
def format(self, code, status, headers, body_iterable):
headers = '\r\n'.join('%s: %s' % pair for pair in headers)
http_version = ".".join(map(str, self.connection.http_version))
head = self.head_string % locals()
# we don't want the headers to count as a separate chunk, so
# prefix them to the first body chunk and rebuild the iterable
first_chunk, body = _strip_first(body_iterable)
return itertools.chain((head, first_chunk), body)
def error(self, code):
status, long_status = responses[code]
body = (long_status,)
raise HTTPError(code, status, [('content-type', 'text/plain')], body)
def handle(self, request):
handler = getattr(self, "do_%s" % request.method, None)
try:
if not handler:
self.error(405) # Method Not Allowed
return handler(request)
except self.RespondingRightNow, response_error:
return self.format(*(response_error.args))
except NotImplementedError:
self.error(405)
except:
status, long = responses[500]
body_text = self.TRACEBACK_DEBUG and traceback.format_exc() or long
return self.format(500, status, [('content-type', 'text/plain')],
(body_text,))
def do_GET(self, request):
raise NotImplementedError()
do_POST = do_PUT = do_HEAD = do_DELETE = do_GET
class HTTPConnection(connections.TCPConnection):
request_handler = HTTPRequestHandler
# header-parsing class from the stdlib
header_class = httplib.HTTPMessage
# we don't support changing the HTTP version inside a connection,
# because that's just silliness
http_version = (1, 1)
keepalive_timeout = 30
def get_request(self):
self.killable = False
content = InputFile(self.socket, 0)
request_line = content.readline()
if request_line in ('\n', '\r\n'):
request_line = content.readline()
if not request_line:
self.killable = True
return None
method, path, version_string = request_line.split(' ', 2)
version_string = version_string.rstrip()
if not method.isalpha() or method != method.upper():
raise HTTPError(400, "bad HTTP method: %r" % method)
url = urlparse.urlsplit(path)
if version_string[:5] != 'HTTP/':
raise HTTPError(400, "bad HTTP version: %r" % version_string)
try:
version = tuple(int(v) for v in version_string[5:].split("."))
except ValueError:
raise HTTPError(400, "bad HTTP version: %r" % version_string)
headers = self.header_class(content)
return HTTPRequest(
method=method,
version=version,
scheme=url.scheme,
host=url.netloc,
path=url.path,
querystring=url.query,
fragment=url.fragment,
headers=headers,
content=content)
|
Python
| 0.998431
|
@@ -2509,17 +2509,18 @@
in((head
-,
+ +
first_c
@@ -2523,16 +2523,17 @@
st_chunk
+,
), body)
|
34adb8bb30860eb7748188a7d1a9345a09c4519f
|
Implement punctuation filtering
|
index.py
|
index.py
|
from nltk.tokenize import word_tokenize, sent_tokenize
import getopt
import sys
import os
import io
def build_dict(docs):
dictionary = set()
for doc_id, doc in docs.items():
dictionary.update(doc)
dictionary = list(dictionary)
dictionary.sort()
return dictionary
def build_postings(dictionary):
postings = {}
for term in dictionary:
postings[term] = []
return postings
def populate_postings(docs, postings):
for doc_id, doc in docs.items():
for term in set(doc):
postings[term].append(doc_id)
def load_data(dir_doc):
docs = {}
for dirpath, dirnames, filenames in os.walk(dir_doc):
for name in filenames:
file = os.path.join(dirpath, name)
with io.open(file, 'r+') as f:
docs[name] = f.read()
return docs
def preprocess(docs):
processed_docs = {}
for doc_id, doc in docs.items():
processed_docs[doc_id] = set(word_tokenize(doc.lower()))
return processed_docs
def usage():
print("usage: " + sys.argv[0] + " -i directory-of-documents -d dictionary-file -p postings-file")
if __name__ == '__main__':
dir_doc = dict_file = postings_file = None
try:
opts, args = getopt.getopt(sys.argv[1:], 'i:d:p:')
except getopt.GetoptError as err:
usage()
sys.exit(2)
for o, a in opts:
if o == '-i':
dir_doc = a
elif o == '-d':
dict_file = a
elif o == '-p':
postings_file = a
else:
assert False, "unhandled option"
if dir_doc == None or dict_file == None or postings_file == None:
usage()
sys.exit(2)
docs = load_data(dir_doc)
docs = preprocess(docs)
dictionary = build_dict(docs)
postings = build_postings(dictionary)
populate_postings(docs, postings)
|
Python
| 0.999999
|
@@ -92,16 +92,30 @@
mport io
+%0Aimport string
%0A%0Adef bu
@@ -775,16 +775,56 @@
(docs):%0A
+%09punctuations = set(string.punctuation)%0A
%09process
@@ -928,16 +928,73 @@
ower()))
+%0A%09%09processed_docs%5Bdoc_id%5D.difference_update(punctuations)
%0A%0A%09retur
|
e320c8558646233b78760e1c84c5334a3a743d6d
|
Fix test_ensemble on Python 3.5
|
tests/test_ensemble.py
|
tests/test_ensemble.py
|
import pytest
from rasa_core.policies import Policy
from rasa_core.policies.ensemble import PolicyEnsemble
class WorkingPolicy(Policy):
@classmethod
def load(cls, path):
return WorkingPolicy()
def persist(self, path):
pass
def train(self, training_trackers, domain, **kwargs):
pass
def predict_action_probabilities(self, tracker, domain):
pass
def __eq__(self, other):
return isinstance(other, WorkingPolicy)
def test_policy_loading_simple(tmpdir):
original_policy_ensemble = PolicyEnsemble([WorkingPolicy()])
original_policy_ensemble.train([], None)
original_policy_ensemble.persist(tmpdir)
loaded_policy_ensemble = PolicyEnsemble.load(tmpdir)
assert original_policy_ensemble.policies == loaded_policy_ensemble.policies
class LoadReturnsNonePolicy(Policy):
@classmethod
def load(cls, path):
return None
def persist(self, path):
pass
def train(self, training_trackers, domain, **kwargs):
pass
def predict_action_probabilities(self, tracker, domain):
pass
def test_policy_loading_load_returns_none(tmpdir):
original_policy_ensemble = PolicyEnsemble([LoadReturnsNonePolicy()])
original_policy_ensemble.train([], None)
original_policy_ensemble.persist(tmpdir)
with pytest.raises(Exception):
PolicyEnsemble.load(tmpdir)
class LoadReturnsWrongTypePolicy(Policy):
@classmethod
def load(cls, path):
return ""
def persist(self, path):
pass
def train(self, training_trackers, domain, **kwargs):
pass
def predict_action_probabilities(self, tracker, domain):
pass
def test_policy_loading_load_returns_wrong_type(tmpdir):
original_policy_ensemble = PolicyEnsemble([LoadReturnsWrongTypePolicy()])
original_policy_ensemble.train([], None)
original_policy_ensemble.persist(tmpdir)
with pytest.raises(Exception):
PolicyEnsemble.load(tmpdir)
|
Python
| 0.998424
|
@@ -515,17 +515,16 @@
mpdir):%0A
-%0A
orig
@@ -658,31 +658,36 @@
ble.persist(
+str(
tmpdir)
+)
%0A%0A loaded
@@ -725,23 +725,28 @@
le.load(
+str(
tmpdir)
+)
%0A ass
@@ -1309,39 +1309,44 @@
nsemble.persist(
+str(
tmpdir)
+)
%0A%0A with pytes
@@ -1394,23 +1394,28 @@
le.load(
+str(
tmpdir)
+)
%0A%0A%0Aclass
@@ -1920,23 +1920,28 @@
persist(
+str(
tmpdir)
+)
%0A%0A wi
@@ -1997,16 +1997,21 @@
le.load(
+str(
tmpdir)
+)
%0A
|
ed410e81af61699a16c34c1edbbaa18a80bcdcfe
|
use global DocSimServer instance in views
|
docsim/documents/views.py
|
docsim/documents/views.py
|
from ujson import dumps
from django.http import HttpResponse, HttpResponseBadRequest
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from rest_framework.generics import ListAPIView, RetrieveAPIView
from .docsimserver import DocSimServer
from .models import Cluster, Document
from .serializers import ClusterSerializer
ACCEPTED = 202
@csrf_exempt
@require_POST
def add_or_update(request):
id = request.POST.get('id')
text = request.POST.get('text')
if id and text:
Document(id=id, text=text).save()
return HttpResponse(status=ACCEPTED)
else:
return HttpResponseBadRequest()
class ClusterList(ListAPIView):
model = Cluster
serializer_class = ClusterSerializer
class ClusterDetail(RetrieveAPIView):
model = Cluster
serializer_class = ClusterSerializer
@csrf_exempt
@require_POST
def find_similar(request):
try:
text = request.POST['text']
min_score = float(request.POST.get('min_score', .8))
max_results = int(request.POST.get('max_results', 10))
except:
return HttpResponseBadRequest()
id = request.POST.get('id')
doc = Document(id=id, text=text)
dss = DocSimServer()
tokens = doc.tokens()
similar = dss.find_similar({'tokens': tokens}, min_score=min_score,
max_results=max_results)
if id:
doc.save()
dss.server.index([{'id': id, 'tokens': tokens}])
return HttpResponse(content=dumps(similar), content_type='text/json')
|
Python
| 0
|
@@ -388,16 +388,37 @@
D = 202%0A
+DSS = DocSimServer()%0A
%0A%0A@csrf_
@@ -1237,33 +1237,8 @@
xt)%0A
- dss = DocSimServer()%0A
@@ -1273,19 +1273,19 @@
milar =
-dss
+DSS
.find_si
@@ -1429,11 +1429,11 @@
-dss
+DSS
.ser
|
5e336b92c43d9111072e877ac4dd1362ce833740
|
Fix query parameter quoting
|
euca2ools/nc/auth.py
|
euca2ools/nc/auth.py
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2009-2011, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Redistribution and use of this software in source and binary forms, with or
# without modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
#
# Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Mitch Garnaat mgarnaat@eucalyptus.com
import M2Crypto
import base64
import boto.auth_handler
import datetime
import hashlib
import hmac
import time
import urllib
import warnings
from boto.exception import BotoClientError
class EucaRsaAuthV1Handler(boto.auth_handler.AuthHandler):
"""Provides Eucalyptus NC Authentication."""
capability = ['euca-rsa-v1', 'euca-nc']
def __init__(self, host, config, provider):
boto.auth_handler.AuthHandler.__init__(self, host, config, provider)
self.hmac = hmac.new(provider.secret_key, digestmod=hashlib.sha1)
self.private_key_path = None
def _calc_signature(self, params, headers, verb, path):
boto.log.debug('using euca_signature')
string_to_sign = '%s\n%s\n%s\n' % (verb, headers['Date'], path)
keys = params.keys()
keys.sort()
pairs = []
for key in keys:
val = params[key]
pairs.append(urllib.quote(key, safe='') + '=' + urllib.quote(val, safe='-_~'))
qs = '&'.join(pairs)
boto.log.debug('query string: %s' % qs)
string_to_sign += qs
hmac = self.hmac.copy()
hmac.update(string_to_sign)
sha_manifest = hashlib.sha1()
sha_manifest.update(string_to_sign)
private_key = M2Crypto.RSA.load_key(self.private_key_path)
signature_value = private_key.sign(sha_manifest.digest())
b64 = base64.b64encode(signature_value)
boto.log.debug('len(b64)=%d' % len(b64))
boto.log.debug('base64 encoded digest: %s' % b64)
return (qs, b64)
def add_auth(self, http_request, **kwargs):
headers = http_request.headers
params = http_request.params
qs, signature = self._calc_signature(http_request.params,
http_request.headers,
http_request.method,
http_request.path)
headers['EucaSignature'] = signature
boto.log.debug('query_string: %s Signature: %s' % (qs, signature))
if http_request.method == 'POST':
headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
http_request.body = qs
else:
http_request.body = ''
class EucaNCAuthHandler(EucaRsaAuthV1Handler):
    """Deprecated alias kept so existing configurations keep working."""
    def __init__(self, host, config, provider):
        message = ('EucaNCAuthHandler has been renamed to '
                   'EucaRsaAuthV1Handler')
        warnings.warn(message, DeprecationWarning)
        EucaRsaAuthV1Handler.__init__(self, host, config, provider)
class EucaRsaAuthV2Handler(boto.auth_handler.AuthHandler):
    """Provides authentication for inter-component requests.

    Implements the EUCA2-RSA-SHA256 scheme: a canonical form of the request
    is RSA-SHA256 signed, and the Authorization header carries the signing
    certificate's fingerprint, the signed header list and the signature.
    """
    capability = ['euca-rsa-v2']

    def __init__(self, host, config, provider):
        boto.auth_handler.AuthHandler.__init__(self, host, config, provider)
        # Both paths must be set by the caller before the handler is used.
        self.cert_path = None
        self.private_key_path = None

    def add_auth(self, http_request, **kwargs):
        """Sign *http_request* in place, replacing any Authorization header."""
        if 'Authorization' in http_request.headers:
            del http_request.headers['Authorization']
        now = datetime.datetime.utcnow()
        http_request.headers['Date'] = now.strftime('%Y%m%dT%H%M%SZ')

        cert_fp = self._get_fingerprint()
        headers_to_sign = self._get_headers_to_sign(http_request)
        signed_headers = self._get_signed_headers(headers_to_sign)
        boto.log.debug('SignedHeaders:%s', signed_headers)
        canonical_request = self._get_canonical_request(http_request)
        boto.log.debug('CanonicalRequest:\n%s', canonical_request)
        signature = self._sign(canonical_request)
        boto.log.debug('Signature:%s', signature)
        auth_header = ' '.join(('EUCA2-RSA-SHA256', cert_fp, signed_headers,
                                signature))
        http_request.headers['Authorization'] = auth_header

    def _get_fingerprint(self):
        """Return the lowercased fingerprint of the signing certificate."""
        cert = M2Crypto.X509.load_cert(self.cert_path)
        return cert.get_fingerprint().lower()

    def _sign(self, canonical_request):
        """RSA-SHA256 sign the canonical request; return base64 text."""
        privkey = M2Crypto.RSA.load_key(self.private_key_path)
        digest = hashlib.sha256(canonical_request).digest()
        return base64.b64encode(privkey.sign(digest, algo='sha256'))

    def _get_canonical_request(self, http_request):
        """Assemble the five newline-joined canonical request components."""
        # 1. request method
        method = http_request.method.upper()
        # 2. CanonicalURI
        c_uri = self._get_canonical_uri(http_request)
        # 3. CanonicalQueryString
        c_querystr = self._get_canonical_querystr(http_request)
        # 4. CanonicalHeaders
        headers_to_sign = self._get_headers_to_sign(http_request)
        c_headers = self._get_canonical_headers(headers_to_sign)
        # 5. SignedHeaders
        s_headers = self._get_signed_headers(headers_to_sign)
        return '\n'.join((method, c_uri, c_querystr, c_headers, s_headers))

    def _get_canonical_uri(self, http_request):
        return http_request.path or '/'

    def _get_canonical_querystr(self, http_request):
        """Return the sorted, percent-encoded canonical query string."""
        params = []
        for key, val in http_request.params.iteritems():
            # Bug fix: this previously quoted the undefined name ``param``,
            # raising NameError for any request that carried parameters; the
            # loop variable is ``key``. '/' is also dropped from the safe set
            # for values so they are fully percent-encoded.
            params.append(urllib.quote(key, safe='/~') + '=' +
                          urllib.quote(str(val), safe='~'))
        return '&'.join(sorted(params))

    def _get_headers_to_sign(self, http_request):
        """Return all request headers except Authorization, plus Host."""
        headers = {'Host': http_request.host}
        for key, val in http_request.headers.iteritems():
            if key.lower() != 'authorization':
                headers[key] = val
        return headers

    def _get_canonical_headers(self, headers):
        """Return sorted 'name:value' lines, lowercased and stripped."""
        header_strs = [key.lower().strip() + ':' + val.strip()
                       for key, val in headers.iteritems()]
        return '\n'.join(sorted(header_strs))

    def _get_signed_headers(self, headers):
        """Return the sorted, ';'-joined list of signed header names."""
        return ';'.join(sorted(header.lower().strip() for header in headers))
|
Python
| 0.000005
|
@@ -6686,17 +6686,16 @@
, safe='
-/
~'))%0A
|
f78e7eac7e604d9206372afa86a2fc96a049e7e6
|
PATCH bugfix
|
eve/methods/patch.py
|
eve/methods/patch.py
|
# -*- coding: utf-8 -*-
"""
eve.methods.patch
~~~~~~~~~~~~~~~~~
    This module implements the PATCH method.
:copyright: (c) 2014 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
from flask import current_app as app, abort
from werkzeug import exceptions
from datetime import datetime
from eve.utils import document_etag, document_link, config, debug_error_message
from eve.auth import requires_auth
from eve.validation import ValidationError
from eve.methods.common import get_document, parse, payload as payload_, \
ratelimit, pre_event, resolve_media_files
from eve.versioning import resolve_document_version, \
insert_versioning_documents
@ratelimit()
@requires_auth('item')
@pre_event
def patch(resource, **lookup):
    """ Perform a document patch/update. Updates are first validated against
    the resource schema. If validation passes, the document is updated and
    an OK status update is returned. If validation fails, a set of validation
    issues is returned.

    :param resource: the name of the resource to which the document belongs.
    :param **lookup: document lookup query.

    .. versionchanged:: 0.4
       'on_update' raised before performing the update on the database.
       Support for document versioning.
       'on_updated' raised after performing the update on the database.

    .. versionchanged:: 0.3
       Support for media fields.
       When IF_MATCH is disabled, no etag is included in the payload.
       Support for new validation format introduced with Cerberus v0.5.

    .. versionchanged:: 0.2
       Use the new STATUS setting.
       Use the new ISSUES setting.
       Raise 'on_pre_<method>' event.

    .. versionchanged:: 0.1.1
       Item-identifier wrapper stripped from both request and response payload.

    .. versionchanged:: 0.1.0
       Support for optional HATEOAS.
       Re-raises `exceptions.Unauthorized`, this could occur if the
       `auth_field` condition fails

    .. versionchanged:: 0.0.9
       More informative error messages.
       Support for Python 3.3.

    .. versionchanged:: 0.0.8
       Let ``werkzeug.exceptions.InternalServerError`` go through as they have
       probably been explicitly raised by the data driver.

    .. versionchanged:: 0.0.7
       Support for Rate-Limiting.

    .. versionchanged:: 0.0.6
       ETag is now computed without the need of an additional db lookup

    .. versionchanged:: 0.0.5
       Support for 'application/json' Content-Type.

    .. versionchanged:: 0.0.4
       Added the ``requires_auth`` decorator.

    .. versionchanged:: 0.0.3
       JSON links. Superflous ``response`` container removed.
    """
    payload = payload_()
    original = get_document(resource, **lookup)
    if not original:
        # not found
        abort(404)

    resource_def = app.config['DOMAIN'][resource]
    schema = resource_def['schema']
    validator = app.validator(schema, resource)

    object_id = original[config.ID_FIELD]
    last_modified = None
    etag = None

    issues = {}
    response = {}

    try:
        updates = parse(payload, resource)
        validation = validator.validate_update(updates, object_id)
        if validation:
            resolve_media_files(updates, resource, original)
            resolve_document_version(updates, resource, 'PATCH', original)

            # some datetime precision magic
            updates[config.LAST_UPDATED] = \
                datetime.utcnow().replace(microsecond=0)

            # the mongo driver has a different precision than the python
            # datetime. since we don't want to reload the document once it has
            # been updated, and we still have to provide an updated etag,
            # we're going to update the local version of the 'original'
            # document, and we will use it for the etag computation.
            updated = original.copy()

            # notify callbacks
            getattr(app, "on_update")(resource, updates, original)
            getattr(app, "on_update_%s" % resource)(updates, original)

            # Bug fix: merge the updates into the local copy only *after* the
            # on_update callbacks have run. Callbacks may mutate `updates`,
            # and the copy used for the etag and the response below must match
            # exactly what gets persisted by app.data.update().
            updated.update(updates)

            app.data.update(resource, object_id, updates)

            insert_versioning_documents(resource, object_id, updated)

            # notify callbacks
            getattr(app, "on_updated")(resource, updates, original)
            getattr(app, "on_updated_%s" % resource)(updates, original)

            response[config.ID_FIELD] = updated[config.ID_FIELD]
            last_modified = response[config.LAST_UPDATED] = \
                updated[config.LAST_UPDATED]

            # metadata
            if config.IF_MATCH:
                etag = response[config.ETAG] = document_etag(updated)

            if resource_def['hateoas']:
                response[config.LINKS] = {
                    'self': document_link(resource, updated[config.ID_FIELD])
                }

            if resource_def['versioning'] is True:
                resolve_document_version(updated, resource, 'GET')
                response[config.VERSION] = updated[config.VERSION]
                response[config.LATEST_VERSION] = \
                    updated[config.LATEST_VERSION]
        else:
            issues = validator.errors
    except ValidationError as e:
        # TODO should probably log the error and abort 400 instead (when we
        # got logging)
        issues['validator exception'] = str(e)
    except (exceptions.InternalServerError, exceptions.Unauthorized) as e:
        raise e
    except Exception as e:
        # consider all other exceptions as Bad Requests
        abort(400, description=debug_error_message(
            'An exception occurred: %s' % e
        ))

    if len(issues):
        response[config.ISSUES] = issues
        response[config.STATUS] = config.STATUS_ERR
    else:
        response[config.STATUS] = config.STATUS_OK

    return response, last_modified, etag, 200
|
Python
| 0.000001
|
@@ -3896,44 +3896,8 @@
py()
-%0A updated.update(updates)
%0A%0A
@@ -4056,32 +4056,69 @@
tes, original)%0A%0A
+ updated.update(updates)%0A%0A
app.
|
46310abfba94fbdc1ecb402de2d2991baaa2765b
|
fix comments typo
|
genomat_func/__main__.py
|
genomat_func/__main__.py
|
# -*- coding: utf-8 -*-
#########################
# __MAIN__ #
#########################
"""
Pure functional, incomplete implementation of the PRJ project.
Gene KO and mutations are not implemented.
Computation of viability for a genome is not really implemented, but works…
It's just for playing and teaching.
Real project is implemented as genomat module.
"""
#########################
# IMPORTS #
#########################
from functools import partial
from itertools import product
from numpy import matrix, array
import random
#########################
# PRE-DECLARATIONS #
#########################
NB_PARENTS = 2
DEFAULT_GENE_NUMBER = 5
DEFAULT_GENE_VALUE = partial(random.randint, 0, 9)
DEFAULT_PHENOTYPE = matrix([[1] for _ in range(DEFAULT_GENE_NUMBER)])
def DEFAULT_THRESHOLDING(phenotype):
    """Map each gene expression level to its sign.

    Positive values become 1, negative values become -1, zero stays 0.
    """
    # Only column vectors of the default gene count are accepted.
    assert(phenotype.shape == (DEFAULT_GENE_NUMBER, 1))
    thresholded = []
    for gene_expr in phenotype:
        if gene_expr > 0:
            thresholded.append([1])
        elif gene_expr < 0:
            thresholded.append([-1])
        else:
            thresholded.append([0])
    return matrix(thresholded)
#########################
# INIT FUNCTIONS #
#########################
def init_genome(size=DEFAULT_GENE_NUMBER, random_value=DEFAULT_GENE_VALUE):
    """Build a random interaction matrix of shape (size, size).

    Cell (A, B) holds the influence of gene A on gene B. For example::

        2 3
        1 0

    reads as: gene 0 promotes itself by factor 2 and gene 1 by factor 3;
    gene 1 promotes gene 0 by factor 1 and itself by factor 0, i.e.::

        assert(genome[0, 0] == 2)
        assert(genome[0, 1] == 3)
        assert(genome[1, 0] == 1)
        assert(genome[1, 1] == 0)
    """
    rows = [[random_value() for _ in range(size)] for _ in range(size)]
    return matrix(rows)
def init_population(size, is_viable, new_indiv=init_genome):
    """Create *size* viable individuals drawn from *new_indiv*.

    Candidates rejected by *is_viable* are discarded and redrawn; note this
    loops forever if *is_viable* never accepts anything.
    """
    pop = []
    while len(pop) < size:
        indiv = new_indiv()
        # Plain conditional instead of the previous side-effecting
        # ``pop.append(indiv) if is_viable(indiv) else None`` expression.
        if is_viable(indiv):
            pop.append(indiv)
    return pop
#########################
# PRINT FUNCTIONS #
#########################
def prettyfied_genome(genome, size=DEFAULT_GENE_NUMBER):
    """Return a printable representation of *genome* (*size* is unused)."""
    return str(genome)
def prettyfied_population(pop, genome_size=DEFAULT_GENE_NUMBER):
    """Return a printable listing of every genome in *pop*."""
    rendered = [prettyfied_genome(ind, genome_size) for ind in pop]
    return '\nPOPULATION:\n' + '\n'.join(rendered)
#########################
# STEPS FUNCTIONS #
#########################
def genome_from(parents, size=DEFAULT_GENE_NUMBER):
    """Cross the parent genomes into a new one.

    Each of the *size* rows of the child genome is copied from a randomly
    chosen parent.
    """
    rows = [array(random.choice(parents))[row] for row in range(size)]
    return matrix(rows)
def next_population(pop, is_viable, size=None):
    """Return a new population derived from *pop*.

    Each candidate is crossed from NB_PARENTS randomly sampled parents and
    kept only if *is_viable* accepts it. The population size defaults to
    ``len(pop)``.
    """
    size = len(pop) if size is None else size
    new_pop = []
    while len(new_pop) < size:
        new_indiv = genome_from(random.sample(pop, NB_PARENTS))
        # Plain conditional instead of the previous side-effecting
        # ``new_pop.append(new_indiv) if ... else None`` expression.
        if is_viable(new_indiv):
            new_pop.append(new_indiv)
    return new_pop
def genome_is_viable(genome, initial_phenotype, thresholding=DEFAULT_THRESHOLDING, size=DEFAULT_GENE_NUMBER):
    """
    Genome is viable if it stabilizes itself.
    Stabilization would be verified by checking that thresholding the product
    of initial_phenotype and genome equals the initial phenotype.

    Not really implemented: this stub ignores every argument and answers
    viable / not viable with a coin flip.
    """
    # Placeholder: random 50/50 verdict instead of the real stability test.
    return random.randint(0, 1) == 0
#########################
# MAIN FUNCTION #
#########################
# Bug fix: the guard previously used ``is`` to compare __name__ against a
# string literal; identity of equal strings is a CPython interning accident,
# equality is the correct test.
if __name__ == '__main__':
    # Bind the default phenotype and thresholding so the viability check can
    # be passed around as a one-argument predicate.
    genome_is_viable = partial(genome_is_viable,
                               initial_phenotype=DEFAULT_PHENOTYPE,
                               thresholding=DEFAULT_THRESHOLDING
                               )
    p = init_population(10, genome_is_viable)
    for _ in range(10):
        p = next_population(p, genome_is_viable)
        print(prettyfied_population(p))
|
Python
| 0.000071
|
@@ -920,16 +920,17 @@
0 become
+s
1 and %3C
@@ -937,16 +937,17 @@
0 become
+s
-1%0A
@@ -1410,16 +1410,17 @@
hat link
+s
a gene
@@ -1463,17 +1463,16 @@
ction of
-
%0A gen
@@ -2502,17 +2502,16 @@
TION:%5Cn'
-
%0A
@@ -2835,17 +2835,16 @@
sed from
-
%0A one
@@ -3951,17 +3951,16 @@
_viable,
-
%0A
@@ -4019,17 +4019,16 @@
ENOTYPE,
-
%0A
|
70723aff7a947d00f4f5632cfd7d1fe4d52c79be
|
Remove the friend from the mapping dict.
|
shell/view/frame/friendstray.py
|
shell/view/frame/friendstray.py
|
# Copyright (C) 2006-2007 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import hippo
from sugar.presence import presenceservice
from sugar.graphics.tray import VTray, TrayIcon
from view.BuddyMenu import BuddyMenu
from view.frame.frameinvoker import FrameWidgetInvoker
from model.BuddyModel import BuddyModel
class FriendIcon(TrayIcon):
    """Tray icon for a single buddy, with the buddy's palette attached."""
    def __init__(self, shell, buddy):
        TrayIcon.__init__(self, icon_name='computer-xo',
                          xo_color=buddy.get_color())

        menu = BuddyMenu(shell, buddy)
        self.set_palette(menu)
        menu.set_group_id('frame')
        menu.props.invoker = FrameWidgetInvoker(self)
class FriendsTray(VTray):
    """Vertical tray showing the buddies joined to the current activity."""

    def __init__(self, shell):
        VTray.__init__(self)

        self._shell = shell
        self._activity_ps = None
        self._joined_hid = -1
        self._left_hid = -1
        # Maps a buddy's key to its FriendIcon so icons can be removed later.
        self._buddies = {}

        self._pservice = presenceservice.get_instance()
        self._pservice.connect('activity-appeared',
                               self.__activity_appeared_cb)

        # Add initial activities the PS knows about
        self._pservice.get_activities_async(reply_handler=self._get_activities_cb)

        home_model = shell.get_model().get_home()
        home_model.connect('active-activity-changed',
                           self._active_activity_changed_cb)

    def _get_activities_cb(self, activities_list):
        # ``activities_list`` was previously named ``list``, shadowing the
        # builtin; it is passed positionally so renaming is safe.
        for activity in activities_list:
            self.__activity_appeared_cb(self._pservice, activity)

    def add_buddy(self, buddy):
        """Add an icon for *buddy* unless one is already shown."""
        if buddy.props.key in self._buddies:
            return

        model = BuddyModel(buddy=buddy)

        icon = FriendIcon(self._shell, model)
        self.add_item(icon)
        icon.show()

        self._buddies[buddy.props.key] = icon

    def remove_buddy(self, buddy):
        """Remove *buddy*'s icon from the tray, if present."""
        if buddy.props.key not in self._buddies:
            return

        self.remove_item(self._buddies[buddy.props.key])
        # Bug fix: also drop the mapping entry. Leaving it made add_buddy()
        # believe a departed buddy was still present (so it could never be
        # re-added) and kept a stale reference to the removed icon alive.
        del self._buddies[buddy.props.key]

    def clear(self):
        """Remove every icon and reset the buddy mapping."""
        for item in self.get_children():
            self.remove_item(item)
        self._buddies = {}

    def __activity_appeared_cb(self, pservice, activity_ps):
        # Track the presence object only if it matches the current activity.
        activity = self._shell.get_current_activity()
        if activity and activity_ps.props.id == activity.get_id():
            self._set_activity_ps(activity_ps)

    def _set_activity_ps(self, activity_ps):
        """Switch tracking to *activity_ps* (may be None), rebuilding icons."""
        if self._activity_ps == activity_ps:
            return

        # Disconnect from the previously tracked activity, if any.
        if self._joined_hid > 0:
            self._activity_ps.disconnect(self._joined_hid)
            self._joined_hid = -1
        if self._left_hid > 0:
            self._activity_ps.disconnect(self._left_hid)
            self._left_hid = -1

        self._activity_ps = activity_ps

        self.clear()

        if activity_ps != None:
            for buddy in activity_ps.get_joined_buddies():
                self.add_buddy(buddy)

            self._joined_hid = activity_ps.connect(
                'buddy-joined', self.__buddy_joined_cb)
            self._left_hid = activity_ps.connect(
                'buddy-left', self.__buddy_left_cb)

    def _active_activity_changed_cb(self, home_model, home_activity):
        if not home_activity:
            self._set_activity_ps(None)
            return

        activity_id = home_activity.get_activity_id()
        if not activity_id:
            self._set_activity_ps(None)
            return

        # HACK to suppress warning in logs when activity isn't found
        # (if it's locally launched and not shared yet)
        activity = None
        for act in self._pservice.get_activities():
            if activity_id == act.props.id:
                activity = act
                break

        if activity:
            self._set_activity_ps(activity)
        else:
            self._set_activity_ps(None)

    def __buddy_joined_cb(self, activity, buddy):
        self.add_buddy(buddy)

    def __buddy_left_cb(self, activity, buddy):
        self.remove_buddy(buddy)
|
Python
| 0
|
@@ -2621,16 +2621,59 @@
ps.key%5D)
+%0A del self._buddies%5Bbuddy.props.key%5D
%0A%0A de
|
829d68f842c5076be7a8b2c3963c032977fe2f47
|
Bump to 4.4-dp2.
|
pebble_tool/version.py
|
pebble_tool/version.py
|
# Base version as (major, minor, micro), plus an optional pre-release suffix.
version_base = (4, 4, 0)
version_suffix = 'dp1'

# Tuple form: append the suffix only when one is set.
if version_suffix is None:
    __version_info__ = version_base
else:
    __version_info__ = version_base + (version_suffix,)

# String form: "major.minor", plus ".micro" when non-zero and "-suffix"
# when present (e.g. "4.4-dp1").
__version__ = '{0}.{1}'.format(version_base[0], version_base[1])
if version_base[2] != 0:
    __version__ = '{}.{}'.format(__version__, version_base[2])
if version_suffix is not None:
    __version__ = '{}-{}'.format(__version__, version_suffix)
|
Python
| 0.000001
|
@@ -42,9 +42,9 @@
'dp
-1
+2
'%0A%0Ai
|
bc91c7abdc5754917642614930ad24d5db169c9a
|
simplify settings
|
shotglass/shotglass/settings.py
|
shotglass/shotglass/settings.py
|
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'qjg2s4mxb$mmv0e222yg7ot2obfg8rh+u7s@7l!fwv1@r*fa_n'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app',
# 'source',
'django_extensions',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'shotglass.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'shotglass.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(asctime)s %(levelname)s %(message)s'
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'simple',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': 'INFO',
},
},
}
|
Python
| 0.00049
|
@@ -1,428 +1,13 @@
-# Build paths inside the project like this: os.path.join(BASE_DIR, ...)%0Aimport os%0A%0ABASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))%0A%0A%0A# Quick-start development settings - unsuitable for production%0A# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/%0A%0A# SECURITY WARNING: keep the secret key used in production secret!%0ASECRET_KEY = 'qjg2s4mxb$mmv0e222yg7ot2obfg8rh+u7s@7l!fwv1@r*fa_n'
+import os
%0A%0A#
@@ -88,28 +88,8 @@
ue%0A%0A
-ALLOWED_HOSTS = %5B%5D%0A%0A
%0A# A
@@ -332,24 +332,8 @@
p',%0A
- # 'source',%0A
@@ -852,40 +852,8 @@
%0A)%0A%0A
-ROOT_URLCONF = 'shotglass.urls'%0A
%0ATEM
@@ -1373,378 +1373,221 @@
%0A%5D%0A%0A
-WSGI_APPLICATION = 'shotglass.wsgi.application'%0A%0A%0A# Database%0A# https://docs.djangoproject.com/en/1.8/ref/settings/#databases%0A%0ADATABASES = %7B%0A 'default': %7B%0A 'ENGINE': 'django.db.backends.sqlite3',%0A 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),%0A %7D%0A%7D%0A%0A%0A# Internationalization%0A# https://docs.djangoproject.com/en/1.8/topics/i18n/%0A%0ALANGUAGE_CODE = 'en-us'%0A
+%0ABASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))%0A%0AALLOWED_HOSTS = %5B%5D%0ALANGUAGE_CODE = 'en-us'%0AROOT_URLCONF = 'shotglass.urls'%0ASECRET_KEY = 'qjg2s4mxb$mmv0e222yg7ot2obfg8rh+u7s@7l!fwv1@r*fa_n'
%0ATIM
@@ -1601,17 +1601,16 @@
= 'UTC'%0A
-%0A
USE_I18N
@@ -1613,25 +1613,24 @@
I18N = True%0A
-%0A
USE_L10N = T
@@ -1633,17 +1633,16 @@
= True%0A
-%0A
USE_TZ =
@@ -1651,135 +1651,222 @@
rue%0A
-%0A%0A# Static files (CSS, JavaScript, Images)%0A# https://docs.djangoproject.com/en/1.8/howto/static-files/%0A%0ASTATIC_URL = '/static/'
+WSGI_APPLICATION = 'shotglass.wsgi.application'%0A%0ASTATIC_URL = '/static/'%0A%0ADATABASES = %7B%0A 'default': %7B%0A 'ENGINE': 'django.db.backends.sqlite3',%0A 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),%0A %7D%0A%7D
%0A%0ALO
|
752132f83cacb15273625f819eed1dab1d558e97
|
Make sure all relevant fields are shown in the admin interface
|
dictionary/admin.py
|
dictionary/admin.py
|
from daisyproducer.dictionary.models import Word
from django.contrib import admin
class WordAdmin(admin.ModelAdmin):
    """Admin options for dictionary Word entries."""
    search_fields = ('untranslated',)
    ordering = ('untranslated',)
    list_display = ('untranslated', 'grade1', 'grade2', 'type', 'isConfirmed')

admin.site.register(Word, WordAdmin)
|
Python
| 0
|
@@ -188,16 +188,27 @@
nfirmed'
+, 'isLocal'
)%0A or
|
e1a5c622287f889ca3f38cffba391528e2fce5ba
|
Version 0.1.5
|
gevent_tasks/__init__.py
|
gevent_tasks/__init__.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# >>
#   gevent-tasks, 2017
# <<

# Package metadata.
__author__ = 'Blake VandeMerwe'
__version__ = '0.1.4'
__license__ = 'MIT'
__contact__ = 'blakev@null.net'
__url__ = 'https://github.com/blakev/gevent-tasks'

from gevent_tasks.manager import TaskManager
from gevent_tasks.tasks import *

# Names re-exported as the public API of the package.
__all__ = [
    'Task',
    'TaskManager',
    'TaskPool',
    'cron'
]
|
Python
| 0.000001
|
@@ -131,9 +131,9 @@
0.1.
-4
+5
'%0A__
|
e60efc7bebbea465154feb26879b27830e0e4a11
|
Update docstrings.
|
dqutils/database/field.py
|
dqutils/database/field.py
|
# -*- coding: utf-8 -*-
"""dqutils.database.field -- Define field classes.
"""
from abc import ABCMeta
from abc import abstractmethod
from dqutils.bit import get_bits
from dqutils.bit import get_int
class AbstractField(metaclass=ABCMeta):
    """This class represents a member data or field in a data structure.

    Attributes:
      name (string): The name of this member data or field.
      offset (int): The offset from the base alignment (in bytes).
      mask (int): A mask value for a bit field.
      format (string): A `sprintf` style formatting string.
    """

    def __init__(self, name, **kwargs):
        """The constructor.

        Args:
          name (string): The name of the member data or field.
          **kwargs: Arbitrary keyword arguments; 'offset' is required,
            'mask' and 'format' are optional.
        """
        self.name = name
        self.offset = kwargs['offset']
        self.mask = kwargs.get('mask')
        # NOTE(review): _do_get_format() is evaluated eagerly here, even when
        # an explicit 'format' kwarg is supplied and its result is discarded.
        self.format = kwargs.get('format', self._do_get_format())

    def __str__(self):
        # Hex offset followed by the field name, e.g. '12 Gold'.
        return '{0:02X} {1}'.format(self.offset, self.name)

    @classmethod
    @abstractmethod
    def field_type(cls):
        """Return the type name of field."""
        pass

    @abstractmethod
    def _do_get_format(self):
        """Return the format string."""
        pass

    @abstractmethod
    def _do_get_value(self, byte_string):
        """Return the value passed to the format.

        Args:
          byte_string (bytes): An instance of class bytes.

        Returns:
          (object): A numeric value.
        """
        pass

    def process(self, byte_string):
        """Process bytes and return a text.

        Args:
          byte_string (bytes): An instance of class bytes.

        Returns:
          (string): A formatted value.
        """
        return self.format % self._do_get_value(byte_string)

    def title(self):
        """Return the name of this member or field.

        Returns:
          (string): The name of this instance.
        """
        return self.name
class BitField(AbstractField):
    """This class represents a bit-field member data."""

    @classmethod
    def field_type(cls):
        """Return the type name of field."""
        return 'bits'

    def _do_get_format(self):
        # Bug fix: this used to assign ``self.format`` and implicitly return
        # None, so the base constructor stored format=None; the base class
        # expects the default format string to be *returned*.
        return '%X'

    def _do_get_value(self, byte_string):
        """Extract the masked bits at ``offset`` from *byte_string*."""
        return get_bits(byte_string, self.offset, self.mask)
class ByteField(AbstractField):
    """This class represents a byte member data."""

    @classmethod
    def field_type(cls):
        """Return the type name of field."""
        return '1byte'

    def _do_get_format(self):
        # Bug fix: return the default format instead of assigning it (the
        # previous assignment was clobbered with None by the constructor).
        return '%02X'

    def _do_get_value(self, byte_string):
        """Read one byte at ``offset`` from *byte_string*."""
        return get_int(byte_string, self.offset, 1)
class WordField(AbstractField):
    """This class represents a 2-byte member data."""

    @classmethod
    def field_type(cls):
        """Return the type name of field."""
        return '2byte'

    def _do_get_format(self):
        # Bug fix: return the default format instead of assigning it (the
        # previous assignment was clobbered with None by the constructor).
        return '%04X'

    def _do_get_value(self, byte_string):
        """Read two bytes at ``offset`` from *byte_string*."""
        return get_int(byte_string, self.offset, 2)
class LongField(AbstractField):
    """This class represents a 3-byte member data."""

    @classmethod
    def field_type(cls):
        """Return the type name of field."""
        return '3byte'

    def _do_get_format(self):
        # Bug fix: return the default format instead of assigning it (the
        # previous assignment was clobbered with None by the constructor).
        return '%06X'

    def _do_get_value(self, byte_string):
        """Read three bytes at ``offset`` from *byte_string*."""
        return get_int(byte_string, self.offset, 3)
class BadFieldType(TypeError):
    """Raised when a field type name is not recognized."""

    def __init__(self, bad_type):
        super().__init__()
        # Keep the offending type name for the error message.
        self.bad_type = bad_type

    def __str__(self):
        return "no such field type: {0}".format(self.bad_type)
# Accepted spelling aliases for field type names, mapped to the canonical
# names returned by each field class's field_type().
_field_aliases = {
    'bit': 'bits',
    'byte': '1byte',
    'bytes': '1byte',
    'word': '2byte',
    'long': '3byte',
    'address': '3byte'}
def make_field(name, field_type, **kwargs):
    """Instantiate the field class matching *field_type*.

    Args:
      name (string): The name of this member data or field.
      field_type (string): The type name of field (aliases accepted).
      **kwargs: Keyword arguments passed to the constructor of field class.

    Returns:
      (AbstractField): A field instance.

    Raises:
      BadFieldType: If no field class matches *field_type*.

    Examples:
      >>> params = dict(offset=0x12, mask=0xFFF8, format='%d')
      >>> gold = make_field('Gold', 'bits', **params)
      >>> isinstance(gold, BitField)
      True
    """
    canonical = field_type.lower()
    canonical = _field_aliases.get(canonical, canonical)
    for candidate in AbstractField.__subclasses__():
        if candidate.field_type() == canonical:
            return candidate(name, **kwargs)
    raise BadFieldType(canonical)
|
Python
| 0
|
@@ -428,23 +428,37 @@
he b
-ase alignment (
+eginning of the array (given
in b
@@ -495,19 +495,26 @@
k value
-for
+if this is
a bit f
@@ -557,25 +557,15 @@
ntf%60
- style
format
-ting
str
@@ -1736,21 +1736,22 @@
rmatted
-value
+string
.%0A
|
4abea7bafa80547548488cf5445b42b3fccf7939
|
Version bump for development
|
discord/__init__.py
|
discord/__init__.py
|
"""
Discord API Wrapper
~~~~~~~~~~~~~~~~~~~
A basic wrapper for the Discord API.
:copyright: (c) 2015-present Rapptz
:license: MIT, see LICENSE for more details.
"""
# Package metadata.
__title__ = 'discord'
__author__ = 'Rapptz'
__license__ = 'MIT'
__copyright__ = 'Copyright 2015-present Rapptz'
__version__ = '2.0.0'

# Allow the package to span multiple directories (pkgutil-style namespace).
__path__ = __import__('pkgutil').extend_path(__path__, __name__)
import logging
from typing import NamedTuple, Literal
from .client import *
from .appinfo import *
from .user import *
from .emoji import *
from .partial_emoji import *
from .activity import *
from .channel import *
from .guild import *
from .flags import *
from .member import *
from .message import *
from .asset import *
from .errors import *
from .permissions import *
from .role import *
from .file import *
from .colour import *
from .integrations import *
from .invite import *
from .template import *
from .welcome_screen import *
from .widget import *
from .object import *
from .reaction import *
from . import (
utils as utils,
opus as opus,
abc as abc,
ui as ui,
app_commands as app_commands,
)
from .enums import *
from .embeds import *
from .mentions import *
from .shard import *
from .player import *
from .webhook import *
from .voice_client import *
from .audit_logs import *
from .raw_models import *
from .team import *
from .sticker import *
from .stage_instance import *
from .scheduled_event import *
from .interactions import *
from .components import *
from .threads import *
from .automod import *
class VersionInfo(NamedTuple):
    """``sys.version_info``-style tuple describing the library version."""
    major: int
    minor: int
    micro: int
    # Release stage, mirroring sys.version_info.releaselevel.
    releaselevel: Literal["alpha", "beta", "candidate", "final"]
    serial: int
# Machine-readable version, kept in sync with the __version__ string above.
version_info: VersionInfo = VersionInfo(major=2, minor=0, micro=0, releaselevel='final', serial=0)

# Attach a NullHandler so library users without logging config see no
# "No handlers could be found" warnings.
logging.getLogger(__name__).addHandler(logging.NullHandler())

# Drop import-time helpers from the public namespace.
del logging, NamedTuple, Literal, VersionInfo
|
Python
| 0
|
@@ -296,11 +296,12 @@
'2.
-0
+1
.0
+a
'%0A%0A_
@@ -1726,17 +1726,17 @@
, minor=
-0
+1
, micro=
@@ -1752,21 +1752,21 @@
elevel='
-final
+alpha
', seria
|
bda269c5b745703cf517222e004caf0233b40699
|
refactor p4io to io
|
tests/test_get_data.py
|
tests/test_get_data.py
|
from planet4 import p4io
import datetime as dt
def test_get_numbers_from_date_from_fname():
    """A dated filename splits into [year, month, day] integers."""
    path = '/a/b/c/2014-06-02_some_name.h5'
    expected = [2014, 6, 2]
    assert p4io.split_date_from_fname(path) == expected
def test_get_datetime_object_from_fname():
    """A dated filename converts to the matching datetime object."""
    path = '/a/b/c/2014-06-02_some_name.h5'
    expected = dt.datetime(2014, 6, 2)
    assert p4io.get_dt_from_fname(path) == expected
def test_from_2_files_get_latest_file(monkeypatch):
    """With two dated files on disk, the later one is the current database."""
    import glob
    older = '/a/b/c/2014-06-02_some_name.h5'
    newer = '/a/b/c/2014-06-09_some_name.h5'

    def fake_glob(path):
        return [older, newer]
    monkeypatch.setattr(glob, 'glob', fake_glob)
    current = p4io.get_current_database_fname()
    assert current == newer
|
Python
| 0.999999
|
@@ -17,13 +17,11 @@
ort
-p4
io%0A
+
impo
@@ -142,18 +142,16 @@
assert
-p4
io.split
@@ -340,18 +340,16 @@
_obj ==
-p4
io.get_d
@@ -645,16 +645,16 @@
rn)%0A
+
x =
p4io
@@ -653,10 +653,8 @@
x =
-p4
io.g
|
1a8ddc44a026919e404c23fe7f2d7b1ee8bac63e
|
Implement unit tests for write invalid type jsondict issue.
|
tests/test_jsondict.py
|
tests/test_jsondict.py
|
# Copyright (c) 2017 The Regents of the University of Michigan
# All rights reserved.
# This software is licensed under the BSD 3-Clause License.
import os
import unittest
import uuid
from signac.core.jsondict import JSONDict
from signac.common import six
if six.PY2:
from tempdir import TemporaryDirectory
else:
from tempfile import TemporaryDirectory
FN_DICT = 'jsondict.json'
def testdata():
    """Return a fresh random UUID4 rendered as a string."""
    value = uuid.uuid4()
    return str(value)
class BaseJSONDictTest(unittest.TestCase):
    """Shared fixture: a throwaway directory holding the JSON dict file."""

    def setUp(self):
        tmp_dir = TemporaryDirectory(prefix='jsondict_')
        self._tmp_dir = tmp_dir
        self._fn_dict = os.path.join(tmp_dir.name, FN_DICT)
        self.addCleanup(tmp_dir.cleanup)
class JSONDictTest(BaseJSONDictTest):
def get_json_dict(self):
return JSONDict(filename=self._fn_dict)
def get_testdata(self):
return str(uuid.uuid4())
def test_init(self):
self.get_json_dict()
def test_set_get(self):
jsd = self.get_json_dict()
key = 'setget'
d = self.get_testdata()
jsd.clear()
self.assertFalse(bool(jsd))
self.assertEqual(len(jsd), 0)
self.assertNotIn(key, jsd)
self.assertFalse(key in jsd)
jsd[key] = d
self.assertTrue(bool(jsd))
self.assertEqual(len(jsd), 1)
self.assertIn(key, jsd)
self.assertTrue(key in jsd)
self.assertEqual(jsd[key], d)
self.assertEqual(jsd.get(key), d)
def test_set_get_explicit_nested(self):
jsd = self.get_json_dict()
key = 'setgetexplicitnested'
d = self.get_testdata()
jsd.setdefault('a', dict())
child1 = jsd['a']
child2 = jsd['a']
self.assertEqual(child1, child2)
self.assertEqual(type(child1), type(child2))
self.assertEqual(child1._parent, child2._parent)
self.assertEqual(id(child1._parent), id(child2._parent))
self.assertEqual(id(child1), id(child2))
self.assertFalse(child1)
self.assertFalse(child2)
child1[key] = d
self.assertTrue(child1)
self.assertTrue(child2)
self.assertIn(key, child1)
self.assertIn(key, child2)
self.assertEqual(child1, child2)
self.assertEqual(child1[key], d)
self.assertEqual(child2[key], d)
def test_copy_value(self):
jsd = self.get_json_dict()
key = 'copy_value'
key2 = 'copy_value2'
d = self.get_testdata()
self.assertNotIn(key, jsd)
self.assertNotIn(key2, jsd)
jsd[key] = d
self.assertIn(key, jsd)
self.assertEqual(jsd[key], d)
self.assertNotIn(key2, jsd)
jsd[key2] = jsd[key]
self.assertIn(key, jsd)
self.assertEqual(jsd[key], d)
self.assertIn(key2, jsd)
self.assertEqual(jsd[key2], d)
def test_iter(self):
jsd = self.get_json_dict()
key1 = 'iter1'
key2 = 'iter2'
d1 = self.get_testdata()
d2 = self.get_testdata()
d = {key1: d1, key2: d2}
jsd.update(d)
self.assertIn(key1, jsd)
self.assertIn(key2, jsd)
for i, key in enumerate(jsd):
self.assertIn(key, d)
self.assertEqual(d[key], jsd[key])
self.assertEqual(i, 1)
def test_delete(self):
jsd = self.get_json_dict()
key = 'delete'
d = self.get_testdata()
jsd[key] = d
self.assertEqual(len(jsd), 1)
self.assertEqual(jsd[key], d)
del jsd[key]
self.assertEqual(len(jsd), 0)
with self.assertRaises(KeyError):
jsd[key]
def test_update(self):
jsd = self.get_json_dict()
key = 'update'
d = {key: self.get_testdata()}
jsd.update(d)
self.assertEqual(len(jsd), 1)
self.assertEqual(jsd[key], d[key])
def test_clear(self):
jsd = self.get_json_dict()
key = 'clear'
d = self.get_testdata()
jsd[key] = d
self.assertEqual(len(jsd), 1)
self.assertEqual(jsd[key], d)
jsd.clear()
self.assertEqual(len(jsd), 0)
def test_reopen(self):
jsd = self.get_json_dict()
key = 'reopen'
d = self.get_testdata()
jsd[key] = d
jsd.save()
del jsd # possibly unsafe
jsd2 = self.get_json_dict()
jsd2.load()
self.assertEqual(len(jsd2), 1)
self.assertEqual(jsd2[key], d)
def test_copy_as_dict(self):
jsd = self.get_json_dict()
key = 'copy'
d = self.get_testdata()
jsd[key] = d
copy = dict(jsd)
del jsd
self.assertTrue(key in copy)
self.assertEqual(copy[key], d)
def test_reopen2(self):
jsd = self.get_json_dict()
key = 'reopen'
d = self.get_testdata()
jsd[key] = d
del jsd # possibly unsafe
jsd2 = self.get_json_dict()
self.assertEqual(len(jsd2), 1)
self.assertEqual(jsd2[key], d)
class JSONDictWriteConcernTest(JSONDictTest):
def get_json_dict(self):
return JSONDict(filename=self._fn_dict, write_concern=True)
class JSONDictNestedDataTest(JSONDictTest):
def get_testdata(self):
return dict(a=super(JSONDictNestedDataTest, self).get_testdata())
class JSONDictNestedDataWriteConcernTest(JSONDictNestedDataTest, JSONDictWriteConcernTest):
pass
if __name__ == '__main__':
unittest.main()
|
Python
| 0
|
@@ -4965,16 +4965,470 @@
y%5D, d)%0A%0A
+ def test_write_invalid_type(self):%0A class Foo(object):%0A pass%0A%0A jsd = self.get_json_dict()%0A key = 'write_invalid_type'%0A d = self.get_testdata()%0A jsd%5Bkey%5D = d%0A self.assertEqual(len(jsd), 1)%0A self.assertEqual(jsd%5Bkey%5D, d)%0A d2 = Foo()%0A with self.assertRaises(TypeError):%0A jsd%5Bkey + '2'%5D = d2%0A self.assertEqual(len(jsd), 1)%0A self.assertEqual(jsd%5Bkey%5D, d)%0A%0A
%0Aclass J
|
9b06a061a4bc439ea96761ead0a1397470cfff56
|
update tests
|
tests/test_labeling.py
|
tests/test_labeling.py
|
from __future__ import print_function
from builtins import zip
from builtins import object
from usaddress import parse, GROUP_LABEL
from parserator.training import readTrainingData
import unittest
class TestSimpleAddresses(object) : # for test generators, must inherit from object
def test_simple_addresses(self):
test_file = 'measure_performance/test_data/simple_address_patterns.xml'
data = list(readTrainingData([test_file], GROUP_LABEL))
for labeled_address in data :
address_text, components = labeled_address
_, labels_true = list(zip(*components))
_, labels_pred = list(zip(*parse(address_text)))
yield equals, address_text, labels_pred, labels_true
class TestSyntheticAddresses(object) :
def test_synthetic_addresses(self):
test_file = 'measure_performance/test_data/synthetic_osm_data.xml'
data = list(readTrainingData([test_file], GROUP_LABEL))
for labeled_address in data :
address_text, components = labeled_address
_, labels_true = list(zip(*components))
_, labels_pred = list(zip(*parse(address_text)))
yield equals, address_text, labels_pred, labels_true
class TestUS50Addresses(object) :
def test_us50(self):
test_file = 'measure_performance/test_data/us50_test_tagged.xml'
data = list(readTrainingData([test_file], GROUP_LABEL))
for labeled_address in data :
address_text, components = labeled_address
_, labels_true = list(zip(*components))
_, labels_pred = list(zip(*parse(address_text)))
yield fuzzyEquals, address_text, labels_pred, labels_true
def equals(addr,
labels_pred,
labels_true) :
prettyPrint(addr, labels_pred, labels_true)
assert labels_pred == labels_true
def fuzzyEquals(addr,
labels_pred,
labels_true) :
labels = []
fuzzy_labels = []
for label in labels_pred:
if label.startswith('StreetName') :
fuzzy_labels.append('StreetName')
elif label.startswith('AddressNumber') :
fuzzy_labels.append('AddressNumber')
else:
fuzzy_labels.append(label)
for label in labels_true:
labels.append(label)
prettyPrint(addr, fuzzy_labels, labels)
assert fuzzy_labels == labels
def prettyPrint(addr, predicted, true) :
print("ADDRESS: ", addr)
print("fuzzy pred: ", predicted)
print("true: ", true)
if __name__== "__main__":
unittest.main()
|
Python
| 0.000001
|
@@ -2193,16 +2193,95 @@
umber')%0A
+ elif label == ('Null') :%0A fuzzy_labels.append('NotAddress')%0A
|
ab574b6c40b6e58f396c9522be864a78478617c1
|
Remove TestMainLoop.test_concurrency
|
tests/test_mainloop.py
|
tests/test_mainloop.py
|
# -*- Mode: Python -*-
import os
import sys
import select
import signal
import time
import unittest
try:
from _thread import start_new_thread
start_new_thread # pyflakes
except ImportError:
# Python 2
from thread import start_new_thread
from gi.repository import GLib
from compathelper import _bytes
class TestMainLoop(unittest.TestCase):
@unittest.skipUnless(hasattr(os, "fork"), "no os.fork available")
def test_exception_handling(self):
pipe_r, pipe_w = os.pipe()
pid = os.fork()
if pid == 0:
os.close(pipe_w)
select.select([pipe_r], [], [])
os.close(pipe_r)
os._exit(1)
def child_died(pid, status, loop):
loop.quit()
raise Exception("deadbabe")
loop = GLib.MainLoop()
GLib.child_watch_add(GLib.PRIORITY_DEFAULT, pid, child_died, loop)
os.close(pipe_r)
os.write(pipe_w, _bytes("Y"))
os.close(pipe_w)
def excepthook(type, value, traceback):
self.assertTrue(type is Exception)
self.assertEqual(value.args[0], "deadbabe")
sys.excepthook = excepthook
try:
got_exception = False
try:
loop.run()
except:
got_exception = True
finally:
sys.excepthook = sys.__excepthook__
#
# The exception should be handled (by printing it)
# immediately on return from child_died() rather
# than here. See bug #303573
#
self.assertFalse(got_exception)
@unittest.skipUnless(hasattr(signal, "SIGUSR1"), "no SIGUSR1")
def test_concurrency(self):
def on_usr1(signum, frame):
pass
try:
# create a thread which will terminate upon SIGUSR1 by way of
# interrupting sleep()
orig_handler = signal.signal(signal.SIGUSR1, on_usr1)
start_new_thread(time.sleep, (10,))
# now create two main loops
loop1 = GLib.MainLoop()
loop2 = GLib.MainLoop()
GLib.timeout_add(100, lambda: os.kill(os.getpid(), signal.SIGUSR1))
GLib.timeout_add(500, loop1.quit)
loop1.run()
loop2.quit()
finally:
signal.signal(signal.SIGUSR1, orig_handler)
@unittest.skipUnless(hasattr(os, "fork"), "no os.fork available")
def test_sigint(self):
pid = os.fork()
if pid == 0:
time.sleep(0.5)
os.kill(os.getppid(), signal.SIGINT)
os._exit(0)
loop = GLib.MainLoop()
try:
loop.run()
self.fail('expected KeyboardInterrupt exception')
except KeyboardInterrupt:
pass
self.assertFalse(loop.is_running())
os.waitpid(pid, 0)
|
Python
| 0.013365
|
@@ -99,162 +99,8 @@
st%0A%0A
-try:%0A from _thread import start_new_thread%0A start_new_thread # pyflakes%0Aexcept ImportError:%0A # Python 2%0A from thread import start_new_thread%0A
from
@@ -1441,759 +1441,8 @@
n)%0A%0A
- @unittest.skipUnless(hasattr(signal, %22SIGUSR1%22), %22no SIGUSR1%22)%0A def test_concurrency(self):%0A def on_usr1(signum, frame):%0A pass%0A%0A try:%0A # create a thread which will terminate upon SIGUSR1 by way of%0A # interrupting sleep()%0A orig_handler = signal.signal(signal.SIGUSR1, on_usr1)%0A start_new_thread(time.sleep, (10,))%0A%0A # now create two main loops%0A loop1 = GLib.MainLoop()%0A loop2 = GLib.MainLoop()%0A GLib.timeout_add(100, lambda: os.kill(os.getpid(), signal.SIGUSR1))%0A GLib.timeout_add(500, loop1.quit)%0A loop1.run()%0A loop2.quit()%0A finally:%0A signal.signal(signal.SIGUSR1, orig_handler)%0A%0A
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.