commit
stringlengths 40
40
| subject
stringlengths 1
3.25k
| old_file
stringlengths 4
311
| new_file
stringlengths 4
311
| old_contents
stringlengths 0
26.3k
| lang
stringclasses 3
values | proba
float64 0
1
| diff
stringlengths 0
7.82k
|
|---|---|---|---|---|---|---|---|
dc56a80e80ea69aa633505f682bbf8a860a186f1
|
Allow check_oom_events to optionally make tickets. PAASTA-15879
|
paasta_tools/check_oom_events.py
|
paasta_tools/check_oom_events.py
|
#!/usr/bin/env python
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import sys
import time
from collections import namedtuple
from random import choice
from pysensu_yelp import Status
from paasta_tools import monitoring_tools
from paasta_tools.cli.utils import get_instance_config
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import get_services_for_cluster
from paasta_tools.utils import load_system_paasta_config
try:
from scribereader import scribereader
except ImportError:
scribereader = None
# Scribe stream this check tails for OOM kill records
# (presumably written by the cluster's oom-logger — confirm against the writer).
OOM_EVENTS_STREAM = "tmp_paasta_oom_events"
# One OOM kill occurrence; fields are filled with "" when absent from the log line.
OOMEvent = namedtuple("OOMEvent", ["hostname", "container_id", "process_name"])
def compose_check_name_for_service_instance(check_name, service, instance):
    """Build the Sensu check name for one service instance, e.g. 'oom-killer.svc.main'."""
    return ".".join((check_name, service, instance))
def parse_args(args):
    """Parse command-line options.

    :param args: list of argument strings (typically sys.argv[1:])
    :returns: argparse.Namespace with soa_dir, realert_every and superregion
    """
    parser = argparse.ArgumentParser(
        description=(
            "Check the %s stream and report to Sensu if"
            " there are any OOM events." % OOM_EVENTS_STREAM
        )
    )
    parser.add_argument(
        "-d", "--soa-dir",
        dest="soa_dir",
        default=DEFAULT_SOA_DIR,
        help="define a different soa config directory",
    )
    parser.add_argument(
        "-r", "--realert-every",
        dest="realert_every",
        type=int,
        default=1,
        help="Sensu 'realert_every' to use.",
    )
    parser.add_argument(
        "-s", "--superregion",
        dest="superregion",
        required=True,
        help="The superregion to read OOM events from.",
    )
    return parser.parse_args(args)
def read_oom_events_from_scribe(cluster, superregion, num_lines=1000):
    """Yield parsed OOM-event dicts for *cluster* from a scribe tail.

    Tails the latest *num_lines* lines of OOM_EVENTS_STREAM; lines that are
    not valid JSON, or that belong to another cluster, are silently skipped.
    """
    host_port = choice(scribereader.get_default_scribe_hosts(tail=True))
    tailer = scribereader.get_stream_tailer(
        stream_name=OOM_EVENTS_STREAM,
        tailing_host=host_port["host"],
        tailing_port=host_port["port"],
        use_kafka=True,
        lines=num_lines,
        superregion=superregion,
    )
    for raw_line in tailer:
        try:
            event = json.loads(raw_line)
        except json.decoder.JSONDecodeError:
            continue
        if event.get("cluster", "") == cluster:
            yield event
def latest_oom_events(cluster, superregion, interval=60):
    """Collect recent OOM events grouped by service instance.

    :returns: {(service, instance): [OOMEvent, OOMEvent, ...]} containing only
        events newer than *interval* seconds; instances with no events are absent.
    """
    cutoff = int(time.time()) - interval
    events_by_instance = {}
    for event in read_oom_events_from_scribe(cluster, superregion):
        if event["timestamp"] <= cutoff:
            continue
        key = (event["service"], event["instance"])
        events_by_instance.setdefault(key, []).append(
            OOMEvent(
                hostname=event.get("hostname", ""),
                container_id=event.get("container_id", ""),
                process_name=event.get("process_name", ""),
            )
        )
    return events_by_instance
def compose_sensu_status(instance, oom_events, is_check_enabled):
    """Build the (status, output) pair for the oom-killer Sensu check.

    :param instance: InstanceConfig
    :param oom_events: a list of OOMEvents seen in the last minute
    :param is_check_enabled: whether the check is enabled for the instance
    :returns: tuple of (pysensu Status, human-readable message)
    """
    if not is_check_enabled:
        return (
            Status.OK,
            "This check is disabled for {}.{}.".format(
                instance.service, instance.instance
            ),
        )
    if not oom_events:
        return (
            Status.OK,
            "No oom events for %s.%s in the last minute."
            % (instance.service, instance.instance),
        )
    # De-duplicate and sort process names for a stable, readable message.
    killed_names = ",".join(
        sorted({e.process_name for e in oom_events if e.process_name})
    )
    return (
        Status.CRITICAL,
        "The Out Of Memory killer killed %d processes (%s) "
        "in the last minute in %s.%s containers."
        % (len(oom_events), killed_names, instance.service, instance.instance),
    )
def send_sensu_event(instance, oom_events, args):
    """Emit one Sensu event summarising OOM kills for a service instance.

    :param instance: InstanceConfig
    :param oom_events: a list of OOMEvents
    :param args: parsed CLI args; only realert_every is consulted here
    """
    check_name = compose_check_name_for_service_instance(
        "oom-killer", instance.service, instance.instance
    )
    monitoring_overrides = instance.get_monitoring()
    check_enabled = monitoring_overrides.get("check_oom_events", True)
    status, output = compose_sensu_status(
        instance=instance,
        oom_events=oom_events,
        is_check_enabled=check_enabled,
    )
    # These values always win over per-service monitoring config.
    monitoring_overrides.update(
        {
            "page": False,
            "ticket": False,
            "alert_after": "0m",
            "realert_every": args.realert_every,
            "runbook": "y/check-oom-events",
            "tip": "Try bumping the memory limit past %dMB" % instance.get_mem(),
        }
    )
    return monitoring_tools.send_event(
        service=instance.service,
        check_name=check_name,
        overrides=monitoring_overrides,
        status=status,
        output=output,
        soa_dir=instance.soa_dir,
    )
def main(sys_argv):
    """Entry point: report recent OOM events to Sensu for every instance in the cluster."""
    args = parse_args(sys_argv[1:])
    cluster = load_system_paasta_config().get_cluster()
    victims = latest_oom_events(cluster, args.superregion)
    for service, instance in get_services_for_cluster(cluster, soa_dir=args.soa_dir):
        try:
            instance_config = get_instance_config(
                service=service,
                instance=instance,
                cluster=cluster,
                load_deployments=False,
                soa_dir=args.soa_dir,
            )
            send_sensu_event(instance_config, victims.get((service, instance), []), args)
        except NotImplementedError:
            # Raised when instance_type is not supported by get_instance_config.
            pass
if __name__ == "__main__":
main(sys.argv)
|
Python
| 0
|
@@ -5183,37 +5183,8 @@
se,%0A
- %22ticket%22: False,%0A
|
17f58b0b269dcdbd4c092bbcf1d0fc993f5807d8
|
Use realpath for template name in dependency list
|
yasha/scripts/yasha.py
|
yasha/scripts/yasha.py
|
"""
The MIT License (MIT)
Copyright (c) 2015-2016 Kim Blomqvist
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os, sys
import click
from ..parsers import *
from .. import yasha
def find_variables(template, filext):
    """Locate a variables file for *template* carrying one of the given extensions."""
    return yasha.find_dependencies(template, filext)
def find_extensions(template):
    """Locate a Jinja extensions file (.py/.j2ext/.jinja-ext) for *template*."""
    return yasha.find_dependencies(template, [".py", ".j2ext", ".jinja-ext"])
def parse_variables(file, parsers):
    """Parse template variables from *file* with the first parser matching its extension.

    Returns {} when no file is given or no parser claims the extension.
    """
    if not file:
        return {}
    _, ext = os.path.splitext(file.name)
    for candidate in parsers:
        if ext in candidate.file_extension:
            return candidate.parse(file)
    return {}
def load_extensions(file):
    """Import *file* as an 'extensions' module, aborting the CLI on syntax errors."""

    def abort_on_syntax_error(exc):
        # Present the syntax error in a compact, user-facing form and abort.
        message = exc.msg[0].upper() + exc.msg[1:]
        source = os.path.relpath(exc.filename)
        click.echo("Error: Cannot load extensions", nl=False, err=True)
        click.echo(": {} ({}, line {})".format(message, source, exc.lineno), err=True)
        raise click.Abort()

    try:
        from importlib.machinery import SourceFileLoader
        module = SourceFileLoader("extensions", file.name).load_module()
    except SyntaxError as e:
        abort_on_syntax_error(e)
    except ImportError:
        # Python 2 has no importlib.machinery loader; fall back to imp.
        try:
            import imp
            desc = (".py", "rb", imp.PY_SOURCE)
            module = imp.load_module("extensions", file, file.name, desc)
        except SyntaxError as e:
            abort_on_syntax_error(e)
    return module
def parse_extensions(extmodule, extdict):
    """Sort the public attributes of *extmodule* into the extension dict.

    Functions named test_*/filter_*/preprocess_* become Jinja tests, filters
    and variable preprocessors; Extension subclasses become Jinja extensions;
    Parser subclasses are instantiated and prepended to the parser list.
    """
    from jinja2.ext import Extension
    import inspect
    public_names = (n for n in dir(extmodule) if not n.startswith("__"))
    for attr in (getattr(extmodule, n) for n in public_names):
        if inspect.isfunction(attr):
            fname = attr.__name__
            if fname.startswith("test_"):
                extdict["jinja_tests"].append(attr)
            elif fname.startswith("filter_"):
                extdict["jinja_filters"].append(attr)
            elif fname.startswith("preprocess_"):
                extdict["variable_preprocessors"].append(attr)
        elif inspect.isclass(attr):
            if issubclass(attr, Extension):
                extdict["jinja_extensions"].append(attr)
            elif issubclass(attr, Parser):
                # Custom parsers take precedence over the built-in ones.
                extdict["variable_parsers"].insert(0, attr())
    return extdict
def load_jinja(searchpath, extdict):
    """Create a Jinja environment wired with the collected tests, filters and extensions."""
    from jinja2 import Environment, FileSystemLoader
    env = Environment(
        extensions=extdict["jinja_extensions"],
        loader=FileSystemLoader(searchpath),
    )
    # Registered names drop their test_/filter_ prefix.
    for fn in extdict["jinja_tests"]:
        env.tests[fn.__name__.replace("test_", "")] = fn
    for fn in extdict["jinja_filters"]:
        env.filters[fn.__name__.replace("filter_", "")] = fn
    return env
@click.command(context_settings=dict(help_option_names=["-h", "--help"]))
@click.argument("template", type=click.File("rb"))
@click.option("--output", "-o", type=click.File("wt"), help="Explicit output file.")
@click.option("--variables", "-v", type=click.File("rb"), envvar="YASHA_VARIABLES", help="Explicit template variables file.")
@click.option("--extensions", "-e", type=click.File("rb"), envvar="YASHA_EXTENSIONS", help="Explicit custom Jinja extensions file.")
@click.option("--no-variables", is_flag=True, help="Omit template variables.")
@click.option("--no-extensions", is_flag=True, help="Omit Jinja extensions.")
@click.option("--trim", is_flag=True, help="Strips extra whitespace. Spares the single empty lines, though.")
@click.option("-MD", is_flag=True, help="Creates Makefile compatible .d file alongside rendering. Not active with stdin.")
def cli(template, output, variables, extensions, no_variables, no_extensions, trim, md):
    """This script reads the given Jinja template and renders its content
    into new file, which name is derived from the given template name. For
    example the rendered foo.c.jinja template will be written into foo.c if
    not explicitly specified."""
    # Resolve symlinks so the template's real directory becomes the Jinja
    # search path and the basis for derived file names.
    t_realpath = os.path.realpath(template.name)
    t_basename = os.path.basename(t_realpath)
    t_dirname = os.path.dirname(t_realpath)
    vardict = {
    }
    extdict = {
        "jinja_tests": [],
        "jinja_filters": [],
        "jinja_extensions": [],
        "variable_parsers": [TomlParser(), YamlParser(), SvdParser()],
        "variable_preprocessors": [],
    }
    # Auto-discover an extensions file next to the template unless one was
    # given explicitly or extensions were disabled.
    if not extensions and not no_extensions:
        extpath = find_extensions(template.name)
        extensions = click.open_file(extpath, "rb") if extpath else None
    if extensions and not no_extensions:
        extmodule = load_extensions(extensions)
        extdict = parse_extensions(extmodule, extdict)
    # Likewise auto-discover a variables file matching any parser extension.
    if not variables and not no_variables:
        filext = [p.file_extension for p in extdict["variable_parsers"]]
        varpath = find_variables(template.name, sum(filext, []))
        variables = click.open_file(varpath, "rb") if varpath else None
    if variables and not no_variables:
        vardict = parse_variables(variables, extdict["variable_parsers"])
        for preprocessor in extdict["variable_preprocessors"]:
            vardict = preprocessor(vardict)
    jinja = load_jinja(t_dirname, extdict)
    if template.name == "<stdin>":
        # Templates piped via stdin are read in chunks and rendered in memory.
        template_string = ""
        while True:
            chunk = template.read(1024)
            if not chunk:
                break
            template_string += chunk.decode("utf-8")
        t = jinja.from_string(template_string)
    else:
        t = jinja.get_template(t_basename)
    if not output:
        if template.name == "<stdin>":
            output = click.open_file("-", "wt")
        else:
            # foo.c.jinja -> foo.c: drop the template's last extension.
            o_realpath = os.path.splitext(t_realpath)[0]
            output = click.open_file(o_realpath, "wt")
    if trim:
        # Strip trailing whitespace and collapse runs of blank lines to one.
        prevline = os.linesep
        for line in t.render(vardict).splitlines():
            line = line.rstrip() + os.linesep
            if line == os.linesep and line == prevline:
                continue
            if sys.version_info[0] < 3:
                output.write(line.encode("utf-8"))
            else:
                output.write(line)
            prevline = line
    else:
        if sys.version_info[0] < 3:
            output.write(t.render(vardict).encode("utf-8"))
        else:
            output.write(t.render(vardict))
    if md and not output.name == "<stdout>":
        # Emit a Make-style dependency file listing the template's inputs.
        deps = os.path.relpath(output.name) + ": " + template.name + " "
        if variables:
            deps += os.path.relpath(variables.name) + " "
        if extensions:
            deps += os.path.relpath(extensions.name)
        deps += os.linesep
        output_d = click.open_file(output.name + ".d", "wt")
        output_d.write(deps)
|
Python
| 0
|
@@ -7286,19 +7286,49 @@
) + %22: %22
- +
+%0A deps += os.path.relpath(
template
@@ -7328,24 +7328,25 @@
emplate.name
+)
+ %22 %22%0A
|
40d59c44f8488ab6445b626637bfb3135cbbfd56
|
Clean up Firefox WebDriver constructor
|
py/selenium/webdriver/firefox/webdriver.py
|
py/selenium/webdriver/firefox/webdriver.py
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
try:
import http.client as http_client
except ImportError:
import httplib as http_client
try:
basestring
except NameError: # Python 3.x
basestring = str
import shutil
import socket
import sys
from .firefox_binary import FirefoxBinary
from .remote_connection import FirefoxRemoteConnection
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.firefox.extension_connection import ExtensionConnection
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from .service import Service
from .options import Options
class WebDriver(RemoteWebDriver):
    """Firefox driver speaking either Marionette (via the wires/geckodriver
    service) or the legacy extension connection, depending on capabilities."""

    # There is no native event support on Mac
    NATIVE_EVENTS_ALLOWED = sys.platform != "darwin"

    def __init__(self, firefox_profile=None, firefox_binary=None, timeout=30,
                 capabilities=None, proxy=None, executable_path="wires", firefox_options=None):
        self.profile = firefox_profile
        self.binary = firefox_binary
        # When no Options object is supplied, synthesize one from the
        # individual profile/binary/capabilities arguments.
        if firefox_options is None:
            if self.profile is None:
                self.profile = FirefoxProfile()
            self.profile.native_events_enabled = (
                self.NATIVE_EVENTS_ALLOWED and self.profile.native_events_enabled)
            if capabilities is None:
                capabilities = DesiredCapabilities.FIREFOX
            if self.binary is None:
                # capabilities may carry a 'binary' path; otherwise autodetect.
                self.binary = capabilities.get("binary") or FirefoxBinary()
            firefox_options = Options()
            # binary may be either a path string or a FirefoxBinary wrapper.
            firefox_options.binary_location = self.binary if isinstance(self.binary, basestring) else self.binary._get_firefox_start_cmd()
            firefox_options.profile = self.profile
        if capabilities is None:
            capabilities = firefox_options.to_capabilities()
        else:
            capabilities.update(firefox_options.to_capabilities())
        # marionette
        if capabilities.get("marionette"):
            self.binary = firefox_options.binary_location
            if isinstance(firefox_options.binary_location, FirefoxBinary):
                self.binary = firefox_options.binary_location._get_firefox_start_cmd()
            # Launch the external driver service and connect over HTTP.
            self.service = Service(executable_path, firefox_binary=self.binary)
            self.service.start()
            executor = FirefoxRemoteConnection(
                remote_server_addr=self.service.service_url)
            RemoteWebDriver.__init__(
                self,
                command_executor=executor,
                desired_capabilities=capabilities,
                keep_alive=True)
        else:
            # Oh well... sometimes the old way is the best way.
            if proxy is not None:
                proxy.add_to_capabilities(capabilities)
            if self.binary is None:
                self.binary = firefox_options.binary_location or FirefoxBinary()
            if self.profile is None:
                self.profile = firefox_options.profile or FirefoxProfile()
            executor = ExtensionConnection("127.0.0.1", self.profile,
                                           self.binary, timeout)
            RemoteWebDriver.__init__(self,
                command_executor=executor,
                desired_capabilities=capabilities,
                keep_alive=True)
        self._is_remote = False

    def quit(self):
        """Quits the driver and close every associated window."""
        try:
            RemoteWebDriver.quit(self)
        except (http_client.BadStatusLine, socket.error):
            # Happens if Firefox shutsdown before we've read the response from
            # the socket.
            pass
        # 'specificationLevel' is only present for Marionette sessions —
        # stop the service there, kill the spawned binary otherwise.
        if "specificationLevel" in self.capabilities:
            self.service.stop()
        else:
            self.binary.kill()
        try:
            shutil.rmtree(self.profile.path)
            if self.profile.tempfolder is not None:
                shutil.rmtree(self.profile.tempfolder)
        except Exception as e:
            print(str(e))

    @property
    def firefox_profile(self):
        # Read-only view of the profile this driver was started with.
        return self.profile

    def set_context(self, context):
        # Switch Marionette between 'content' and 'chrome' contexts.
        self.execute("SET_CONTEXT", {"context": context})
|
Python
| 0.000007
|
@@ -1778,111 +1778,73 @@
-self.profile = firefox_profile%0A self.binary = firefox_binary%0A if firefox_options is None:
+capabilities = capabilities or DesiredCapabilities.FIREFOX.copy()
%0A%0A
@@ -1841,39 +1841,32 @@
copy()%0A%0A
- if
self.profile is
@@ -1866,46 +1866,26 @@
ile
-is None:%0A self.
+= firefox_
profile
= Fi
@@ -1880,17 +1880,18 @@
profile
-=
+or
Firefox
@@ -1892,37 +1892,32 @@
irefoxProfile()%0A
-%0A
self.pro
@@ -1955,28 +1955,24 @@
-
-
self.NATIVE_
@@ -2039,162 +2039,39 @@
- if capabilities is None:%0A capabilities = DesiredCapabilities.FIREFOX%0A%0A if self.binary is None:%0A self.
+self.binary = firefox_
binary
-=
+or
cap
@@ -2096,12 +2096,9 @@
ary%22
-) or
+,
Fir
@@ -2101,32 +2101,33 @@
FirefoxBinary()
+)
%0A%0A fi
@@ -2112,35 +2112,46 @@
ary())%0A%0A
-
+self.options =
firefox_options
@@ -2151,17 +2151,18 @@
options
-=
+or
Options
@@ -2168,36 +2168,29 @@
s()%0A
- firefox_
+self.
options.bina
@@ -2308,20 +2308,13 @@
- firefox_
+self.
opti
@@ -2340,25 +2340,24 @@
profile%0A
-%0A
if capab
@@ -2340,35 +2340,32 @@
profile%0A
-if
capabilities is
@@ -2364,445 +2364,112 @@
ties
- is None:%0A capabilities = firefox_options.to_capabilities()%0A else:%0A capabilities.update(firefox_options.to_capabilities())%0A%0A # marionette%0A if capabilities.get(%22marionette%22):%0A self.binary = firefox_options.binary_location%0A if isinstance(firefox_options.binary_location, FirefoxBinary):%0A self.binary = firefox_options.binary_location._get_firefox_start_cmd()%0A
+.update(self.options.to_capabilities())%0A%0A # marionette%0A if capabilities.get(%22marionette%22):
%0A
@@ -2537,22 +2537,39 @@
ry=self.
-binary
+options.binary_location
)%0A
@@ -3065,239 +3065,8 @@
s)%0A%0A
- if self.binary is None:%0A self.binary = firefox_options.binary_location or FirefoxBinary()%0A%0A if self.profile is None:%0A self.profile = firefox_options.profile or FirefoxProfile()%0A%0A
|
c33b23e1d5263321cc29e2fe1f9871e36d97c5e5
|
add method get on opps db redis
|
opps/db/_redis.py
|
opps/db/_redis.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from opps.db.conf import settings
from redis import ConnectionPool
from redis import Redis as RedisClient
class Redis:
    """Thin Redis key/value wrapper namespaced by the OPPS settings.

    Keys are composed as '<OPPS_DB_NAME>_<key_prefix>_<key_sufix>'.
    """

    def __init__(self, key_prefix, key_sufix):
        """Open a pooled connection to the Redis host from OPPS settings.

        :param key_prefix: first namespace component of the storage key
        :param key_sufix: second namespace component of the storage key
        """
        self.key_prefix = key_prefix
        self.key_sufix = key_sufix
        self.host = settings.OPPS_DB_HOST
        self.port = settings.OPPS_DB_PORT
        self.db = 0  # NOTE(review): database index is hard-coded — confirm intended
        pool = ConnectionPool(host=self.host,
                              port=self.port,
                              db=self.db)
        self.conn = RedisClient(connection_pool=pool)

    def close(self):
        # Drop our reference; pooled sockets are reclaimed by garbage collection.
        self.conn = None
        return True

    def key(self):
        """Return the namespaced storage key for this instance."""
        return '{}_{}_{}'.format(settings.OPPS_DB_NAME,
                                 self.key_prefix,
                                 self.key_sufix)

    def get(self):
        """Retrieve the stored document, or None when the key is absent.

        Added read accessor complementing save(); note the explicit return —
        a GET whose result is discarded would be useless to callers.
        """
        return self.conn.get(self.key())

    def save(self, document):
        """Store *document* under key(); returns the Redis SET result."""
        return self.conn.set(self.key(), document)
|
Python
| 0
|
@@ -898,8 +898,61 @@
ument)%0A%0A
+ def get(self):%0A self.conn.get(self.key())%0A
|
b03b168cd752d50f1091106d3f4fcc0a79b22203
|
Fix tests
|
siyavula/latex2image/tests/latex2image_tests.py
|
siyavula/latex2image/tests/latex2image_tests.py
|
# coding=utf-8
from unittest import TestCase
from lxml import etree
from siyavula.latex2image.imageutils import replace_latex_with_images
class TestBaseEquationToImageConversion(TestCase):
    """Test the equation to image conversion."""

    def setUp(self):
        # Minimal document: <xml><div class="latex-math"/></xml>
        self.element_input = etree.Element('xml')
        self.div_input = etree.SubElement(self.element_input, 'div')
        self.div_input.set('class', 'latex-math')

    def test_complex_equation_to_png(self):
        # Doubled braces keep the literal readable; they are collapsed to
        # single braces before rendering. The string mixes escaped LaTeX,
        # text-mode dollar signs and non-ASCII (µ/μ) characters on purpose.
        self.div_input.text = u'\\(\\begin{{aligned}} \\vec{{F}}_{{g}} & = m\\vec{{g}} \\\\ & = (\\text{{12,7}}\\ \\text{{kg}})(\\text{{9,8}}\\ \\text{{m·s$^{{-2}}$}}) \\\\ & = \\text{{124,46}}\\ \\text{{N}}\\text{{µ µ μ μ}} µ µ μ μ \\end{{aligned}}\\'.replace('{{', '{').replace('}}', '}')
        # replace_latex_with_images mutates the tree in place and returns None.
        self.assertEqual(replace_latex_with_images(self.element_input, 'latex-math', '', ''), None)
|
Python
| 0.000003
|
@@ -60,16 +60,22 @@
rt etree
+, html
%0A%0Afrom s
@@ -799,24 +799,27 @@
-self.assertEqual
+xml = html.tostring
(rep
@@ -886,12 +886,232 @@
'')
-, None
+)%0A self.assertEqual(xml, '%3Cxml%3E%3Cdiv class=%22latex-math%22%3E%3Ca href=%22/8996d7eee5c41cdf08aa8c0e9fe42e93.png%22%3E%3Cimg src=%22/8996d7eee5c41cdf08aa8c0e9fe42e93.png%22 srcset=%22/b0791f40d3207d55907aa0b7df78ca1e.png 2x%22%3E%3C/a%3E%3C/div%3E%3C/xml%3E'
)%0A
|
3fd74018c87ec598de173de7d13224523ee98ec5
|
update LATEX_SUBS table
|
IPython/nbconvert/filters/latex.py
|
IPython/nbconvert/filters/latex.py
|
"""Latex filters.
Module of useful filters for processing Latex within Jinja latex templates.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import re
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
# Latex substitutions for escaping latex.
# see: http://stackoverflow.com/questions/16259923/how-can-i-escape-latex-special-characters-inside-django-templates
# Maps each LaTeX-special character to its escaped replacement. The \letter*
# macros are presumably defined by the accompanying nbconvert LaTeX templates
# — confirm before renaming any of them.
LATEX_SUBS = {
    '&': r'\&',
    '%': r'\%',
    '$': r'\$',
    '#': r'\#',
    '_': r'\letterunderscore{}',
    '{': r'\letteropenbrace{}',
    '}': r'\letterclosebrace{}',
    '~': r'\lettertilde{}',
    '^': r'\letterhat{}',
    '\\': r'\letterbackslash{}'}
#-----------------------------------------------------------------------------
# Functions
#-----------------------------------------------------------------------------
__all__ = ['escape_latex',
           'strip_math_space']


def escape_latex(text):
    """
    Escape characters that may conflict with latex.

    Parameters
    ----------
    text : str
        Text containing characters that may conflict with Latex
    """
    # Characters without an entry in LATEX_SUBS pass through unchanged.
    return ''.join(LATEX_SUBS.get(char, char) for char in text)
def strip_math_space(text):
    """
    Remove the space between latex math commands and enclosing $ symbols.
    This filter is important because latex isn't as flexible as the notebook
    front end when it comes to flagging math using ampersand symbols.

    Parameters
    ----------
    text : str
        Text to filter.
    """
    # First, scan through the markdown looking for $. If
    # a $ symbol is found, without a preceding \, assume
    # it is the start of a math block. UNLESS that $ is
    # not followed by another within two math_lines.
    # NOTE(review): math_regions is collected but never returned or read —
    # confirm whether it is intentionally vestigial.
    math_regions = []
    math_lines = 0
    within_math = False
    math_start_index = 0
    ptext = ''            # accumulates the filtered output
    last_character = ""
    skip = False          # set when the closing $ was just emitted, so the
                          # character is not appended twice
    for index, char in enumerate(text):
        #Make sure the character isn't preceeded by a backslash
        if (char == "$" and last_character != "\\"):
            # Close the math region if this is an ending $
            if within_math:
                within_math = False
                skip = True
                # Re-emit the math span with inner whitespace stripped.
                ptext = ptext+'$'+text[math_start_index+1:index].strip()+'$'
                math_regions.append([math_start_index, index+1])
            else:
                # Start a new math region
                within_math = True
                math_start_index = index
                math_lines = 0
        # If we are in a math region, count the number of lines parsed.
        # Cancel the math region if we find two line breaks!
        elif char == "\n":
            if within_math:
                math_lines += 1
                if math_lines > 1:
                    within_math = False
                    # Flush the abandoned region verbatim.
                    ptext = ptext+text[math_start_index:index]
        # Remember the last character so we can easily watch
        # for backslashes
        last_character = char
        if not within_math and not skip:
            ptext = ptext+char
        if skip:
            skip = False
    return ptext
|
Python
| 0
|
@@ -966,16 +966,17 @@
mplates%0A
+%0A
LATEX_SU
@@ -1015,17 +1015,16 @@
r'%5C%25',
-
%0A '$'
@@ -1032,17 +1032,16 @@
r'%5C$',
-
%0A '#'
@@ -1049,17 +1049,16 @@
r'%5C#',
-
%0A '_'
@@ -1067,29 +1067,11 @@
r'%5C
-letterunderscore%7B%7D
+_
',
-
%0A
@@ -1084,28 +1084,11 @@
r'%5C
-letteropenbrace%7B%7D
+%7B
',
-
%0A
@@ -1101,25 +1101,8 @@
r'%5C
-letterclosebrace%7B
%7D',%0A
@@ -1114,22 +1114,25 @@
~': r'%5C
-letter
+textascii
tilde%7B%7D'
@@ -1128,25 +1128,24 @@
ciitilde%7B%7D',
-
%0A '%5E': r
@@ -1150,22 +1150,13 @@
r'%5C
-letterhat
+%5E
%7B%7D',
-
%0A
@@ -1169,14 +1169,12 @@
r'%5C
-letter
+text
back
@@ -1181,16 +1181,18 @@
slash%7B%7D'
+,%0A
%7D%0A%0A%0A#---
|
e8836b134c47080edaf47532d7cb844b307dfb08
|
Add a guard against the task list changing when shutting down (#776)
|
zeroconf/_utils/aio.py
|
zeroconf/_utils/aio.py
|
""" Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine
This module provides a framework for the use of DNS Service Discovery
using IP multicast.
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""
import asyncio
import contextlib
import queue
from typing import Optional, Set, cast
def get_best_available_queue() -> queue.Queue:
    """Create the best available queue type."""
    # SimpleQueue (3.7+) is reentrant-safe and lighter; fall back otherwise.
    if not hasattr(queue, "SimpleQueue"):
        return queue.Queue()
    return queue.SimpleQueue()  # type: ignore # pylint: disable=all
# Switch to asyncio.wait_for once https://bugs.python.org/issue39032 is fixed
# Switch to asyncio.wait_for once https://bugs.python.org/issue39032 is fixed
async def wait_event_or_timeout(event: asyncio.Event, timeout: float) -> None:
    """Wait for an event or timeout."""
    loop = asyncio.get_event_loop()
    future = loop.create_future()

    def _finish(*_: object) -> None:
        # Resolve exactly once, whichever of the timer or the event wins.
        if not future.done():
            future.set_result(None)

    timer_handle = loop.call_later(timeout, _finish)
    event_wait = loop.create_task(event.wait())
    event_wait.add_done_callback(_finish)
    try:
        await future
    finally:
        timer_handle.cancel()
        if not event_wait.done():
            event_wait.cancel()
            with contextlib.suppress(asyncio.CancelledError):
                await event_wait
async def _get_all_tasks(loop: asyncio.AbstractEventLoop) -> Set[asyncio.Task]:
"""Return all tasks running."""
await asyncio.sleep(0) # flush out any call_soon_threadsafe
if hasattr(asyncio, 'all_tasks'):
return cast(Set[asyncio.Task], asyncio.all_tasks(loop)) # type: ignore # pylint: disable=no-member
return cast(Set[asyncio.Task], asyncio.Task.all_tasks(loop)) # type: ignore # pylint: disable=no-member
async def _wait_for_loop_tasks(wait_tasks: Set[asyncio.Task]) -> None:
"""Wait for the event loop thread we started to shutdown."""
await asyncio.wait(wait_tasks, timeout=1)
def shutdown_loop(loop: asyncio.AbstractEventLoop) -> None:
    """Wait briefly for pending tasks, then stop the event loop (thread-safe).

    :param loop: the loop to stop; must be running in another thread.
    """
    all_tasks = asyncio.run_coroutine_threadsafe(_get_all_tasks(loop), loop).result()
    # Bug fix: the previous code filtered with `not task.done()` into the set
    # it subtracted, leaving only *completed* tasks — so it never actually
    # waited for the still-running ones. Keep the unfinished tasks instead.
    pending_tasks = {task for task in all_tasks if not task.done()}
    if pending_tasks:
        asyncio.run_coroutine_threadsafe(_wait_for_loop_tasks(pending_tasks), loop).result()
    loop.call_soon_threadsafe(loop.stop)
# Remove the call to _get_running_loop once we drop python 3.6 support
# Remove the call to _get_running_loop once we drop python 3.6 support
def get_running_loop() -> Optional[asyncio.AbstractEventLoop]:
    """Return the running event loop, or None when no loop is running."""
    getter = getattr(asyncio, "get_running_loop", None)
    if getter is None:
        # Pre-3.7 fallback; returns None rather than raising when idle.
        getter = asyncio._get_running_loop  # pylint: disable=no-member,protected-access
    try:
        return cast(asyncio.AbstractEventLoop, getter())
    except RuntimeError:
        return None
|
Python
| 0.009597
|
@@ -1029,16 +1029,22 @@
g import
+ List,
Optiona
@@ -2241,18 +2241,19 @@
oop) -%3E
-Se
+Lis
t%5Basynci
@@ -2362,16 +2362,84 @@
eadsafe%0A
+ # Make a copy of the tasks in case they change during iteration%0A
if h
@@ -2487,32 +2487,13 @@
urn
-ca
+li
st(
-Set%5Basyncio.Task%5D,
asyn
@@ -2573,32 +2573,13 @@
urn
-ca
+li
st(
-Set%5Basyncio.Task%5D,
asyn
@@ -2972,16 +2972,20 @@
tasks =
+set(
asyncio.
@@ -3041,24 +3041,25 @@
op).result()
+)
%0A done_ta
|
3989abf6de879af6982a76ea3522f11f789c6569
|
Increment version for speedup release
|
MarkovNetwork/_version.py
|
MarkovNetwork/_version.py
|
# -*- coding: utf-8 -*-
"""
Copyright 2016 Randal S. Olson
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
__version__ = '1.2'
|
Python
| 0
|
@@ -1100,11 +1100,11 @@
__ = '1.
-2
+3
'%0A
|
75e42bbc1d6d6c1692082f8567ac777eaf3b6616
|
use real timeouts
|
src/astroprint/printfiles/downloadmanager.py
|
src/astroprint/printfiles/downloadmanager.py
|
# coding=utf-8
__author__ = "AstroPrint Product Team <product@astroprint.com>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2016-2019 3DaGoGo, Inc - Released under terms of the AGPLv3 License"
import threading
import logging
import requests
from Queue import Queue
from astroprint.printer.manager import printerManager
from octoprint.events import eventManager, Events
from astroprint.printfiles import FileDestinations
# singleton
_instance = None
def downloadManager():
    """Return the process-wide DownloadManager, creating it lazily on first use."""
    global _instance
    if _instance is None:
        _instance = DownloadManager()
    return _instance
# download item is:
# downloadUrl : url to of the file to download
# destFile : destination file
# printFileId : Id of the print file to be downloaded,
# printFileInfo : Cloud info of the print file to be downloaded,
# progressCb : callback to report progress
# successCb : callback to report success
# errorCb : callback to report errors
class DownloadWorker(threading.Thread):
def __init__(self, manager):
    """Worker thread that consumes download jobs from the manager's queue.

    :param manager: the DownloadManager owning the shared job queue.
    """
    super(DownloadWorker, self).__init__()
    # Bug fix: the previous code set self._daemon, which threading.Thread
    # ignores; the public 'daemon' flag is what stops the worker from
    # blocking interpreter shutdown.
    self.daemon = True
    self._manager = manager
    self._activeRequest = None
    self._canceled = False
    self.activeDownload = False
def run(self):
downloadQueue = self._manager.queue
while True:
item = downloadQueue.get()
if item == 'shutdown':
return
printFileId = item['printFileId']
printFileName = item['printFileName']
progressCb = item['progressCb']
successCb = item['successCb']
errorCb = item['errorCb']
destFile = item['destFile']
printer = None
material = None
quality = None
image = None
created = None
retries = 3
if "printer" in item:
printer = item['printer']
if "material" in item:
material = item['material']
if "quality" in item:
quality = item['quality']
if "image" in item:
image = item['image']
if "created" in item:
created = item['created']
self._manager._logger.info('Download started for %s' % printFileId)
self.activeDownload = printFileId
self._canceled = False
while retries > 0:
try:
#Perform download here
#r = requests.get(item['downloadUrl'], stream= True, timeout= (10.0, 60.0))
r = requests.get(item['downloadUrl'], stream= True, timeout= (10.0, 5.0))
self._activeRequest = r
if r.status_code == 200:
content_length = float(r.headers['Content-Length'])
downloaded_size = 0.0
with open(destFile, 'wb') as fd:
for chunk in r.iter_content(100000): #download 100kb at a time
if self._canceled: #check right after reading
break
downloaded_size += len(chunk)
fd.write(chunk)
progressCb(2 + round((downloaded_size / content_length) * 98.0, 1))
if self._canceled: #check again before going to read next chunk
break
retries = 0 #No more retries after this
if self._canceled:
r.close()
self._manager._logger.warn('Download canceled for %s' % printFileId)
errorCb(destFile, 'cancelled')
else:
self._manager._logger.info('Download completed for %s' % printFileId)
if item['printFileInfo'] is None:
printerManager().fileManager._metadataAnalyzer.addFileToQueue(destFile)
fileInfo = {
'id': printFileId,
'printFileName': printFileName,
'info': item['printFileInfo'],
'printer': printer,
'material': material,
'quality': quality,
'image': image,
'created': created
}
em = eventManager()
if printerManager().fileManager.saveCloudPrintFile(destFile, fileInfo, FileDestinations.LOCAL):
em.fire(
Events.CLOUD_DOWNLOAD, {
"type": "success",
"id": printFileId,
"filename": printerManager().fileManager._getBasicFilename(destFile),
"info": fileInfo["info"],
"printer": fileInfo["printer"],
"material": fileInfo["material"],
"quality": fileInfo["quality"],
"image": fileInfo["image"],
"created": fileInfo["created"]
}
)
successCb(destFile, fileInfo)
else:
errorCb(destFile, "Couldn't save the file")
elif r.status_code in [502, 503, 500]:
self._manager._logger.warn('Download failed for %s with %d. Retrying...' % (printFileId, r.status_code))
retries -= 1 #This error can be retried
else:
r.close()
self._manager._logger.error('Download failed for %s' % printFileId)
errorCb(destFile, 'The device is unable to download the print file')
retries = 0 #No more retries after this
except requests.exceptions.ConnectTimeout as e:
self._manager._logger.warn('Connection timeout for %s. Retrying...' % printFileId)
retries -= 1 #This error can be retried
except requests.exceptions.RequestException as e:
self._manager._logger.error('Download connection exception for %s: %s' % (printFileId, e))
errorCb(destFile, 'Connection Error while downloading the print file')
retries = 0 #No more retries after this
except Exception as e:
retries = 0 #No more retries after this
if "'NoneType' object has no attribute 'recv'" == str(e):
#This is due to a problem in the underlying library when calling r.close in the cancel routine
self._manager._logger.warn('Download canceled for %s' % printFileId)
errorCb(destFile, 'cancelled')
else:
self._manager._logger.error('Download exception for %s: %s' % (printFileId, e))
errorCb(destFile, 'The device is unable to download the print file')
self.activeDownload = False
self._activeRequest = None
downloadQueue.task_done()
def cancel(self):
if self.activeDownload:
if self._activeRequest:
self._activeRequest.close()
self._manager._logger.warn('Download canceled requested for %s' % self.activeDownload)
self._canceled = True
class DownloadManager(object):
_maxWorkers = 3
def __init__(self):
self._logger = logging.getLogger(__name__)
self.queue = Queue()
self._workers = []
for i in range(self._maxWorkers):
w = DownloadWorker(self)
self._workers.append( w )
w.start()
def isDownloading(self, printFileId):
for w in self._workers:
if w.activeDownload == printFileId:
return True
return False
def startDownload(self, item):
self.queue.put(item)
def cancelDownload(self, printFileId):
for w in self._workers:
if w.activeDownload == printFileId:
w.cancel()
return True
return False
def shutdown(self):
self._logger.info('Shutting down Download Manager...')
for w in self._workers:
self.queue.put('shutdown')
if w.activeDownload:
w.cancel()
|
Python
| 0.000004
|
@@ -2149,17 +2149,16 @@
re%0A%09%09%09%09%09
-#
r = requ
@@ -2228,87 +2228,8 @@
0))%0A
-%09%09%09%09%09r = requests.get(item%5B'downloadUrl'%5D, stream= True, timeout= (10.0, 5.0))%0A
%09%09%09%09
|
0a9bd97598bc63450bcf0956242d3b67e2a52d9b
|
Remove testing code
|
pysis/reqs/buildings/__init__.py
|
pysis/reqs/buildings/__init__.py
|
# -*- encoding: utf-8 -*-
from pysis.reqs.base import Request
from pysis.resources.buildings import Buildings
from pysis.resources.outputs import Outputs
from pysis.resources.blastcells import Blastcells
from pysis.resources.metrics import Metrics
class Get(Request):
uri = 'buildings/{id}'
resource = Buildings
def clean_uri(self):
if not self.id:
return 'buildings'
class GetOutputs(Request):
uri = 'buildings/{id}/outputs'
resource = Outputs
class GetBlastcells(Request):
uri = 'buildings/{id}/blastcells'
resource = Blastcells
class GetInfo(Request):
uri = 'buildings/{id}/info'
resource = Buildings
def clean_uri(self):
if not self.id:
return 'buildings/{id}/info'
class GetMetricsScores(Request):
uri = 'buildings/{id}/metrics/energystar'
resource = Metrics
print vars(Request)
def clean_uri(self):
if not self.id:
return 'buildings/{id}/metrics'
class GetMetrics(Request):
uri = 'buildings/{id}/metrics'
resource = Metrics
def clean_uri(self):
if not self.id:
return 'buildings/{id}/metrics'
class Set(Request):
uri = 'buildings/{id}'
resource = Buildings
def clean_uri(self):
if not self.id:
return 'buildings/{id}'
|
Python
| 0.000002
|
@@ -849,32 +849,8 @@
ics%0A
- print vars(Request)%0A
|
dddb366dd56b85070d9ab51dab7a9ab7d317d1e5
|
Include working directory path from settings
|
src/tenyksafk/main.py
|
src/tenyksafk/main.py
|
import sqlite3
from os.path import join
from tenyksservice import TenyksService, run_service
class AFK(TenyksService):
direct_only = False
irc_message_filters = {
'depart': [r'^(?i)(xopa|away|afk|brb)'],
'return': [r'^(?i)(xoka|back)'],
'query': [r'(?P<nick>(.*))\?$'],
'list': [r'list']
}
def __init__(self, *args, **kwargs):
super(AFK, self).__init__(*args, **kwargs)
self.create_tables(self.fetch_cursor())
def handle_depart(self, data, match):
nick = data['nick']
if not self.user_exists(self.fetch_cursor(), nick):
self.create_user(self.fetch_cursor(), nick, True)
if not self.user_away(self.fetch_cursor(), nick):
self.send('{nick} is now AFK.'.format(nick=nick), data)
self.user_depart(self.fetch_cursor(), nick)
def handle_return(self, data, match):
nick = data['nick']
if not self.user_exists(self.fetch_cursor(), nick):
self.create_user(self.fetch_cursor(), nick, False)
if self.user_away(self.fetch_cursor(), nick):
self.send('{nick} is no longer AFK.'.format(nick=nick), data)
self.user_return(self.fetch_cursor(), nick)
def handle_query(self, data, match):
nick = match.groupdict()['nick']
if self.user_exists(self.fetch_cursor(), nick):
status = 'AFK' if self.user_away(self.fetch_cursor(), nick) else 'present'
self.send('{nick} is currently {status}.'.format(nick=nick, status=status), data)
else:
self.send('{nick}\'s status is unknown.'.format(nick=nick), data)
def handle_list(self, data, match):
afkers = self.fetch_afk(self.fetch_cursor())
if len(afkers) == 0:
self.send('There are currently no AFKers.', data)
else:
self.send('AFKers: {afk}'.format(afk=', '.join('%s' % nick for nick in afkers)), data)
def create_tables(self, cur):
table_sql = '''
CREATE TABLE IF NOT EXISTS afkers (
id INTEGER PRIMARY KEY,
nick TEXT,
away BOOLEAN
);
'''
cur.executescript(table_sql)
def fetch_cursor(self):
db_file = '{name}.db'.format(name=self.name)
conn = sqlite3.connect(db_file)
return conn.cursor()
def create_user(self, cur, nick, away=False):
result = cur.execute('''
INSERT INTO afkers (nick, away)
VALUES (?, ?)
''', (nick, away))
result.connection.commit()
def user_exists(self, cur, nick):
result = cur.execute('''
SELECT * FROM afkers
WHERE nick = ?
''', (nick,))
return result.fetchone() is not None
def user_depart(self, cur, nick):
result = cur.execute('''
UPDATE afkers SET away = ?
WHERE nick = ?
''', (True, nick))
result.connection.commit()
def user_return(self, cur, nick):
result = cur.execute('''
UPDATE afkers SET away = ?
WHERE nick = ?
''', (False, nick))
result.connection.commit()
def user_away(self, cur, nick):
result = cur.execute('''
SELECT away from afkers
WHERE nick = ?
''', (nick,))
return result.fetchone()[0]
def fetch_afk(self, cur):
result = cur.execute('''
SELECT nick FROM afkers
WHERE away = 1
ORDER BY nick ASC
''')
return result.fetchall();
def main():
run_service(AFK)
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -85,16 +85,58 @@
_service
+%0Afrom tenyksservice.config import settings
%0A%0Aclass
@@ -2339,16 +2339,43 @@
ect(
+join(settings.WORKING_DIR,
db_file)
%0A
@@ -2370,16 +2370,17 @@
db_file)
+)
%0A
|
d2ae65564c173789578c0119be7d1143d7c59641
|
Fix mistaken variable name.
|
pybtex/style/formatting/__init__.py
|
pybtex/style/formatting/__init__.py
|
# Copyright (C) 2006, 2007, 2008, 2009 Andrey Golovizin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from pybtex.core import FormattedEntry
from pybtex.style.template import node, join
from pybtex.richtext import Symbol, Text
from pybtex.plugin import find_plugin
@node
def toplevel(children, data):
return join(sep=Symbol('newblock')) [children].format_data(data)
class FormatterBase:
default_label_style = 'number'
default_name_style = 'plain'
def __init__(self, label_style=None, name_style=None, abbreviate_names=False, **kwargs):
if name_style is None:
name_style = find_plugin('pybtex.style.names', self.default_name_style)
if label_style is None:
label_format = find_plugin('pybtex.style.labels', self.default_label_style)
self.format_label = label_style.LabelStyle().format
self.format_name = name_style.NameStyle().format
self.abbreviate_names = abbreviate_names
def format_entries(self, entries):
for number, (key, entry) in enumerate(entries):
entry.number = number + 1
for persons in entry.persons.itervalues():
for person in persons:
person.text = self.format_name(person, self.abbreviate_names)
f = getattr(self, "format_" + entry.type)
text = f(entry)
label = self.format_label(entry)
yield FormattedEntry(key, text, label)
|
Python
| 0.000063
|
@@ -1310,22 +1310,21 @@
label_
-format
+style
= find_
|
d71092864fd9693b2762b862212573ef2f0917cb
|
Add testcase for command chaining.
|
src/test/test_pipe.py
|
src/test/test_pipe.py
|
#!python
import sys
if '../' not in sys.path:
sys.path.insert(0, '../')
from cmdlet import *
from cmdlet.cmd import *
upper = pipe.map(string.upper)
lower = pipe.map(string.lower)
def test_pipe_ans():
import string
register_default_types()
test_num = 100
cmd_upper = format('fOo%d') | upper
cmd_lower = lower | format('%sBaR')
cmd1 = range(test_num) | cmd_upper
ans = cmd1.run()
assert ans == ('FOO%d' % (test_num-1))
cmd2 = range(test_num) | cmd_upper | cmd_lower
ans = cmd2.run()
assert ans == ('foo%dBaR' % (test_num-1))
def test_pipe_result():
import string
register_default_types()
test_num = 100
cmd_upper = format('fOo%d') | upper
cmd_lower = lower | format('%sBaR')
cmd1 = range(test_num) | cmd_upper
for i, data in enumerate(cmd1.result()):
assert data == ('FOO%d' % i)
cmd2 = range(test_num) | cmd_upper | cmd_lower
for i, data in enumerate(cmd2.result()):
assert data == ('foo%dBaR' % i)
def test_pipe_iter():
import string
register_default_types()
test_num = 100
cmd_upper = format('fOo%d') | upper
cmd_lower = lower | format('%sBaR')
cmd1 = range(test_num) | cmd_upper
for i, data in enumerate(cmd1):
assert data == ('FOO%d' % i)
cmd2 = range(test_num) | cmd_upper | cmd_lower
for i, data in enumerate(cmd2):
assert data == ('foo%dBaR' % i)
for i, data in enumerate(cmd1.iter()):
assert data == ('FOO%d' % i)
for i, data in enumerate(cmd2.iter()):
assert data == ('foo%dBaR' % i)
def test_pipe_map():
import string
register_default_types()
test_num = 100
test_list = list(range(test_num))
upper = pipe.map(string.upper)
cmd1 = seq(test_list) | str | upper
for i, data in enumerate(cmd1):
assert str(i).upper() == data
cmd2 = upper | str
try:
cmd2.run()
except TypeError as e:
assert e.message == 'A mapper must have input.'
def test_pipe_filter():
import string
register_default_types()
test_num = 100
test_list = list(range(test_num))
upper = pipe.map(string.upper)
less_or_equal_3 = pipe.filter(lambda x: x <= 3)
cmd1 = seq(test_list) | less_or_equal_3 | str | upper
for i, data in enumerate(cmd1):
assert str(i).upper() == data
last = cmd1.run()
assert last == '3'
cmd2 = less_or_equal_3 | str
try:
cmd2.run()
except TypeError as e:
assert e.message == 'A filter must have input.'
def test_pipe_reduce():
import string
register_default_types()
@pipe.reduce
def count_mod_10(accu, data):
return accu + (1 if (data % 10) == 0 else 0)
test_num = 100
test_list = list(range(test_num))
cmd1 = test_list | count_mod_10(init=0)
ans = cmd1.run()
assert ans == (test_num // 10)
test_num = 100
test_list = list(range(test_num))
cmd2 = test_list | count_mod_10(0)
ans = cmd2.run()
assert ans == (test_num // 10)
cmd3 = count_mod_10 | str
try:
cmd3.run()
except TypeError as e:
assert e.message == 'A reducer must have input.'
def test_pipe_type_registration():
unregister_all_types()
test_list = ['abc','def','ghi','jkl','mno','pqr','stu','vex','yz']
exception_catched = False
try:
cmd1 = test_list | upper
cmd1.run()
except UnregisteredPipeType:
exception_catched = True
assert exception_catched
exception_catched = False
try:
cmd2 = sh | test_list | upper
cmd2.run()
except UnregisteredPipeType:
exception_catched = True
assert exception_catched
if not has_registered_type(list):
register_type(list, seq)
cmd3 = sh | test_list | upper
for i, data in enumerate(cmd3):
assert data == test_list[i].upper()
cmd_list = seq(test_list)
cmd4 = sh | cmd_list | cmd3
cmd5 = sh | cmd_list | cmd3
for i, data in enumerate(cmd4):
assert data == test_list[i].upper()
for i, data in enumerate(cmd5):
assert data == test_list[i].upper()
unregister_type(list)
unregister_type(list)
assert not has_registered_type(list)
|
Python
| 0
|
@@ -3172,16 +3172,433 @@
nput.'%0A%0A
+%0Adef test_pipe_chain():%0A import string%0A register_default_types()%0A%0A @pipe.reduce%0A def count(accu, data):%0A return accu + 1%0A%0A @pipe.filter%0A def low_pass(data, threshold):%0A return data %3C= threshold%0A%0A count_low_pass = low_pass(10) %7C count(init=0)%0A%0A ans = run(range(1,100) %7C count_low_pass)%0A assert ans == 10%0A%0A ans = run(range(1,100) %7C count_low_pass(20))%0A assert ans == 20%0A%0A%0A
def test
|
43294bc83d013d79d909cadfcf2508aca0c575f6
|
Fix for bad y param.
|
exp/sandbox/predictors/profile/DecisionTreeLearnerProfile.py
|
exp/sandbox/predictors/profile/DecisionTreeLearnerProfile.py
|
import numpy
import logging
import sys
from apgl.util.ProfileUtils import ProfileUtils
from exp.sandbox.predictors.DecisionTreeLearner import DecisionTreeLearner
from apgl.data.ExamplesGenerator import ExamplesGenerator
from sklearn.tree import DecisionTreeRegressor
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
numpy.random.seed(22)
class DecisionTreeLearnerProfile(object):
def profileLearnModel(self):
numExamples = 1000
numFeatures = 20
minSplit = 10
maxDepth = 20
generator = ExamplesGenerator()
X, y = generator.generateBinaryExamples(numExamples, numFeatures)
learner = DecisionTreeLearner(minSplit=minSplit, maxDepth=maxDepth, pruneType="REP-CV")
#learner.learnModel(X, y)
#print("Done")
ProfileUtils.profile('learner.learnModel(X, y) ', globals(), locals())
print(learner.getTree().getNumVertices())
def profileDecisionTreeRegressor(self):
numExamples = 1000
numFeatures = 20
minSplit = 10
maxDepth = 20
generator = ExamplesGenerator()
X, y = generator.generateBinaryExamples(numExamples, numFeatures)
regressor = DecisionTreeRegressor(min_split=minSplit, max_depth=maxDepth, min_density=0.0)
ProfileUtils.profile('regressor.fit(X, y)', globals(), locals())
def profilePredict(self):
#Make the prdiction function faster
numExamples = 1000
numFeatures = 20
minSplit = 1
maxDepth = 20
generator = ExamplesGenerator()
X, y = generator.generateBinaryExamples(numExamples, numFeatures)
learner = DecisionTreeLearner(minSplit=minSplit, maxDepth=maxDepth)
learner.learnModel(X, y)
print(learner.getTree().getNumVertices())
ProfileUtils.profile('learner.predict(X)', globals(), locals())
print(learner.getTree().getNumVertices())
profiler = DecisionTreeLearnerProfile()
profiler.profileLearnModel() #0.418
#profiler.profileDecisionTreeRegressor() #0.020
#profiler.profilePredict() #0.024
|
Python
| 0
|
@@ -464,33 +464,33 @@
numFeatures =
-2
+5
0%0A minSpl
@@ -637,32 +637,72 @@
numFeatures) %0A
+ y = numpy.array(y, numpy.float)%0A
%0A
|
cef60ed7b69b5aec75267ecfa609a5adab9045a8
|
fix pycodestyle issue.
|
accelerator/migrations/0005_legalcheck_userlegalcheck.py
|
accelerator/migrations/0005_legalcheck_userlegalcheck.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-05-14 09:25
from __future__ import unicode_literals
import django.db.models.deletion
import swapper
from django.conf import settings
from django.db import (
migrations,
models,
)
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0004_fix_acstream_contenttypes'),
]
operations = [
migrations.CreateModel(
name='LegalCheck',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')),
('created_at',
models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('name', models.CharField(default='',
max_length=128,
unique=True)),
('description', models.TextField()),
],
options={
'verbose_name': 'Legal Check',
'db_table': 'accelerator_legalcheck',
'abstract': False,
'managed': True,
'swappable': swapper.swappable_setting('accelerator',
'LegalCheck'),
},
),
migrations.CreateModel(
name='UserLegalCheck',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')),
('created_at',
models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('accepted', models.BooleanField(default=False)),
('legal_check',
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
related_name='user_set',
to=swapper.get_model_name(
'accelerator', 'LegalCheck'))),
('user',
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
related_name='legalcheck_set',
to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'User Legal Check',
'db_table': 'accelerator_userlegalcheck',
'abstract': False,
'managed': True,
'swappable': swapper.swappable_setting('accelerator',
'UserLegalCheck'),
},
),
migrations.AlterUniqueTogether(
name='userlegalcheck',
unique_together=set([('user', 'legal_check')]),
),
]
|
Python
| 0
|
@@ -2972,9 +2972,8 @@
,%0A %5D%0A
-%0A
|
505d20b1f4de60bdb13810a989b5ea203553c850
|
Remove use of np.true_divide
|
skbio/maths/subsample.py
|
skbio/maths/subsample.py
|
#!/usr/bin/env python
r"""
Subsampling (:mod:`skbio.maths.subsample`)
==========================================
.. currentmodule:: skbio.maths.subsample
This module provides functionality for subsampling from vectors of counts.
Functions
---------
.. autosummary::
:toctree: generated/
subsample
"""
from __future__ import division
#-----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import numpy as np
def subsample(counts, n, replace=False):
"""Randomly subsample from a vector of counts.
Returns a copy of `counts` if `n` is equal to or larger than the number of
items in `counts`.
Parameters
----------
counts : 1-D array_like
Vector of counts (integers) to randomly subsample from.
n : int
Number of items to subsample from `counts`.
replace : bool, optional
If ``True``, subsample with replacement. If ``False`` (the default),
subsample without replacement.
Returns
-------
subsampled : ndarray
Subsampled vector of counts where the sum of the elements equals `n`
(i.e., ``subsampled.sum() == n``).
Raises
------
TypeError
If `counts` cannot be safely converted to an integer datatype.
Examples
--------
Subsample 4 items (without replacement) from a vector of counts:
>>> import numpy as np
>>> from skbio.maths.subsample import subsample
>>> a = np.array([4, 5, 0, 2, 1])
>>> sub = subsample(a, 4)
>>> sub.sum()
4
Trying to subsample an equal or greater number of items results in the same
vector as our input:
>>> subsample([0, 3, 0, 1], 8)
array([0, 3, 0, 1])
Subsample 5 items (with replacement):
>>> sub = subsample([1, 0, 1, 2, 2, 3, 0, 1], 5, replace=True)
>>> sub.sum()
5
"""
counts = np.asarray(counts)
counts = counts.astype(int, casting='safe')
if counts.ndim != 1:
raise ValueError("Only 1-D vectors are supported.")
counts_sum = counts.sum()
if counts_sum <= n:
return counts
if replace:
probs = np.true_divide(counts, counts_sum)
result = np.random.multinomial(n, probs)
else:
nz = counts.nonzero()[0]
unpacked = np.concatenate([np.repeat(np.array(i,), counts[i])
for i in nz])
permuted = np.random.permutation(unpacked)[:n]
result = np.zeros(len(counts), dtype=int)
for p in permuted:
result[p] += 1
return result
|
Python
| 0
|
@@ -2377,30 +2377,16 @@
s =
-np.true_divide(
counts
-,
+ /
cou
@@ -2392,17 +2392,16 @@
unts_sum
-)
%0A
|
3de3e4bf2f0df0d602c2f69dd5a06016bf31eb9d
|
rebuild checkpoints when something breaks while updating group exports
|
couchexport/groupexports.py
|
couchexport/groupexports.py
|
from couchexport.models import GroupExportConfiguration, SavedBasicExport
from couchdbkit.exceptions import ResourceNotFound
from datetime import datetime
import os
import json
from couchexport.tasks import Temp
def export_for_group(export_id, output_dir):
try:
config = GroupExportConfiguration.get(export_id)
except ResourceNotFound:
raise Exception("Couldn't find an export with id %s" % export_id)
for config, schema in config.all_exports:
tmp, _ = schema.get_export_files(format=config.format)
payload = Temp(tmp).payload
if output_dir == "couch":
saved = SavedBasicExport.view("couchexport/saved_exports",
key=json.dumps(config.index),
include_docs=True,
reduce=False).one()
if not saved:
saved = SavedBasicExport(configuration=config)
saved.save()
saved.put_attachment(payload, config.filename)
saved.last_updated = datetime.utcnow()
# force update the config in case it changed.
# redundant in the create case
saved.configuration = config
saved.save()
else:
with open(os.path.join(output_dir, config.filename), "wb") as f:
f.write(payload)
|
Python
| 0
|
@@ -204,17 +204,139 @@
ort Temp
+, rebuild_schemas%0Afrom couchexport.export import SchemaMismatchException%0Afrom dimagi.utils.logging import notify_exception
%0A
-
%0Adef exp
@@ -593,16 +593,33 @@
xports:%0A
+ try:%0A
@@ -673,16 +673,561 @@
format)%0A
+ except SchemaMismatchException, e:%0A # fire off a delayed force update to prevent this from happening again%0A rebuild_schemas.delay(config.index)%0A msg = %22Saved export failed for group export %7Bindex%7D. The specific error is %7Bmsg%7D.%22%0A notify_exception(None, msg.format(index=config.index,%0A msg=str(e)))%0A # TODO: do we care enough to notify the user?%0A # This is typically only called by things like celerybeat.%0A continue%0A%0A
|
1746dad3e5bb218aede86cdb38e458a3f7ce270c
|
Update Inputkey.py
|
python/inputkeyboard/Inputkey.py
|
python/inputkeyboard/Inputkey.py
|
import sys, tty, termios, time
class _Getch:
def __call__(self, a):
return self._get_key(a)
def _get_key(self, a):
fd = sys.stdin.fileno()
old = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(a)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old)
return ch
def get():
while True:
raw_key = _Getch()
while True:
input_key = raw_key(1)
if input_key != '':
break
return input_key
def arrow_get():
ARROW_KEY = {
'\x1b[A' :'up',
'\x1b[B' :'down',
'\x1b[C' :'right',
'\x1b[D' :'left'
}
while True:
raw_key = _Getch()
while True:
# 방향키 읽으려면 3으로 줘야함
# 이유: 방향키가 이스케이프문자포함해서 3자리
# 그런데 3으로 주면 일반문자 3자리쌓여야 출력함
input_key = raw_key(3)
if input_key != '':
break
if input_key in ARROW_KEY.keys():
return ARROW_KEY.get(input_key)
else:
continue
|
Python
| 0.000002
|
@@ -21,15 +21,9 @@
mios
-, time
%0A
+
%0Acla
@@ -377,205 +377,8 @@
ch%0A%0A
-%0Adef get():%0A while True:%0A raw_key = _Getch() %0A while True:%0A input_key = raw_key(1)%0A if input_key != '':%0A break%0A%0A return input_key
%0A%0Ade
@@ -539,16 +539,48 @@
:'left'
+,%0A '%5Cx1b%5Cx1b%5Cx1b' :'esc'
%0A %7D%0A%0A
@@ -951,16 +951,16 @@
else:%0A
-
@@ -972,8 +972,222 @@
ontinue%0A
+%0A%0Adef get():%0A while True:%0A raw_key = _Getch() %0A while True:%0A input_key = raw_key(1)%0A if input_key != '':%0A break%0A%0A return input_key%0A%0A %0A %0A
|
2c43cf3368742d7bb0acb91118ff07aeb1fe4183
|
Fix comment typo.
|
qipipe/staging/sarcoma_config.py
|
qipipe/staging/sarcoma_config.py
|
import os
from six.moves.configparser import ConfigParser as Config
from six.moves.configparser import NoOptionError
CFG_FILE = os.path.abspath(
os.path.join( os.path.dirname(__file__), '..', 'conf', 'sarcoma.cfg')
)
"""
The Sarcoma Tumor Location configuration file. This file contains
properties that associat the subject name to the location, e.g.::
Sarcoma004 = SHOULDER
The value is the SNOMED anatomy term.
"""
class ConfigError(Exception):
pass
def sarcoma_location(subject):
"""
:param subject: the XNAT Subject ID
:return: the subject tumor location
"""
try:
return sarcoma_config().get('Tumor Location', subject)
except NoOptionError:
raise ConfigError("Tumor location for subject %s was not found in the"
" sarcoma configuration file %s" % (subject, CFG_FILE))
def sarcoma_config():
"""
:return: the sarcoma configuration
:rtype: ConfigParser
"""
# Read the configuration file on demand.
if not hasattr(sarcoma_config, 'instance'):
sarcoma_config.instance = Config()
sarcoma_config.instance.read(CFG_FILE)
return sarcoma_config.instance
|
Python
| 0
|
@@ -309,16 +309,17 @@
associat
+e
the sub
@@ -352,20 +352,16 @@
e.g.::%0A
-
%0A Sar
@@ -1132,20 +1132,16 @@
G_FILE)%0A
-
%0A ret
|
f52fd8f00a401c7b02fb437ece1fabb3d1289ee1
|
fix bug in sort_by_hw_count
|
cs251tk/toolkit/tabulate.py
|
cs251tk/toolkit/tabulate.py
|
"""Make a nice table from the student results"""
import re
from sys import stdout
from termcolor import colored
UNICODE = stdout.encoding == 'UTF-8' and stdout.isatty()
# unicode = False
COL = '│' if UNICODE else '|'
ROW = '─' if UNICODE else '-'
JOIN = '┼' if UNICODE else '-'
MISSING = '─' if UNICODE else '-'
HIGHLIGHT_PARTIALS = False
ANSI_ESCAPE = re.compile(r'\x1b[^m]*m')
def sort_by_hw_count(user):
"""Sort students by the number of completed homeworks"""
return sum([1 if hw['status'] == 'complete' else 0 for hw in user['homework']])
def sort_by_username(user):
"""Sort students by their username"""
return user['username']
def asciiify(table):
"""Take a flashy unicode table and render it with ASCII-only chars"""
table = table.replace('│', '|')
table = table.replace('─', '-')
table = table.replace('┼', '-')
table = table.replace('─', '-')
table = ANSI_ESCAPE.sub('', table)
return table
def pad(string, index):
"""Pad a string to the width of the stringified number"""
padding_char = string if string == MISSING else ' '
return string.ljust(len(str(index)), padding_char)
def symbol(assignment):
"""Turn an assignment status into the symbol for the table"""
if assignment['status'] == 'success':
return str(assignment['number'])
elif assignment['status'] == 'partial':
retval = str(assignment['number'])
if HIGHLIGHT_PARTIALS:
return colored(retval, 'red', attrs={'bold': True})
return retval
return MISSING
def concat(lst, to_num):
"""Create the informative row of data for a list of assignment statuses"""
nums = {item['number']: item for item in lst}
lst = [pad(symbol(nums[idx]), idx)
if idx in nums
else pad('-', idx)
for idx in range(1, to_num + 1)]
return ' '.join(lst)
def find_columns(num):
"""Build the table headings for the assignment sections"""
return ' '.join([str(i) for i in range(1, num + 1)])
def columnize(student, longest_user, max_hwk_num, max_lab_num):
"""Build the data for each row of the information table"""
name = '{0:<{1}}'.format(student['username'], len(longest_user))
if student.get('unmerged_branches', False):
name = colored(name, attrs={'bold': True})
homework_row = concat(student['homeworks'], max_hwk_num)
lab_row = concat(student['labs'], max_lab_num)
if 'error' in student:
return '{name} {sep} {err}'.format(
name=name,
sep=COL,
err=student['error'])
return '{name} {sep} {hws} {sep} {labs}'.format(
name=name,
hws=homework_row,
labs=lab_row,
sep=COL)
def get_nums(students):
"""Given a list of students, return the higest hw and lab number among them"""
homework_nums = [hw['number'] for s in students for hw in s.get('homeworks', [])]
lab_nums = [lab['number'] for s in students for lab in s.get('labs', [])]
max_hwk_num = max(homework_nums, default=0)
max_lab_num = max(lab_nums, default=0)
return max_hwk_num, max_lab_num
def tabulate(students, sort_by, partials):
"""Actually build the table"""
global HIGHLIGHT_PARTIALS
HIGHLIGHT_PARTIALS = partials
# be sure that the longest username will be at least 4 chars
usernames = [user['username'] for user in students] + ['USER']
longest_user = max(usernames, key=len)
# build the header row of the table
max_hwk_num, max_lab_num = get_nums(students)
header_hw_nums = find_columns(max_hwk_num)
header_lab_nums = find_columns(max_lab_num)
header = '{name:<{namesize}} {sep} {hwnums} {sep} {labnums}'.format(
name='USER',
namesize=len(longest_user),
hwnums=header_hw_nums,
labnums=header_lab_nums,
sep=COL)
# build the header's bottom border
border = ''.join([
''.ljust(len(longest_user) + 2, ROW),
JOIN,
''.ljust(len(header_hw_nums) + 2, ROW),
JOIN,
''.ljust(len(header_lab_nums) + 1, ROW),
])
# build the table body
if sort_by == 'count':
sorter = sort_by_hw_count
should_reverse = True
else:
sorter = sort_by_username
should_reverse = False
lines = [columnize(student, longest_user, max_hwk_num, max_lab_num)
for student in sorted(students, reverse=should_reverse, key=sorter)]
# and make the table to return
table = [header, border] + lines
return '\n'.join(table)
|
Python
| 0.000002
|
@@ -543,16 +543,17 @@
homework
+s
'%5D%5D)%0A%0A%0Ad
|
d623431bb078526366210285e9f6a35ef516e528
|
extract the sorting functions into top-level tabulate
|
cs251tk/toolkit/tabulate.py
|
cs251tk/toolkit/tabulate.py
|
"""Make a nice table from the student results"""
import re
from sys import stdout
from termcolor import colored
from logging import warning
from cs251tk.common import flatten
UNICODE = stdout.encoding == 'UTF-8' and stdout.isatty()
# unicode = False
COL = '│' if UNICODE else '|'
ROW = '─' if UNICODE else '-'
JOIN = '┼' if UNICODE else '-'
MISSING = '─' if UNICODE else '-'
HIGHLIGHT_PARTIALS = False
ANSI_ESCAPE = re.compile(r'\x1b[^m]*m')
def asciiify(table):
table = table.replace('│', '|')
table = table.replace('─', '-')
table = table.replace('┼', '-')
table = table.replace('─', '-')
table = ANSI_ESCAPE.sub('', table)
return table
def pad(string, index):
"""Pad a string to the width of the stringified number"""
padding_char = string if string == MISSING else ' '
return string.ljust(len(str(index)), padding_char)
def symbol(assignment):
"""Turn an assignment status into the symbol for the table"""
if assignment['status'] == 'success':
return str(assignment['number'])
elif assignment['status'] == 'partial':
retval = str(assignment['number'])
if HIGHLIGHT_PARTIALS:
return colored(retval, 'red', attrs={'bold': True})
return retval
return MISSING
def concat(lst, to_num):
"""Create the informative row of data for a list of assignment statuses"""
nums = {item['number']: item for item in lst}
lst = [pad(symbol(nums[idx]), idx)
if idx in nums
else pad('-', idx)
for idx in range(1, to_num + 1)]
return ' '.join(lst)
def find_columns(num):
"""Build the table headings for the assignment sections"""
return ' '.join([str(i) for i in range(1, num + 1)])
def columnize(student, longest_user, max_hwk_num, max_lab_num):
"""Build the data for each row of the information table"""
name = '{0:<{1}}'.format(student['username'], len(longest_user))
if student.get('unmerged_branches', False):
name = colored(name, attrs={'bold': True})
homework_row = concat(student['homeworks'], max_hwk_num)
lab_row = concat(student['labs'], max_lab_num)
if 'error' in student:
return '{name} {sep} {err}'.format(
name=name,
sep=COL,
err=student['error'])
return '{name} {sep} {hws} {sep} {labs}'.format(
name=name,
hws=homework_row,
labs=lab_row,
sep=COL)
def get_nums(students):
homework_nums = flatten([[hw['number'] for hw in s.get('homeworks', [])] for s in students])
lab_nums = flatten([[lab['number'] for lab in s.get('labs', [])] for s in students])
if not homework_nums:
warning('no homework assignments were given to tabulate')
warning('from these students:')
warning(students)
return 0, 0
if not lab_nums:
warning('no labs were given to tabulate')
warning('from these students:')
warning(students)
return 0, 0
max_hwk_num = max(homework_nums)
max_lab_num = max(lab_nums)
return max_hwk_num, max_lab_num
def tabulate(students, sort_by, partials):
"""Actually build the table"""
global HIGHLIGHT_PARTIALS
HIGHLIGHT_PARTIALS = partials
# be sure that the longest username will be at least 4 chars
usernames = [user['username'] for user in students] + ['USER']
longest_user = max(usernames, key=len)
# build the header row of the table
max_hwk_num, max_lab_num = get_nums(students)
header_hw_nums = find_columns(max_hwk_num)
header_lab_nums = find_columns(max_lab_num)
header = '{name:<{namesize}} {sep} {hwnums} {sep} {labnums}'.format(
name='USER',
namesize=len(longest_user),
hwnums=header_hw_nums,
labnums=header_lab_nums,
sep=COL)
# build the header's bottom border
border = ''.join([
''.ljust(len(longest_user) + 2, ROW),
JOIN,
''.ljust(len(header_hw_nums) + 2, ROW),
JOIN,
''.ljust(len(header_lab_nums) + 1, ROW),
])
# build the table body
if sort_by == 'count':
def sorter(user):
return sum([1 if hw['status'] == 'complete' else 0 for hw in user['homework']])
should_reverse = True
else:
def sorter(user):
return user['username']
should_reverse = False
lines = [columnize(student, longest_user, max_hwk_num, max_lab_num)
for student in sorted(students, reverse=should_reverse, key=sorter)]
# and make the table to return
table = [header, border] + lines
return '\n'.join(table)
|
Python
| 0.999934
|
@@ -434,24 +434,299 @@
b%5B%5Em%5D*m')%0A%0A%0A
+def sort_by_hw_count(user):%0A %22%22%22Sort students by the number of completed homeworks%22%22%22%0A return sum(%5B1 if hw%5B'status'%5D == 'complete' else 0 for hw in user%5B'homework'%5D%5D)%0A%0A%0Adef sort_by_username(user):%0A %22%22%22Sort students by their username%22%22%22%0A return user%5B'username'%5D%0A%0A%0A
def asciiify
@@ -4378,117 +4378,33 @@
-def
sorter
-(user):%0A return sum(%5B1 if hw%5B'status'%5D == 'complete' else 0 for hw in user%5B'homework'%5D%5D)
+ = sort_by_hw_count
%0A
@@ -4452,59 +4452,33 @@
-def
sorter
-(user):%0A return user%5B'
+ = sort_by_
username
'%5D%0A
@@ -4473,18 +4473,16 @@
username
-'%5D
%0A
|
efc00476784fc5e456d156707d949d57334329c4
|
update c 2
|
redis_i_action/c2/token_redis.py
|
redis_i_action/c2/token_redis.py
|
import json
import time
from helper import *
QUIT = False
LIMIT = 10000000
def check_token(conn,token):
return conn.hget('login:',token)
def update_token(conn,token,user,item=None):
timestamp = time.time()
conn.hset('login:',token,user)
conn.zadd('recent:',token,timestamp)
if item:
conn.zadd('viewed:' + token,item,timestamp)
conn.zremrangebyrank('viewed:' + token,0,-26)
conn.zincrby('viewed:',item,-1)
def clean_sessions(conn):
while not QUIT:
size = conn.zcard('recent:')
if size <= LIMIT:
time.sleep(1)
continue
end_index = min(size - LIMIT,100)
tokens = conn.zrange('recent:',0,end_index - 1)
session_keys = []
for token in tokens:
session_keys.append('viewed:' + token)
conn.delete(*session_keys)
conn.hdel('login:',*tokens)
conn.zrem('recent:',*tokens)
def add_to_cart(conn,session,item,count):
if count <= 0:
conn.hrem('cart:' + session,item)
else:
conn.hset('cart:'+session,item,count)
def clean_full_sessions(conn):
while not QUIT:
size = conn.zcard('recent:')
time.sleep(1)
continue
end_index = min(size - LIMIT,100)
sessions = conn.zrange('recent:',0,end_index - 1)
session_keys = []
for sess in sessions:
session_keys.append('viewed:'+ sess)
session_keys.append('cart:'+sess)
conn.delete(*sessions)
conn.hdel('login:',*sessions)
conn.zrem('recent:',*sessions)
def cache_request(conn,request,callback):
if not can_cache(conn,request):
return callback(request)
page_key = 'cache:' + hash_request(request)
content = conn.get(page_key)
if not content:
content = callback(request)
conn.setex(page_key,content,300)
return content
def schedule_row_cache(conn,row_id,delay):
conn.zadd('delay:',row_id,delay)
conn.zadd('schedule:',row_id,time.time())
def cache_rows(conn):
while not QUIT:
next = conn.zrange('schedule:',0,0,withscores=True)
now = time.time()
if not next or next[0][1] > now:
time.sleep(.05)
continue
row_id = next[0][0]
delay = conn.zscore('delay',row_id)
if delay <= 0:
conn.zrem('delay:',row_id)
conn.zrem('schedule:',row_id)
conn.delete('inv:'+row_id)
continue
row = Inventory.get(row_id)
conn.zadd('schedule:',row_id,now+delay)
conn.set('inv:'+row_id,json.dumps(row.to_dict()))
def rescale_viewed(conn):
while not QUIT:
conn.zremrangebyrank('viewed:',20000,-1)
conn.zinterstore('viewed:',{'viewed:':.5})
time.sleep(300)
def can_cache(conn,request):
item_id = extract_item_id(request)
if not item_id or is_dynamic(request):
return False
rank = conn.zrank('viewed:',item_id)
return rank is not None and rank < 10000
|
Python
| 0.000001
|
@@ -155,269 +155,920 @@
oken
-(conn,token,user,item=None):%0A%09timestamp = time.time()%0A%09conn.hset('login:',token,user)%0A%09conn.zadd('recent:',token,timestamp)%0A%09if item:%0A%09%09conn.zadd('viewed:' + token,item,timestamp)%0A%09%09conn.zremrangebyrank('viewed:' + token,0,-26)%0A%09%09conn.zincrby('viewed:',item,-1)
+_pipeline(conn,token,user,item=None):%0A%09timestamp = time.time()%0A%09pipe = conn.pipeline()%0A%09pipe.hset('login:',token,user)%0A%09pipe.zadd('recent:',token,timestamp)%0A%0A%09if item:%0A%09%09pipe.zadd('viewed:' + token,item,timestamp)%0A%09%09pipe.zremrangebyrank('viewed:' + token,0,-26)%0A%09%09pipe.zincrby('viewed:',item,-1)%0A%09pipe.execute()%0A%0Adef update_token(conn,token,user,item=None):%0A%09timestamp = time.time()%0A%09conn.hset('login:',token,user)%0A%09conn.zadd('recent:',token,timestamp)%0A%09if item:%0A%09%09conn.zadd('viewed:' + token,item,timestamp)%0A%09%09conn.zremrangebyrank('viewed:' + token,0,-26)%0A%09%09conn.zincrby('viewed:',item,-1)%0A%0Adef benchmark_update_token(conn,duration):%0A%09for function in (update_token,update_token_pipeline):%0A%09%09count = 0%0A%09%09start = time.time()%0A%09%09end = start + duration%0A%09%09while time.time() %3C end:%0A%09%09%09count += 1%0A%09%09%09function(conn,'token','user','item')%0A%09%09delta = time.time() - start%0A%09%09print function.__name__,count,delta,count / delta%0A
%0Adef
|
f0e07f97fd43a0f54c8b0996944038a07e9a0e96
|
Add error handling for when the meter name does not match the NEM file
|
metering/loader.py
|
metering/loader.py
|
"""
metering.loader
~~~~~~~~~
Define the meter data models
"""
from nemreader import read_nem_file
from sqlalchemy.orm import sessionmaker
from energy_shaper import split_into_daily_intervals
from . import get_db_engine
from . import save_energy_reading
from . import refresh_daily_stats
from . import refresh_monthly_stats
def load_nem_data(meter_id, nmi, nem_file):
""" Load data from NEM file and save to database """
engine = get_db_engine(meter_id)
Session = sessionmaker(bind=engine)
session = Session()
m = read_nem_file(nem_file)
channels = m.readings[nmi]
for ch_name in channels.keys():
reads = split_into_daily_intervals(channels[ch_name])
for read in reads:
try:
quality_method = read[3]
except IndexError:
quality_method = None
save_energy_reading(session, ch_name,
read[0], read[1],
read[2], quality_method)
session.commit()
refresh_daily_stats(meter_id)
refresh_monthly_stats(meter_id)
|
Python
| 0.000001
|
@@ -69,16 +69,31 @@
ls%0A%22%22%22%0A%0A
+import logging%0A
from nem
@@ -583,16 +583,29 @@
m_file)%0A
+ try:%0A
chan
@@ -626,16 +626,206 @@
ngs%5Bnmi%5D
+%0A except KeyError:%0A first_nmi = list(m.readings.keys())%5B0%5D%0A logging.warning('NMI of %25s not found, using %25s instead', nmi, first_nmi)%0A channels = m.readings%5Bfirst_nmi%5D
%0A%0A fo
@@ -1303,28 +1303,24 @@
monthly_stats(meter_id)%0A
-
|
57a14c56305f3542e5383bb8189a298bb62f853a
|
remove qqq debug from wb_debug
|
Source/Common/wb_debug.py
|
Source/Common/wb_debug.py
|
'''
====================================================================
Copyright (c) 2016 Barry A Scott. All rights reserved.
This software is licensed as described in the file LICENSE.txt,
which you should have received as part of this distribution.
====================================================================
wb_debug.py
'''
import time
class WbDebugOption:
__slots__ = ('__enabled', '_log', '__name', '__fmt')
def __init__( self, log, name ):
assert log is not None
self.__enabled = False
self._log = log
self.__name = name
self.__fmt = '%s %%s' % (name,)
def __repr__( self ):
return '<WbDebugOption: %s enabled=%r>' % (self.__name, self.isEnabled())
def enable( self, state=True ):
self.__enabled = state
def isEnabled( self ):
return self.__enabled
def __call__( self, msg ):
if self.__enabled:
self._log.debug( self.__fmt % (msg,) )
class WbDebugSpeedOption(WbDebugOption):
__slots__ = ('__speed_start_time', '__speed_last_event_time')
def __init__( self, log, name ):
super().__init__( log, name )
self.__speed_start_time = time.time()
self.__speed_last_event_time = self.__speed_start_time
def __call__( self, msg, start_timer=False ):
if self.isEnabled():
now = time.time()
if start_timer:
self.__speed_start_time = now
self.__speed_last_event_time = now
start_delta = now - self.__speed_start_time
last_delta = now - self.__speed_last_event_time
self.__speed_last_event_time = now
self._log.debug( 'SPEED %.6f %.6f %s' % (start_delta, last_delta, msg,) )
class WbDebug:
def __init__( self, log ):
self._log = log
self._debugSpeed = WbDebugSpeedOption( self._log, 'SPEED' )
self._debugApp = self.addDebugOption( 'APP' )
self._debugThreading = self.addDebugOption( 'THREADING' )
self._debugMainWindow = self.addDebugOption( 'MAIN WINDOW' )
self._debugTreeModel = self.addDebugOption( 'TREE MODEL' )
self._debugTableModel = self.addDebugOption( 'TABLE MODEL' )
self._debugDiff = self.addDebugOption( 'DIFF' )
def setDebug( self, str_options ):
print( 'qqq setDebug str_options %r' % (str_options,) )
for option in [s.strip().lower() for s in str_options.split(',')]:
name = '_debug%s' % (''.join( s.capitalize() for s in option.lower().split('-') ),)
print( 'qqq setDebug name %r' % (name,) )
if hasattr( self, name ):
getattr( self, name ).enable( True )
else:
msg = 'Unknown debug option %s - see wb_debug.py for available options' % (option,)
print( msg )
def addDebugOption( self, name ):
return WbDebugOption( self._log, name )
|
Python
| 0.000622
|
@@ -2314,72 +2314,8 @@
):%0A
- print( 'qqq setDebug str_options %25r' %25 (str_options,) )%0A
@@ -2485,62 +2485,8 @@
),)%0A
- print( 'qqq setDebug name %25r' %25 (name,) )%0A
|
36e656cc4e3271371053859be8817034dc73f7a2
|
fix config var
|
recruit_app/user/eve_api_manager.py
|
recruit_app/user/eve_api_manager.py
|
import evelink.api
import evelink.char
import evelink.eve
from flask import config
class EveApiManager():
def __init__(self):
pass
@staticmethod
def get_characters_from_api(api_id, api_key):
chars = []
try:
api = evelink.api.API(api_key=(api_id, api_key))
# Should get characters
account = evelink.account.Account(api=api)
chars = account.characters()
except evelink.api.APIError as error:
print error
return chars
@staticmethod
def get_corporation_ticker_from_id(corp_id):
ticker = ""
try:
api = evelink.api.API()
corp = evelink.corp.Corp(api)
response = corp.corporation_sheet(corp_id)
ticker = response[0]['ticker']
except evelink.api.APIError as error:
print error
return ticker
@staticmethod
def get_alliance_information(alliance_id):
results = {}
try:
api = evelink.api.API()
eve = evelink.eve.EVE(api=api)
alliance = eve.alliances()
results = alliance[0][int(alliance_id)]
except evelink.api.APIError as error:
print error
return results
@staticmethod
def get_corporation_information(corp_id):
results = {}
try:
api = evelink.api.API()
corp = evelink.corp.Corp(api=api)
corpinfo = corp.corporation_sheet(corp_id=int(corp_id))
results = corpinfo[0]
except evelink.api.APIError as error:
print error
return results
@staticmethod
def check_api_is_type_account(api_id, api_key):
try:
api = evelink.api.API(api_key=(api_id, api_key))
account = evelink.account.Account(api=api)
info = account.key_info()
return info[0]['type'] == "account"
except evelink.api.APIError as error:
print error
return False
@staticmethod
def check_api_is_full(api_id, api_key):
try:
api = evelink.api.API(api_key=(api_id, api_key))
account = evelink.account.Account(api=api)
info = account.key_info()
return info[0]['access_mask'] == config.API_MASK
except evelink.api.APIError as error:
print error
return False
@staticmethod
def check_api_is_not_expire(api_id, api_key):
try:
api = evelink.api.API(api_key=(api_id, api_key))
account = evelink.account.Account(api=api)
info = account.key_info()
return info[0]['expire_ts'] is None
except evelink.api.APIError as error:
print error
return False
@staticmethod
def get_api_info(api_id, api_key):
try:
api = evelink.api.API(api_key=(api_id, api_key))
account = evelink.account.Account(api=api)
info = account.key_info()
return info
except evelink.api.APIError as error:
print error
return False
@staticmethod
def api_key_is_valid(api_id, api_key):
try:
api = evelink.api.API(api_key=(api_id, api_key))
account = evelink.account.Account(api=api)
info = account.key_info()
return True
except evelink.api.APIError as error:
return False
return False
@staticmethod
def check_if_api_server_online():
try:
api = evelink.api.API()
server = evelink.server.Server(api=api)
info = server.server_status()
return True
except evelink.api.APIError as error:
return False
return False
@staticmethod
def check_if_id_is_corp(corp_id):
try:
api = evelink.api.API()
corp = evelink.corp.Corp(api=api)
corpinfo = corp.corporation_sheet(corp_id=int(corp_id))
results = corpinfo[0]
return True
except evelink.api.APIError as error:
return False
return False
@staticmethod
def check_if_id_is_alliance(alliance_id):
try:
api = evelink.api.API()
eve = evelink.eve.EVE(api=api)
alliance = eve.alliances()
results = alliance[0][int(alliance_id)]
if results:
return True
except evelink.api.APIError as error:
return False
return False
@staticmethod
def check_if_id_is_character(character_id):
try:
api = evelink.api.API()
eve = evelink.eve.EVE(api=api)
results = eve.character_info_from_id(character_id)
if results:
return True
except evelink.api.APIError as error:
return False
return False
|
Python
| 0.000002
|
@@ -70,21 +70,26 @@
import c
-onfig
+urrent_app
%0A%0Aclass
@@ -2297,23 +2297,36 @@
==
+current_app.
config
-.
+%5B'
API_MASK
%0A%0A
@@ -2321,16 +2321,18 @@
API_MASK
+'%5D
%0A%0A
|
a5dc9603bddad1d1f19fa2e03003d469a9c647ae
|
add startup callbacks to ConsoleApp
|
src/python/director/consoleapp.py
|
src/python/director/consoleapp.py
|
import os
import sys
import traceback
import argparse
from director import applogic
from director import objectmodel as om
from director import viewbehaviors
from director import visualization as vis
from director.timercallback import TimerCallback
import PythonQt
from PythonQt import QtCore, QtGui
def _consoleAppExceptionHook(exc_type, exc_value, exc_traceback):
msg = ''.join(traceback.format_exception(exc_type, exc_value, exc_traceback))
sys.stderr.write(msg)
ConsoleApp.exit(1)
class ConsoleApp(object):
def __init__(self):
om.init()
self.objectModelWidget = None
@staticmethod
def start(enableAutomaticQuit=True):
'''
In testing mode, the application will quit automatically after starting
unless enableAutomaticQuit is set to False. Tests that need to perform
work after the QApplication has started can set this flag to False and
call quit or exit themselves.
In testing mode, this function will register an exception hook so that
tests will return on error code if an unhandled exception is raised.
'''
if enableAutomaticQuit:
ConsoleApp.startTestingModeQuitTimer()
if ConsoleApp.getTestingEnabled() and not ConsoleApp.getTestingInteractiveEnabled():
sys.excepthook = _consoleAppExceptionHook
result = ConsoleApp.applicationInstance().exec_()
if ConsoleApp.getTestingEnabled() and not ConsoleApp.getTestingInteractiveEnabled():
print 'TESTING PROGRAM RETURNING EXIT CODE:', result
sys.exit(result)
return result
@staticmethod
def startTestingModeQuitTimer(timeoutInSeconds=0.1):
if ConsoleApp.getTestingEnabled() and not ConsoleApp.getTestingInteractiveEnabled():
ConsoleApp.startQuitTimer(timeoutInSeconds)
@staticmethod
def startQuitTimer(timeoutInSeconds):
quitTimer = TimerCallback()
quitTimer.callback = ConsoleApp.quit
quitTimer.singleShot(timeoutInSeconds)
@staticmethod
def quit():
ConsoleApp.applicationInstance().quit()
@staticmethod
def exit(exitCode=0):
ConsoleApp.applicationInstance().exit(exitCode)
@staticmethod
def applicationInstance():
return QtCore.QCoreApplication.instance()
def showObjectModel(self):
if not self.objectModelWidget:
w = QtGui.QWidget()
l = QtGui.QVBoxLayout(w)
model = om.getDefaultObjectModel()
l.addWidget(model.getTreeWidget())
l.addWidget(model.getPropertiesPanel())
applogic.addShortcut(w, 'Ctrl+Q', self.quit)
self.objectModelWidget = w
self.objectModelWidget.resize(350, 700)
self.objectModelWidget.show()
self.objectModelWidget.raise_()
self.objectModelWidget.activateWindow()
return self.objectModelWidget
def createView(self, useGrid=True):
view = PythonQt.dd.ddQVTKWidgetView()
view.resize(600, 400)
applogic.setCameraTerrainModeEnabled(view, True)
if useGrid:
self.gridObj = vis.showGrid(view, parent='scene')
self.viewOptions = vis.ViewOptionsItem(view)
om.addToObjectModel(self.viewOptions, parentObj=om.findObjectByName('scene'))
applogic.resetCamera(viewDirection=[-1,-1,-0.3], view=view)
self.viewBehaviors = viewbehaviors.ViewBehaviors(view)
applogic._defaultRenderView = view
applogic.addShortcut(view, 'Ctrl+Q', self.quit)
applogic.addShortcut(view, 'F8', self.showPythonConsole)
applogic.addShortcut(view, 'F1', self.showObjectModel)
view.setWindowIcon(om.Icons.getIcon(om.Icons.Robot))
view.setWindowTitle('View')
return view
@staticmethod
def showPythonConsole():
applogic.showPythonConsole()
def setupGlobals(self, globalsDict):
quit = ConsoleApp.quit
exit = ConsoleApp.exit
globalsDict.update(locals())
for arg in ['globalsDict', 'self']:
del globalsDict[arg]
@staticmethod
def getTestingArgs(dataDirRequired=False, outputDirRequired=False):
parser = argparse.ArgumentParser()
parser.add_argument('--testing', action='store_true', help='enable testing mode')
parser.add_argument('-d', '--data-dir', type=str, help='testing data directory', required=dataDirRequired)
parser.add_argument('-o', '--output-dir', type=str, help='output directory for writing test output', required=outputDirRequired)
parser.add_argument('-i', '--interactive', action='store_true', help='enable interactive testing mode')
args, unknown = parser.parse_known_args()
return args
@staticmethod
def getTestingDataDirectory():
path = ConsoleApp.getTestingArgs(dataDirRequired=True).data_dir
if not os.path.isdir(path):
raise Exception('Testing data directory does not exist: %s' % path)
return path
@staticmethod
def getTestingOutputDirectory(outputDirRequired=True):
path = ConsoleApp.getTestingArgs().output_dir
if not os.path.isdir(path):
raise Exception('Testing output directory does not exist: %s' % path)
return path
@staticmethod
def getTestingInteractiveEnabled():
return ConsoleApp.getTestingArgs().interactive
@staticmethod
def getTestingEnabled():
return ConsoleApp.getTestingArgs().testing
def main(globalsDict=None):
app = ConsoleApp()
app.showPythonConsole()
view = app.createView()
view.show()
view.raise_()
view.activateWindow()
if globalsDict is not None:
app.setupGlobals(globalsDict)
globalsDict.update(dict(view=view, app=app))
app.start()
if __name__ == '__main__':
main(globals())
|
Python
| 0.000001
|
@@ -520,24 +520,52 @@
p(object):%0A%0A
+ _startupCallbacks = %5B%5D%0A%0A
def __in
@@ -1382,16 +1382,208 @@
onHook%0A%0A
+ def onStartup():%0A for func in ConsoleApp._startupCallbacks:%0A func()%0A%0A startTimer = TimerCallback(callback=onStartup)%0A startTimer.singleShot(0)%0A%0A
|
fbd02da826a46ae7be403b5338236e04aa1133bc
|
fix missing quote
|
crankycoin/models/transaction.py
|
crankycoin/models/transaction.py
|
import coincurve
import hashlib
import json
import time
from crankycoin.models.enums import TransactionType
class Transaction(object):
def __init__(self, source, destination, amount, fee, prev_hash='0', tx_type=TransactionType.STANDARD.value,
timestamp=None, tx_hash=None, asset=None, data="", signature=None):
"""
tx_type:
0=genesis,
1=coinbase,
2=standard,
3=asset creation,
4=asset addendum,
5=order,
6=fill,
7=registration
"""
self._source = source
self._destination = destination
self._amount = amount
self._fee = fee
self._timestamp = timestamp
self._signature = signature
self._tx_hash = tx_hash
self._tx_type = tx_type
self._asset = asset
self._data = data
self._prev_hash = prev_hash
if timestamp is None:
self._timestamp = int(time.time())
if asset is None:
self._asset = '29bb7eb4fa78fc709e1b8b88362b7f8cb61d9379667ad4aedc8ec9f664e16680'
if tx_hash is None and signature is not None:
self._tx_hash = self._calculate_tx_hash()
@property
def source(self):
return self._source
@property
def destination(self):
return self._destination
@property
def amount(self):
return self._amount
@property
def fee(self):
return self._fee
@property
def timestamp(self):
return self._timestamp
@property
def tx_hash(self):
return self._tx_hash
@property
def tx_type(self):
return self._tx_type
@property
def asset(self):
return self._asset
@property
def data(self):
return self._data
@property
def prev_hash(self):
return self._prev_hash
@property
def signature(self):
return self._signature
def _calculate_tx_hash(self):
"""
Calculates sha256 hash of transaction (source, destination, amount, timestamp, signature)
:return: sha256 hash
:rtype: str
"""
data = {
"source": self._source,
"destination": self._destination,
"amount": self._amount,
"fee": self._fee,
"timestamp": self._timestamp,
"tx_type": self._tx_type,
"asset": self._asset,
"data": self._data,
"prev_hash": self._prev_hash,
"signature": self._signature
}
data_json = json.dumps(data, sort_keys=True)
hash_object = hashlib.sha256(data_json)
return hash_object.hexdigest()
def sign(self, private_key):
signature = codecs.encode(coincurve.PrivateKey.from_hex(private_key).sign(self.to_signable()), 'hex')
self._signature = signature
self._tx_hash = self._calculate_tx_hash()
return signature
def to_signable(self):
return ":".join((
self._source,
self._destination,
str(self._amount),
str(self._fee),
str(self._timestamp),
str(self._tx_type),
self._asset,
self._data,
self._prev_hash
))
def verify(self):
return coincurve.PublicKey(codecs.decode(self._source, 'hex)).verify(codecs.decode(self._signature, 'hex'), self.to_signable())
def to_json(self):
return json.dumps(self, default=lambda o: {key.lstrip('_'): value for key, value in o.__dict__.items()},
sort_keys=True)
def to_dict(self):
return {key.lstrip('_'): value for key, value in self.__dict__.items()}
@classmethod
def from_dict(cls, transaction_dict):
return cls(
transaction_dict['source'],
transaction_dict['destination'],
transaction_dict['amount'],
transaction_dict['fee'],
tx_type=transaction_dict['tx_type'],
timestamp=transaction_dict['timestamp'],
asset=transaction_dict['asset'],
data=transaction_dict['data'],
prev_hash=transaction_dict['prev_hash'],
signature=transaction_dict['signature']
)
def __repr__(self):
return "<Transaction {}>".format(self._tx_hash)
def __str__(self):
return str(self.__dict__)
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
if __name__ == "__main__":
pass
|
Python
| 0.982782
|
@@ -3378,16 +3378,17 @@
ce, 'hex
+'
)).verif
|
6a1b5003547833ffb0cddea933594c0322ad1bf2
|
Add complete utils instead
|
frappe/social/doctype/energy_point_rule/energy_point_rule.py
|
frappe/social/doctype/energy_point_rule/energy_point_rule.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
import frappe.cache_manager
from frappe.model.document import Document
from frappe.social.doctype.energy_point_settings.energy_point_settings import is_energy_point_enabled
from frappe.social.doctype.energy_point_log.energy_point_log import create_energy_points_log, revert
class EnergyPointRule(Document):
def on_update(self):
frappe.cache_manager.clear_doctype_map('Energy Point Rule', self.name)
def on_trash(self):
frappe.cache_manager.clear_doctype_map('Energy Point Rule', self.name)
def apply(self, doc):
whitelisted_globals = {
"getdate": frappe.utils.getdate
}
if frappe.safe_eval(self.condition, whitelisted_globals, {'doc': doc.as_dict()}):
multiplier = 1
if self.multiplier_field:
multiplier = doc.get(self.multiplier_field) or 1
points = round(self.points * multiplier)
reference_doctype = doc.doctype
reference_name = doc.name
user = doc.get(self.user_field)
rule = self.name
# incase of zero as result after roundoff
if not points: return
# if user_field has no value
if not user or user == 'Administrator': return
try:
create_energy_points_log(reference_doctype, reference_name, {
'points': points,
'user': user,
'rule': rule
})
except Exception as e:
frappe.log_error(frappe.get_traceback(), 'apply_energy_point')
def process_energy_points(doc, state):
if (frappe.flags.in_patch
or frappe.flags.in_install
or not is_energy_point_enabled()):
return
old_doc = doc.get_doc_before_save()
# check if doc has been cancelled
if old_doc and old_doc.docstatus == 1 and doc.docstatus == 2:
return revert_points_for_cancelled_doc(doc)
for d in frappe.cache_manager.get_doctype_map('Energy Point Rule', doc.doctype,
dict(reference_doctype = doc.doctype, enabled=1)):
frappe.get_doc('Energy Point Rule', d.get('name')).apply(doc)
def revert_points_for_cancelled_doc(doc):
energy_point_logs = frappe.get_all('Energy Point Log', {
'reference_doctype': doc.doctype,
'reference_name': doc.name,
'type': 'Auto'
})
for log in energy_point_logs:
revert(log.name, _('Reference document has been cancelled'))
def get_energy_point_doctypes():
return [
d.reference_doctype for d in frappe.get_all('Energy Point Rule',
['reference_doctype'], {'enabled': 1})
]
|
Python
| 0
|
@@ -754,23 +754,21 @@
= %7B%0A%09%09%09%22
-getdate
+utils
%22: frapp
@@ -778,16 +778,8 @@
tils
-.getdate
%0A%09%09%7D
|
c906e675bb4c75286d98d78e4625d12a158652c7
|
Update accel.py
|
apps/accelerometer/accel.py
|
apps/accelerometer/accel.py
|
#!/usr/bin/python
# Author : ipmstyle, https://github.com/ipmstyle
# : jeonghoonkang, https://github.com/jeonghoonkang
# for the detail of HW connection, see lcd_connect.py
import sys
from time import strftime, localtime
# beware the dir location, it should exist
sys.path.append("../lcd_berepi/lib")
sys.path.append("../sht20")
from lcd import *
from sht25class import *
def main():
# Initialise display
lcd_init()
#print ip_chk(), wip_chk(), mac_chk(), wmac_chk(), stalk_chk(), time_chk()
while True:
str = ip_chk()
str = str[:-1]
lcd_string('%s ET' %str,LCD_LINE_1,1)
str = mac_chk()
str = str[:-1]
str = wip_chk()
str = str[:-1]
lcd_string('%s WL ' % (str),LCD_LINE_2,1)
str = wmac_chk()
str = str[:-1]
# lcd_string('%s' % (str),LCD_LINE_2,1)
blueLCDon()
time.sleep(1.2)
str = stalk_chk()
str = str[:-1]
lcd_string('%s' % (tstr),LCD_LINE_1,1)
lcd_string('%s ' % (str),LCD_LINE_2,1)
blueLCDon()
time.sleep(1)
lcd_string('%s' % (tstr),LCD_LINE_1,1)
str = humi_chk()
lcd_string('%.5s ' % (str),LCD_LINE_2,1)
whiteLCDon()
time.sleep(2)
def run_cmd(cmd):
p = Popen(cmd, shell=True, stdout=PIPE)
output = p.communicate()[0]
return output
def temp_chk():
temperature = getTemperature()
return temperature
def humi_chk():
humidity = getHumidity()
return humidity
def time_chk():
time = strftime("%Y-%m%d %H:%M", localtime())
return time
def ip_chk():
cmd = "ip addr show eth0 | grep inet | awk '$2 !~ /^169/ {print $2}' | cut -d/ -f1"
ipAddr = run_cmd(cmd)
return ipAddr
def wip_chk():
cmd = "ip addr show wlan0 | grep inet | awk '{print $2}' | cut -d/ -f1"
wipAddr = run_cmd(cmd)
return wipAddr
def mac_chk():
cmd = "ifconfig -a | grep ^eth | awk '{print $5}'"
macAddr = run_cmd(cmd)
return macAddr
def wmac_chk():
cmd = "ifconfig -a | grep ^wlan | awk '{print $5}'"
wmacAddr = run_cmd(cmd)
return wmacAddr
def stalk_chk():
cmd = "hostname"
return run_cmd(cmd)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
finally:
lcd_byte(0x01, LCD_CMD)
lcd_string("Goodbye!",LCD_LINE_1,2)
GPIO.cleanup()
|
Python
| 0.000001
|
@@ -640,398 +640,8 @@
1%5D%0A%0A
- str = wip_chk()%0A str = str%5B:-1%5D%0A lcd_string('%25s WL ' %25 (str),LCD_LINE_2,1)%0A str = wmac_chk()%0A str = str%5B:-1%5D%0A# lcd_string('%25s' %25 (str),LCD_LINE_2,1)%0A blueLCDon()%0A time.sleep(1.2) %0A %0A str = stalk_chk()%0A str = str%5B:-1%5D%0A lcd_string('%25s' %25 (tstr),LCD_LINE_1,1)%0A lcd_string('%25s ' %25 (str),LCD_LINE_2,1)%0A blueLCDon()%0A time.sleep(1) %0A%0A
|
1ed14e9231d295c6db83337f7cf2b586a39dc3dc
|
Add timestamp to payment log list display
|
apps/cowry_docdata/admin.py
|
apps/cowry_docdata/admin.py
|
from babel.numbers import format_currency
from django.contrib import admin
from django.core.urlresolvers import reverse
from django.utils import translation
from .models import DocDataPaymentOrder, DocDataPayment, DocDataPaymentLogEntry
class DocDataPaymentLogEntryInine(admin.TabularInline):
model = DocDataPaymentLogEntry
can_delete = False
extra = 0
max_num = 0
fields = ('timestamp', 'level', 'message')
readonly_fields = fields
class DocDataPaymentInline(admin.TabularInline):
model = DocDataPayment
can_delete = False
extra = 0
max_num = 0
fields = ('payment_method', 'status', 'created', 'updated')
readonly_fields = fields
class DocDataPaymentOrderAdmin(admin.ModelAdmin):
list_filter = ('status',)
list_display = ('created', 'amount_override', 'status')
raw_id_fields = ('order',)
search_fields = ('payment_order_id', 'merchant_order_reference')
inlines = (DocDataPaymentInline, DocDataPaymentLogEntryInine)
def amount_override(self, obj):
language = translation.get_language().split('-')[0]
return format_currency(obj.amount / 100, obj.currency, locale=language)
amount_override.short_description = 'amount'
admin.site.register(DocDataPaymentOrder, DocDataPaymentOrderAdmin)
class DocDataPaymentLogEntryAdmin(admin.ModelAdmin):
# List view.
list_display = ('payment', 'level', 'message')
list_filter = ('level', 'timestamp')
search_fields = ('message',)
def payment(self, obj):
payment = obj.docdata_payment_order
url = reverse('admin:%s_%s_change' % (payment._meta.app_label, payment._meta.module_name), args=[payment.id])
return "<a href='%s'>%s</a>" % (str(url), payment)
payment.allow_tags = True
# Don't allow the detail view to be accessed.
def has_change_permission(self, request, obj=None):
if not obj:
return True
return False
admin.site.register(DocDataPaymentLogEntry, DocDataPaymentLogEntryAdmin)
|
Python
| 0.000001
|
@@ -1381,16 +1381,29 @@
ayment',
+ 'timestamp',
'level'
|
2a813a0e4e32a5511c756fd8243c1cbee142d9f9
|
fix typo in price range query setting
|
oscar/defaults.py
|
oscar/defaults.py
|
from django.utils.translation import ugettext_lazy as _
OSCAR_SHOP_NAME = 'Oscar'
OSCAR_SHOP_TAGLINE = ''
# Basket settings
OSCAR_BASKET_COOKIE_LIFETIME = 7 * 24 * 60 * 60
OSCAR_BASKET_COOKIE_OPEN = 'oscar_open_basket'
OSCAR_BASKET_COOKIE_SAVED = 'oscar_saved_basket'
OSCAR_MAX_BASKET_QUANTITY_THRESHOLD = 10000
# Recently-viewed products
OSCAR_RECENTLY_VIEWED_COOKIE_LIFETIME = 7 * 24 * 60 * 60
OSCAR_RECENTLY_VIEWED_COOKIE_NAME = 'oscar_history'
OSCAR_RECENTLY_VIEWED_PRODUCTS = 20
# Currency
OSCAR_DEFAULT_CURRENCY = 'GBP'
OSCAR_CURRENCY_LOCALE = 'en_GB'
# Paths
OSCAR_IMAGE_FOLDER = 'images/products/%Y/%m/'
OSCAR_PROMOTION_FOLDER = 'images/promotions/'
# Copy this image from oscar/static/img to your MEDIA_ROOT folder.
# It needs to be there so Sorl can resize it.
OSCAR_MISSING_IMAGE_URL = 'image_not_found.jpg'
OSCAR_UPLOAD_ROOT = '/tmp'
# Address settings
OSCAR_REQUIRED_ADDRESS_FIELDS = ('first_name', 'last_name', 'line1',
'line4', 'postcode', 'country')
# Product list settings
OSCAR_PRODUCTS_PER_PAGE = 20
# Checkout
OSCAR_ALLOW_ANON_CHECKOUT = False
# Partners
OSCAR_PARTNER_WRAPPERS = {}
# Promotions
COUNTDOWN, LIST, SINGLE_PRODUCT, TABBED_BLOCK = (
'Countdown', 'List', 'SingleProduct', 'TabbedBlock')
OSCAR_PROMOTION_MERCHANDISING_BLOCK_TYPES = (
(COUNTDOWN, "Vertical list"),
(LIST, "Horizontal list"),
(TABBED_BLOCK, "Tabbed block"),
(SINGLE_PRODUCT, "Single product"),
)
OSCAR_PROMOTION_POSITIONS = (('page', 'Page'),
('right', 'Right-hand sidebar'),
('left', 'Left-hand sidebar'))
# Reviews
OSCAR_ALLOW_ANON_REVIEWS = True
OSCAR_MODERATE_REVIEWS = False
# Accounts
OSCAR_ACCOUNTS_REDIRECT_URL = 'customer:profile-view'
# This enables sending alert notifications/emails
# instantly when products get back in stock
# by listening to stock record update signals
# this might impact performace for large numbers
# stock record updates.
# Alternatively, the management command
# ``oscar_send_alerts`` can be used to
# run periodically, e.g. as a cronjob. In this case
# instant alerts should be disabled.
OSCAR_EAGER_ALERTS = True
# Registration
OSCAR_SEND_REGISTRATION_EMAIL = True
OSCAR_FROM_EMAIL = 'oscar@example.com'
# Offers
OSCAR_OFFER_BLACKLIST_PRODUCT = None
# Cookies
OSCAR_COOKIES_DELETE_ON_LOGOUT = ['oscar_recently_viewed_products', ]
# Hidden Oscar features, e.g. wishlists or reviews
OSCAR_HIDDEN_FEATURES = []
# Menu structure of the dashboard navigation
OSCAR_DASHBOARD_NAVIGATION = [
{
'label': _('Dashboard'),
'icon': 'icon-th-list',
'url_name': 'dashboard:index',
},
{
'label': _('Catalogue'),
'icon': 'icon-sitemap',
'children': [
{
'label': _('Products'),
'url_name': 'dashboard:catalogue-product-list',
},
{
'label': _('Categories'),
'url_name': 'dashboard:catalogue-category-list',
},
{
'label': _('Ranges'),
'url_name': 'dashboard:range-list',
},
{
'label': _('Low stock alerts'),
'url_name': 'dashboard:stock-alert-list',
},
]
},
{
'label': _('Fulfilment'),
'icon': 'icon-shopping-cart',
'children': [
{
'label': _('Order management'),
'url_name': 'dashboard:order-list',
},
{
'label': _('Statistics'),
'url_name': 'dashboard:order-stats',
},
{
'label': _('Partners'),
'url_name': 'dashboard:partner-list',
},
]
},
{
'label': _('Customers'),
'icon': 'icon-group',
'children': [
{
'label': _('Customer management'),
'url_name': 'dashboard:users-index',
},
{
'label': _('Stock alert requests'),
'url_name': 'dashboard:user-alert-list',
},
]
},
{
'label': _('Offers'),
'icon': 'icon-bullhorn',
'children': [
{
'label': _('Offer management'),
'url_name': 'dashboard:offer-list',
},
{
'label': _('Vouchers'),
'url_name': 'dashboard:voucher-list',
},
],
},
{
'label': _('Content'),
'icon': 'icon-folder-close',
'children': [
{
'label': _('Content blocks'),
'url_name': 'dashboard:promotion-list',
},
{
'label': _('Content blocks by page'),
'url_name': 'dashboard:promotion-list-by-page',
},
{
'label': _('Pages'),
'url_name': 'dashboard:page-list',
},
{
'label': _('Email templates'),
'url_name': 'dashboard:comms-list',
},
{
'label': _('Reviews'),
'url_name': 'dashboard:reviews-list',
},
]
},
{
'label': _('Reports'),
'icon': 'icon-bar-chart',
'url_name': 'dashboard:reports-index',
},
]
# Search facets
OSCAR_SEARCH_FACETS = {
'fields': {
# The key for these dicts will be used when passing facet data
# to the template. Same for the 'queries' dict below.
'category': {
'name': _('Category'),
'field': 'category'
},
'product_class': {
'name': _('Type'),
'field': 'product_class'
},
#'rating': {
# 'name': _('Rating'),
# 'field': 'rating'
#}
},
'queries': {
'price_range': {
'name': _('Price range'),
'field': 'price',
'queries': [
# This is a list of (name, query) tuples where the name will
# be displayed on the front-end.
(_('0 to 40'), '[0 TO 20]'),
(_('20 to 40'), '[20 TO 40]'),
(_('40 to 60'), '[40 TO 60]'),
(_('60+'), '[60 TO *]'),
]
}
}
}
OSCAR_SETTINGS = dict(
[(k, v) for k, v in locals().items() if k.startswith('OSCAR_')])
|
Python
| 0.000074
|
@@ -6193,25 +6193,25 @@
(_('0 to
-4
+2
0'), '%5B0 TO
|
e1c892cbec5091b6f1bf5065bfed00c7d7149b4f
|
change get_jd12 to instead manually catch the IERS error
|
astropy/coordinates/builtin_frames/utils.py
|
astropy/coordinates/builtin_frames/utils.py
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains functions/values used repeatedly in different modules of
the ``builtin_frames`` package.
"""
from __future__ import (absolute_import, unicode_literals, division,
print_function)
import warnings
import numpy as np
from ... import units as u
from ...time import Time
from ...utils import iers
from ...utils.exceptions import AstropyWarning
from ..representation import CartesianRepresentation
# The UTC time scale is not properly defined prior to 1960, so Time('B1950',
# scale='utc') will emit a warning. Instead, we use Time('B1950', scale='tai')
# which is equivalent, but does not emit a warning.
EQUINOX_J2000 = Time('J2000', scale='utc')
EQUINOX_B1950 = Time('B1950', scale='tai')
# This is a time object that is the default "obstime" when such an attribute is
# necessary. Currently, we use J2000.
DEFAULT_OBSTIME = Time('J2000', scale='utc')
PIOVER2 = np.pi / 2.
#comes from the mean of the 1962-2014 IERS B data
_DEFAULT_PM = (0.035, 0.29)*u.arcsec
_IERS_HINT = """
If you need enough precision such that this matters (~<10 arcsec), you can
use the latest IERS predictions by running:
>>> from astropy.utils import iers
>>> iers.IERS.iers_table = iers.IERS_A.open(iers.IERS_A_URL)
"""
def cartrepr_from_matmul(pmat, coo, transpose=False):
"""
Note that pmat should be an ndarray, *not* a matrix.
"""
if pmat.shape[-2:] != (3, 3):
raise ValueError("tried to do matrix multiplication with an array that "
"doesn't end in 3x3")
if coo.isscalar:
# a simpler path for scalar coordinates
if transpose:
pmat = pmat.T
newxyz = np.sum(pmat * coo.cartesian.xyz, axis=-1)
else:
xyz = coo.cartesian.xyz.T
# these expression are the same as iterating over the first dimension of
# pmat and xyz and doing matrix multiplication on each in turn. resulting
# dimension is <coo shape> x 3
pmat = pmat.reshape(pmat.size//9, 3, 3)
if transpose:
pmat = pmat.transpose(0, 2, 1)
newxyz = np.sum(pmat * xyz.reshape(xyz.size//3, 1, 3), axis=-1).T
return CartesianRepresentation(newxyz)
def get_polar_motion(time):
"""
gets the two polar motion components in radians for use with apio13
"""
#get the polar motion from the IERS table
xp, yp, status = iers.IERS.open().pm_xy(time, return_status=True)
wmsg = None
if np.any(status == iers.TIME_BEFORE_IERS_RANGE):
wmsg = ('Tried to get polar motions for times before IERS data is '
'valid. Defaulting to polar motion from the 50-yr mean for those.')
xp.ravel()[status.ravel()==iers.TIME_BEFORE_IERS_RANGE] = _DEFAULT_PM[0]
yp.ravel()[status.ravel()==iers.TIME_BEFORE_IERS_RANGE] = _DEFAULT_PM[1]
warnings.warn(wmsg, AstropyWarning)
if np.any(status == iers.TIME_BEYOND_IERS_RANGE):
wmsg = ('Tried to get polar motions for times after IERS data is '
'valid. Defaulting to polar motion from the 50-yr mean for those.' + _IERS_HINT)
xp.ravel()[status.ravel()==iers.TIME_BEYOND_IERS_RANGE] = _DEFAULT_PM[0]
yp.ravel()[status.ravel()==iers.TIME_BEYOND_IERS_RANGE] = _DEFAULT_PM[1]
warnings.warn(wmsg, AstropyWarning)
return xp.to(u.radian).value, yp.to(u.radian).value
def get_dut1utc(time):
"""
This function is used to get UT1-UTC in coordinates because normally it
gives an error outside the IERS range, but in coordinates we want to allow
it to go through but with a warning.
"""
try:
return time.delta_ut1_utc
except IndexError as e:
msg = e.args[0] + ' Assuming UT1-UTC=0 for coordinate transformations.' + _IERS_HINT
warnings.warn(msg, AstropyWarning)
return np.zeros(time.shape)
def get_jd12(time, scale):
"""
Gets ``jd1`` and ``jd2`` from a time object in a particular scale.
Parameters
----------
time : `~astropy.time.Time`
The time to get the jds for
scale : str
The time scale to get the jds for
Returns
-------
jd1 : float
jd2 : float
"""
if time.scale == scale:
newtime = time
elif time.scale == 'ut1' or scale == 'ut1':
olddt = time.delta_ut1_utc
time.delta_ut1_utc = get_dut1utc(time)
newtime = getattr(time, scale)
time.delta_ut1_utc = olddt # ensures no changes to the input `time`
else:
newtime = getattr(time, scale)
return newtime.jd1, newtime.jd2
|
Python
| 0
|
@@ -3452,16 +3452,333 @@
value%0A%0A%0A
+def _warn_iers(ierserr):%0A %22%22%22%0A Generate a warning for an IERSRangeerror%0A%0A Parameters%0A ----------%0A ierserr : An %60~astropy.utils.iers.IERSRangeError%60%0A %22%22%22%0A msg = '%7B0%7D Assuming UT1-UTC=0 for coordinate transformations.%7B1%7D'%0A warnings.warn(msg.format(ierserr.args%5B0%5D, _IERS_HINT), AstropyWarning)%0A%0A
def get_
@@ -4062,13 +4062,22 @@
ept
-Index
+iers.IERSRange
Erro
@@ -4096,134 +4096,20 @@
-msg = e.args%5B0%5D + ' Assuming UT1-UTC=0 for coordinate transformations.' + _IERS_HINT%0A warnings.warn(msg, AstropyWarning
+_warn_iers(e
)%0A
@@ -4532,297 +4532,161 @@
el
-if time.scale == 'ut1' or scale == 'ut1':%0A olddt = time.delta_ut1_utc%0A time.delta_ut1_utc = get_dut1utc(time)%0A newtime = getattr(time, scale)%0A time.delta_ut1_utc = olddt # ensures no changes to the input %60time%60%0A else:%0A newtime = getattr(time, scale)
+se:%0A try:%0A newtime = getattr(time, scale)%0A except iers.IERSRangeError as e:%0A _warn_iers(e)%0A newtime = time
%0A%0A
|
8064be72de340fca963da2cade2b73aa969fbdbd
|
Add string representation for Activity model
|
csunplugged/activities/models.py
|
csunplugged/activities/models.py
|
from django.db import models
class Activity(models.Model):
name = models.CharField(max_length=200)
description = models.TextField()
|
Python
| 0.000039
|
@@ -134,8 +134,57 @@
Field()%0A
+%0A def __str__(self):%0A return self.name%0A
|
dd4e62667da94469a8bbb6dd0ccd881124e7665f
|
Fix return value of terraform.render
|
src/buildercore/terraform.py
|
src/buildercore/terraform.py
|
import json
from buildercore.utils import ensure
RESOURCE_TYPE_FASTLY = 'fastly_service_v1'
RESOURCE_NAME_FASTLY = 'fastly-cdn'
def render(context):
if not context['fastly']:
return None
ensure(len(context['fastly']['subdomains']) == 1, "Only 1 subdomain for Fastly CDNs is supported")
tf_file = {
'resource': {
RESOURCE_TYPE_FASTLY: {
# must be unique but only in a certain context like this, use some constants
RESOURCE_NAME_FASTLY: {
'name': context['stackname'],
'domain': {
'name': context['fastly']['subdomains'][0],
},
'backend': {
'address': context['full_hostname'],
'name': context['stackname'],
'port': 443,
'use_ssl': True,
'ssl_check_cert': False # bad option
# it's for minimal fuss. Before we start customizing this, a lot of the risk to be tackled
# is integrating everything together with a good lifecycle for adding, modifying and removing
# CDNs that point to CloudFormation-managed resources.
},
'force_destroy': True
}
}
},
}
return json.dumps(tf_file)
|
Python
| 0.000126
|
@@ -193,12 +193,12 @@
urn
-None
+'%7B%7D'
%0A%0A
|
1472e4204e9a654a2296f690e8420c97ef98fb7c
|
Read device entity id from config file
|
senic_hub/nuimo_app/components/__init__.py
|
senic_hub/nuimo_app/components/__init__.py
|
import logging
from pprint import pformat
from threading import Thread
from .. import matrices
from ..hass import HomeAssistant
logger = logging.getLogger(__name__)
def clamp_value(value, range_):
return min(max(value, range_.start), range_.stop)
class BaseComponent:
MATRIX = matrices.ERROR
def __init__(self, config):
self.name = config['name']
def start(self):
pass
def stop(self):
pass
class ThreadComponent(BaseComponent):
def __init__(self, config):
super().__init__(config)
self.stopping = True
self.thread = None
def run(self):
"""
Concrete components must implement run() method
"""
raise NotImplementedError()
def start(self):
self.stopping = False
self.thread = Thread(target=self.run)
self.thread.start()
def stop(self):
self.stopping = True
class HomeAssistantComponent(BaseComponent):
def __init__(self, ha_domain, config):
super().__init__(config)
self.is_on = False
# TODO: Parametrize HA's address?
self._ha_address = "localhost:8123"
# TODO: Read entity id from config
self._ha_entity_id = "media_player.office"
self._ha_domain = ha_domain
def start(self):
super().start()
# TODO: Provide single HA instance to all HA-based components
self._ha = HomeAssistant(self._ha_address, on_connect=self._ha_connected, on_disconnect=self._ha_disconnected)
self._ha.start()
self._ha.register_state_listener(self._ha_entity_id, self._ha_state_changed)
def stop(self):
super().stop()
self._ha.stop()
self._ha.unregister_state_listener(self._ha_entity_id)
def run(self):
pass
def update_from_ha_state(self, state):
self.is_on = state.get('state', None) != 'off'
def call_ha_service(self, service, data={}, on_success=None, on_error=None):
def _on_success(result):
logger.debug("Calling service %s:%s succeeded with result: %s", self._ha_domain, service, pformat(result))
def _on_error():
logger.debug("Failed calling service %s:%s", self._ha_domain, service)
data["entity_id"] = self._ha_entity_id
logger.debug("Call service %s:%s with data: %s", self._ha_domain, service, pformat(data))
self._ha.call_service(self._ha_domain, service, data, on_success or _on_success, on_error or _on_error)
def _ha_connected(self):
def on_state_retrieved(state):
self._ha_state_changed(state)
def on_state_retrieve_failed():
logger.debug("HA get state failed")
self._ha.get_state(self._ha_entity_id, on_state_retrieved, on_state_retrieve_failed)
def _ha_disconnected(self):
pass
def _ha_state_changed(self, state):
if "data" in state:
self.update_from_ha_state(state["data"]["new_state"])
else:
self.update_from_ha_state(state)
|
Python
| 0
|
@@ -1159,93 +1159,48 @@
-# TODO: Read
+self._ha_
entity
- id from
+_id =
config
-%0A self._ha_entity_id = %22media_player.office%22
+%5B'entity_id'%5D
%0A
|
09467d27c9891f3aa672c41273a5704f65a8ae86
|
undo naieve path hacking
|
replayswithfriends/settings/prod.py
|
replayswithfriends/settings/prod.py
|
"""Production settings and globals."""
from os import environ
from sys import exc_info
from urlparse import urlparse, uses_netloc
from S3 import CallingFormat
from common import *
# Helper lambda for gracefully degrading environmental variables:
env = lambda e, d: environ[e] if environ.has_key(e) else d
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = env('EMAIL_HOST', 'smtp.gmail.com')
EMAIL_HOST_PASSWORD = env('EMAIL_HOST_PASSWORD', '')
EMAIL_HOST_USER = env('EMAIL_HOST_USER', 'your_email@example.com')
EMAIL_PORT = env('EMAIL_PORT', 587)
EMAIL_SUBJECT_PREFIX = '[%s] ' % SITE_NAME
EMAIL_USE_TLS = True if env('EMAIL_USE_TLS', 'True') == 'True' else False
SERVER_EMAIL = EMAIL_HOST_USER
########## DATABASE CONFIGURATION
# See: http://devcenter.heroku.com/articles/django#postgres_database_config
uses_netloc.append('postgres')
uses_netloc.append('mysql')
try:
if environ.has_key('DATABASE_URL'):
url = urlparse(environ['DATABASE_URL'])
DATABASES['default'] = {
'NAME': url.path[1:],
'USER': url.username,
'PASSWORD': url.password,
'HOST': url.hostname,
'PORT': url.port,
}
if url.scheme == 'postgres':
DATABASES['default']['ENGINE'] = 'django.db.backends.postgresql_psycopg2'
if url.scheme == 'mysql':
DATABASES['default']['ENGINE'] = 'django.db.backends.mysql'
except:
print "Unexpected error:", exc_info()
CACHES = {
'default': {
'BACKEND': 'django_pylibmc.memcached.PyLibMCCache',
'LOCATION': 'localhost:11211',
'TIMEOUT': 500,
'BINARY': True,
'OPTIONS': {
'tcp_nodelay': True,
'ketama': True,
}
}
}
########## CELERY CONFIGURATION
BROKER_URL = 'redis://redistogo:ec07a0173994aab43a457a6c6c9a761c@herring.redistogo.com:9920/'
CELERY_RESULT_BACKEND = "redis://redistogo:ec07a0173994aab43a457a6c6c9a761c@herring.redistogo.com:9920/"
CELERY_TASK_RESULT_EXPIRES = 60 * 60
########## END CELERY CONFIGURATION
INSTALLED_APPS += [
'storages',
'raven.contrib.django',
]
MIDDLEWARE_CLASSES = [
'raven.contrib.django.middleware.SentryResponseErrorIdMiddleware',
] + MIDDLEWARE_CLASSES + [
'raven.contrib.django.middleware.Sentry404CatchMiddleware',
]
SENTRY_DSN = 'https://783890b5ea754868bbb54dad7632285a:56827db1f54345978e06573aa9755b24@app.getsentry.com/671'
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'root': {
'level': 'WARNING',
'handlers': ['sentry'],
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
},
'handlers': {
'sentry': {
'level': 'ERROR',
'class': 'raven.contrib.django.handlers.SentryHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
}
},
'loggers': {
'django.db.backends': {
'level': 'ERROR',
'handlers': ['console'],
'propagate': False,
},
'raven': {
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False,
},
'sentry.errors': {
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False,
},
},
}
MEDIA_ROOT = normpath(join(DJANGO_ROOT, "replayswithfriends", 'packaged', 'media'))
MEDIA_URL = '/media/'
STATIC_ROOT = normpath(join(DJANGO_ROOT, "replayswithfriends", 'packaged', 'static'))
STATIC_URL = '/static/'
ADMIN_MEDIA_PREFIX = '/static/admin/'
COMPRESS_ROOT = STATIC_ROOT
COMPRESS_ENABLED = True
COMPRESS_OFFLINE = True
COMPRESS_URL = STATIC_URL
COMPRESS_PARSER = 'compressor.parser.LxmlParser'
COMPRESS_PRECOMPILERS = [
('text/less', 'lessc {infile} {outfile}'),
]
STATICFILES_DIRS = [
normpath(join(DJANGO_ROOT, "replayswithfriends", 'static')),
]
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
]
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
STATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
AWS_CALLING_FORMAT = CallingFormat.SUBDOMAIN
AWS_ACCESS_KEY_ID = env('AWS_ACCESS_KEY_ID', 'AKIAJ35NWHEJY2Z3VWNQ')
AWS_SECRET_ACCESS_KEY = env('AWS_SECRET_ACCESS_KEY', '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')
AWS_STORAGE_BUCKET_NAME = env('AWS_STORAGE_BUCKET_NAME', 'r-play')
STATIC_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
MEDIA_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
COMPRESS_URL = STATIC_URL
SECRET_KEY = environ.get('SECRET_KEY', SECRET_KEY)
|
Python
| 0.000001
|
@@ -3473,768 +3473,8 @@
%0A%7D%0A%0A
-MEDIA_ROOT = normpath(join(DJANGO_ROOT, %22replayswithfriends%22, 'packaged', 'media'))%0AMEDIA_URL = '/media/'%0ASTATIC_ROOT = normpath(join(DJANGO_ROOT, %22replayswithfriends%22, 'packaged', 'static'))%0ASTATIC_URL = '/static/'%0AADMIN_MEDIA_PREFIX = '/static/admin/'%0ACOMPRESS_ROOT = STATIC_ROOT%0ACOMPRESS_ENABLED = True%0ACOMPRESS_OFFLINE = True%0ACOMPRESS_URL = STATIC_URL%0ACOMPRESS_PARSER = 'compressor.parser.LxmlParser'%0ACOMPRESS_PRECOMPILERS = %5B%0A ('text/less', 'lessc %7Binfile%7D %7Boutfile%7D'),%0A%5D%0A%0ASTATICFILES_DIRS = %5B%0A normpath(join(DJANGO_ROOT, %22replayswithfriends%22, 'static')),%0A%5D%0ASTATICFILES_FINDERS = %5B%0A 'django.contrib.staticfiles.finders.FileSystemFinder',%0A 'django.contrib.staticfiles.finders.AppDirectoriesFinder',%0A 'compressor.finders.CompressorFinder',%0A%5D%0A
%0A# S
|
993c1a5e0d5576427fea6e4630ae94c21acf40ff
|
Repair switch on CLI.
|
hyhyhy.py
|
hyhyhy.py
|
#
# Hyhyhy - https://github.com/MaciejCzyzewski/Hyhyhy
#
# The MIT License (MIT)
#
# Copyright (c) 2014 Maciej A. Czyzewski
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Author: Maciej A. Czyzewski <maciejanthonyczyzewski@gmail.com>
#
import sys, os
if sys.version_info < (3,2):
sys.exit( "Hyhyhy requires python in version >= 3.2" )
import string, glob, configparser
__version__ = '1.0.0'
def num(a):
b = a.split('.')[-2]
c = b.split('/')[-1]
return int(c)
def prf(a):
b = {}
b['OK'] = '\033[92m[OK]\033[0m'
b['FAIL'] = '\033[91m[FAIL]\033[0m'
b['INFO'] = '\033[95m[INFO]\033[0m'
b['WARNING'] = '\033[93m[WARNING]\033[0m'
return b[str(a)]
print( prf('INFO'), "Starting hyhyhyhyhyhyhyhyhy", "..." )
assets, sections, default = list(glob.glob("assets/*")), list(glob.glob("sections/*")), 'default.cfg'
html = '<html><head>(head)</head><body>(body)</body></html>'
config = configparser.ConfigParser()
config.read(default)
sections.sort(key = num)
print( prf('OK'), "Reading config file", default, "..." )
print( "------------------------------------------------------------------" )
def getHead(a = ""):
global html, assets, sections
if config['head']['charset']:
a += "<meta charset='" + str(config['head']['charset']) + "'>"
if config['head']['title']:
a += "<title>" + str(config['head']['title']) + "</title>"
for i in assets:
print( prf('OK'), "Parsing file", i, "..." )
if i.split('.')[-1] == "css":
a += "<style>" + str(open(i, 'r').read()) + "</style>"
elif i.split('.')[-1] == "js":
a += "<script>" + str(open(i, 'r').read()) + "</script>"
return a
def getBody(a = ""):
global html, assets, sections
for i in sections:
print( prf('OK'), "Parsing file", i, "..." )
a += "<section>" + str(open(i, 'r').read()) + "</section>"
return a
def getHtml():
global html
html = html.replace('(head)', getHead())
html = html.replace('(body)', getBody())
return html
def initBuild():
global config
if not os.path.exists(config['core']['build']):
os.makedirs(config['core']['build'].split('/')[-2])
build = open(config['core']['build'], 'w')
build.write(getHtml())
build.close()
print( prf('OK'), "Saved in", config['core']['build'], "->", config['head']['title'] )
def initStatus():
global assets, sections
print( prf('OK'), "Structure of project", "[" + str(len(sections)) + " slides]" )
print( " - assets/" )
for i in assets:
if i.split('.')[-1] == "css":
print( " -", i.split('/')[-1], "[style]" )
elif i.split('.')[-1] == "js":
print( " -", i.split('/')[-1], "[script]" )
else:
print( " -", i.split('/')[-1] )
print( " - sections/" )
for i in sections:
print( " -", i.split('/')[-1] )
if len(sys.argv) == 1 or sys.argv[1] == 'build':
initBuild()
elif sys.argv[1] == 'status':
initStatus()
else:
docs = '''
Hyhyhy - Presentation nano-framework. [v ''' + __version__ + ''']
hyhyhy.py build - Build project to file
hyhyhy.py status - Show project structure
'''
print( docs )
|
Python
| 0
|
@@ -1409,16 +1409,234 @@
1.0.0'%0A%0A
+__help__ = '''%0A Hyhyhy - Presentation nano-framework. %5Bv ''' + __version__ + '''%5D%0A hyhyhy.py build - Build project to file%0A hyhyhy.py status - Show project structure%0A '''%0A%0A
def num(
@@ -1919,68 +1919,8 @@
)%5D%0A%0A
-print( prf('INFO'), %22Starting hyhyhyhyhyhyhyhyhy%22, %22...%22 )%0A%0A
asse
@@ -2138,16 +2138,16 @@
fault)%0A%0A
+
sections
@@ -2168,145 +2168,8 @@
m)%0A%0A
-print( prf('OK'), %22Reading config file%22, default, %22...%22 )%0Aprint( %22------------------------------------------------------------------%22 )%0A%0A
def
@@ -3970,16 +3970,17 @@
%5B-1%5D )%0A%0A
+%0A
if len(s
@@ -3995,12 +3995,13 @@
==
-1 or
+2 and
sys
@@ -4017,317 +4017,272 @@
== '
-build
+help
':%0A
-initBuild()%0Aelif sys.argv%5B1%5D == 'status':%0A initStatus()%0Aelse:%0A docs = '''%0A Hyhyhy - Presentation nano-framework. %5Bv ''' + __version__ + '''%5D%0A hyhyhy.py build - Build project to file%0A hyhyhy.py
+print( __help__ )%0Aelse:%0A print( prf('INFO'), %22Starting hyhyhyhyhyhyhyhyhy%22, %22...%22 )%0A print( prf('OK'), %22Reading config file%22, default, %22...%22 )%0A%0A if len(sys.argv) == 2 and sys.argv%5B1%5D == '
status
+':%0A
--
- Show project structure%0A '''%0A print( docs
+initStatus()%0A else:%0A initBuild(
)%0A%0A
|
a98e536334eb3d3376efe93c1bdc639ecdc4a2a0
|
remove unused code
|
approvaltests/reporters/generic_diff_reporter_factory.py
|
approvaltests/reporters/generic_diff_reporter_factory.py
|
import json
from approvaltests.reporters.generic_diff_reporter import GenericDiffReporter
from approvaltests.utils import get_adjacent_file
class GenericDiffReporterFactory(object):
reporters = []
def __init__(self):
self.load(get_adjacent_file('reporters.json'))
self.add_fallback_reporter_config(["PythonNative", "python", [get_adjacent_file("python_native_reporter.py")]])
def add_default_reporter_config(self, config):
self.reporters.insert(0, config)
def add_fallback_reporter_config(self, config):
self.reporters.append(config)
def list(self):
return [r[0] for r in self.reporters]
def get(self, reporter_name):
config = next((r for r in self.reporters if r[0] == reporter_name), None)
return self._create_reporter(config)
@staticmethod
def _create_reporter(config):
if not config:
return None
return GenericDiffReporter(config)
def save(self, file_name):
with open(file_name, 'w') as f:
json.dump(
self.reporters,
f,
sort_keys=True,
indent=2,
separators=(',', ': ')
)
return file_name
def load(self, file_name):
with open(file_name, 'r') as f:
self.reporters = json.load(f)
return self.reporters
def get_first_working(self):
working = (i for i in self.get_all_reporters() if i.is_working())
return next(working, None)
def get_all_reporters(self):
instances = (self._create_reporter(r) for r in self.reporters)
return instances
def remove(self, reporter_name):
self.reporters = [r for r in self.reporters if r[0] != reporter_name]
|
Python
| 0.000017
|
@@ -402,101 +402,8 @@
%5D)%0A%0A
- def add_default_reporter_config(self, config):%0A self.reporters.insert(0, config)%0A%0A
|
54fbb379300c04bed88776d5c4113671b32af1f1
|
use dtypes in dd.demo
|
dask/dataframe/demo.py
|
dask/dataframe/demo.py
|
from __future__ import absolute_import, division, print_function
import pandas as pd
import numpy as np
from .core import tokenize, DataFrame
from ..utils import different_seeds
__all__ = ['make_timeseries']
def make_float(n, rstate):
return rstate.rand(n) * 2 - 1
def make_int(n, rstate):
return rstate.poisson(1000, size=n)
names = ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank', 'George',
'Hannah', 'Ingrid', 'Jerry', 'Kevin', 'Laura', 'Michael', 'Norbert', 'Oliver',
'Patricia', 'Quinn', 'Ray', 'Sarah', 'Tim', 'Ursula', 'Victor', 'Wendy',
'Xavier', 'Yvonne', 'Zelda']
def make_string(n, rstate):
return rstate.choice(names, size=n)
def make_categorical(n, rstate):
return pd.Categorical.from_codes(rstate.randint(0, len(names), size=n),
names)
make = {float: make_float,
int: make_int,
str: make_string,
object: make_string,
'category': make_categorical}
def make_timeseries_part(start, end, dtypes, freq, seed):
index = pd.DatetimeIndex(start=start, end=end, freq=freq)
state = np.random.RandomState(seed)
columns = dict((k, make[dt](len(index), state)) for k, dt in dtypes.items())
df = pd.DataFrame(columns, index=index, columns=sorted(columns))
if df.index[-1] == end:
df = df.iloc[:-1]
return df
def make_timeseries(start, end, dtypes, freq, partition_freq, seed=None):
""" Create timeseries dataframe with random data
Parameters
----------
start: datetime (or datetime-like string)
Start of time series
end: datetime (or datetime-like string)
End of time series
dtypes: dict
Mapping of column names to types.
Valid types include {float, int, str, 'category'}
freq: string
String like '2s' or '1H' or '12W' for the time series frequency
partition_freq: string
String like '1M' or '2Y' to divide the dataframe into partitions
seed: int (optional)
Randomstate seed
>>> import dask.dataframe as dd
>>> df = dd.demo.make_timeseries('2000', '2010',
... {'value': float, 'name': str, 'id': int},
... freq='2H', partition_freq='1D', seed=1)
>>> df.head()
id name value
2000-01-01 00:00:00 960 Dan 0.824008
2000-01-01 02:00:00 1033 Xavier 0.575390
2000-01-01 04:00:00 986 George 0.693842
2000-01-01 06:00:00 1073 Sarah 0.900580
2000-01-01 08:00:00 976 Hannah -0.373847
"""
divisions = list(pd.DatetimeIndex(start=start, end=end,
freq=partition_freq))
state = np.random.RandomState(seed)
seeds = different_seeds(len(divisions), state)
name = 'make-timeseries-' + tokenize(start, end, dtypes, freq, partition_freq)
dsk = dict(((name, i), (make_timeseries_part, divisions[i], divisions[i + 1],
dtypes, freq, seeds[i]))
for i in range(len(divisions) - 1))
return DataFrame(dsk, name, sorted(dtypes), divisions)
|
Python
| 0.000001
|
@@ -3060,16 +3060,80 @@
) - 1))%0A
+ head = make_timeseries_part('2000','2000', dtypes, '1H', 1)%0A
retu
@@ -3160,22 +3160,12 @@
me,
-sorted(dtypes)
+head
, di
|
5ea90191ac4bf381b015bb9a82f26466e35ffe06
|
Create default ignore rules before creating ignore tree. Fixes an issue where we upload crazy stuff like node_modules when calling share_dir.
|
ignore.py
|
ignore.py
|
import os
import errno
import fnmatch
import stat
try:
from . import msg, utils
from .exc_fmt import str_e
assert msg and str_e and utils
except ImportError:
import msg
from exc_fmt import str_e
# Ignore-rule files that are parsed in each directory.
IGNORE_FILES = ['.gitignore', '.hgignore', '.flooignore']
# Files shared even though they are hidden (dot-prefixed).
HIDDEN_WHITELIST = ['.floo'] + IGNORE_FILES
# Names that are always skipped, regardless of ignore rules.
BLACKLIST = [
    '.DS_Store',
    '.git',
    '.svn',
    '.hg',
]

# TODO: grab global git ignores:
# gitconfig_file = popen("git config -z --get core.excludesfile", "r");
# Patterns written into a freshly created .flooignore.
DEFAULT_IGNORES = [
    '#*',
    '*.o',
    '*.pyc',
    '*~',
    'extern/',
    'node_modules/',
    'tmp',
    'vendor/',
]
# Files larger than this (bytes) are never shared.
MAX_FILE_SIZE = 1024 * 1024 * 5

# Result codes for ignore checks (IS_IG_CHECK_CHILD: recurse into child).
IS_IG_IGNORED = 1
IS_IG_CHECK_CHILD = 2
def create_flooignore(path):
    """Write a .flooignore containing DEFAULT_IGNORES into *path*, unless one exists."""
    target = os.path.join(path, '.flooignore')
    # Existence check first (tiny TOCTOU window, acceptable for this use).
    if os.path.exists(target):
        return
    try:
        fh = open(target, 'w')
        try:
            fh.write('\n'.join(DEFAULT_IGNORES))
        finally:
            fh.close()
    except Exception as e:
        msg.error('Error creating default .flooignore: ', str_e(e))
def create_ignore_tree(path):
    """Build and return the Ignore tree rooted at *path*.

    Ensures the default ignore rules exist first: Ignore.load() only reads
    rules from on-disk ignore files, so without a .flooignore carrying
    DEFAULT_IGNORES (node_modules/, *.pyc, ...) those directories would be
    scanned and shared.
    """
    create_flooignore(path)
    ig = Ignore(path)
    # BLACKLIST entries (.git, .svn, ...) are always ignored.
    ig.ignores['/DEFAULT/'] = BLACKLIST
    ig.recurse(ig)
    return ig
class Ignore(object):
    """One node of the ignore tree: a directory, its rules, files, and children."""

    def __init__(self, path, parent=None):
        # parent: enclosing Ignore node (None for the root).
        self.parent = parent
        # size: bytes of shareable files directly in this directory.
        self.size = 0
        # total_size: size including all non-ignored subdirectories.
        self.total_size = 0
        # children: dirname -> Ignore node.
        self.children = {}
        # files: absolute paths of shareable files in this directory.
        self.files = []
        # ignores: ignore-file name -> list of patterns (newest first);
        # '/TOO_BIG/' collects files rejected for exceeding MAX_FILE_SIZE.
        self.ignores = {
            '/TOO_BIG/': []
        }
        self.path = utils.unfuck_path(path)

    def recurse(self, root):
        """Load this directory's ignore files, then walk its entries.

        *root* is the tree root; ignore checks always start there so rules
        from ancestor directories apply.
        """
        try:
            paths = os.listdir(self.path)
        except OSError as e:
            # ENOTDIR: a file was passed in; silently treat as empty.
            if e.errno != errno.ENOTDIR:
                msg.error('Error listing path ', self.path, ': ', str_e(e))
            return
        except Exception as e:
            msg.error('Error listing path ', self.path, ': ', str_e(e))
            return

        msg.debug('Initializing ignores for ', self.path)
        # Missing/unreadable ignore files are simply skipped.
        for ignore_file in IGNORE_FILES:
            try:
                self.load(ignore_file)
            except Exception:
                pass

        for p in paths:
            if p == '.' or p == '..':
                continue
            if p in BLACKLIST:
                msg.log('Ignoring blacklisted file ', p)
                continue
            p_path = os.path.join(self.path, p)
            try:
                s = os.stat(p_path)
            except Exception as e:
                msg.error('Error stat()ing path ', p_path, ': ', str_e(e))
                continue

            if stat.S_ISREG(s.st_mode) and p in HIDDEN_WHITELIST:
                # Don't count these whitelisted files in size
                self.files.append(p_path)
                continue

            is_dir = stat.S_ISDIR(s.st_mode)
            # Check against the whole tree's rules, logging the decision.
            if root.is_ignored(p_path, is_dir, True):
                continue

            if is_dir:
                ig = Ignore(p_path, self)
                self.children[p] = ig
                ig.recurse(root)
                self.total_size += ig.total_size
                continue

            if stat.S_ISREG(s.st_mode):
                if s.st_size > (MAX_FILE_SIZE):
                    self.ignores['/TOO_BIG/'].append(p)
                    msg.log(self.is_ignored_message(p_path, p, '/TOO_BIG/', False))
                else:
                    self.size += s.st_size
                    self.total_size += s.st_size
                    self.files.append(p_path)

    def load(self, ignore_file):
        """Read *ignore_file* from this directory into self.ignores.

        Blank lines and '#' comments are dropped; patterns are stored in
        reverse file order (insert at 0) so later rules take precedence.
        """
        with open(os.path.join(self.path, ignore_file), 'r') as fd:
            ignores = fd.read()
        rules = []
        for ignore in ignores.split('\n'):
            ignore = ignore.strip()
            if len(ignore) == 0:
                continue
            if ignore[0] == '#':
                continue
            msg.debug('Adding ', ignore, ' to ignore patterns')
            rules.insert(0, ignore)
        self.ignores[ignore_file] = rules

    def get_children(self):
        """Return all descendant Ignore nodes (depth-first, flattened)."""
        children = list(self.children.values())
        for c in self.children.values():
            children += c.get_children()
        return children

    def list_paths(self):
        """Yield every shareable file path in this subtree."""
        for f in self.files:
            yield os.path.join(self.path, f)
        for c in self.children.values():
            for p in c.list_paths():
                yield p

    def is_ignored_message(self, rel_path, pattern, ignore_file, exclude):
        """Build the human-readable reason a path was (or was not) ignored."""
        path = os.path.join(self.path, rel_path)
        exclude_msg = ''
        if exclude:
            exclude_msg = '__NOT__ '
        if ignore_file == '/TOO_BIG/':
            return '%s %signored because it is too big (more than %s bytes)' % (path, exclude_msg, MAX_FILE_SIZE)
        return '%s %signored by pattern %s in %s' % (path, exclude_msg, pattern, os.path.join(self.path, ignore_file))

    def is_ignored(self, path, is_dir=None, log=False):
        """Return True if *path* (absolute) is ignored by this subtree's rules."""
        if is_dir is None:
            try:
                s = os.stat(path)
            except Exception as e:
                msg.error('Error lstat()ing path ', path, ': ', str_e(e))
                # Unstattable paths are treated as ignored.
                return True
            is_dir = stat.S_ISDIR(s.st_mode)
        # Normalize to a '/'-separated path relative to this node.
        rel_path = os.path.relpath(path, self.path).replace(os.sep, '/')
        return self._is_ignored(rel_path, is_dir, log)

    def _is_ignored(self, rel_path, is_dir, log):
        """Match *rel_path* against this node's patterns, then recurse.

        Pattern semantics (gitignore-like): leading '!' negates, leading '/'
        anchors to this directory, trailing '/' matches directories only.
        """
        base_path, file_name = os.path.split(rel_path)
        if not is_dir and file_name in HIDDEN_WHITELIST:
            return False
        for ignore_file, patterns in self.ignores.items():
            for pattern in patterns:
                orig_pattern = pattern
                exclude = False
                match = False
                if pattern[0] == "!":
                    exclude = True
                    pattern = pattern[1:]
                if pattern[0] == '/':
                    # Anchored: match the full relative path.
                    match = fnmatch.fnmatch(rel_path, pattern[1:])
                else:
                    if len(pattern) > 0 and pattern[-1] == '/':
                        if is_dir:
                            pattern = pattern[:-1]
                    if fnmatch.fnmatch(file_name, pattern):
                        match = True
                    elif fnmatch.fnmatch(rel_path, pattern):
                        match = True
                if match:
                    if log:
                        msg.log(self.is_ignored_message(rel_path, orig_pattern, ignore_file, exclude))
                    if exclude:
                        return False
                    return True
        # No rule here: delegate the remainder of the path to the child node.
        split = rel_path.split("/", 1)
        if len(split) != 2:
            return False
        name, new_path = split
        ig = self.children.get(name)
        if ig:
            return ig._is_ignored(new_path, is_dir, log)
        return False
|
Python
| 0
|
@@ -1091,24 +1091,52 @@
tree(path):%0A
+ create_flooignore(path)%0A
ig = Ign
|
c5422645773b43de8811c691dfe03c82eda0b935
|
put cflags into configure
|
robustus/detail/install_protobuf.py
|
robustus/detail/install_protobuf.py
|
# =============================================================================
# COPYRIGHT 2013 Brain Corporation.
# License under MIT license (see LICENSE file)
# =============================================================================
import logging
import os
from requirement import RequirementException
from utility import unpack, safe_remove, run_shell, ln
import shutil
import subprocess
def install(robustus, requirement_specifier, rob_file, ignore_index):
    """Build protobuf from source into the robustus cache and wire it into the venv.

    Downloads and compiles protobuf (static, -fPIC) into a cached install
    directory, copies it into the virtualenv, links the protoc binary, and
    finally installs the Python bindings via wheeling.

    :param robustus: robustus instance (provides cache/env paths and settings)
    :param requirement_specifier: requirement with the protobuf version
    :param rob_file: .rob file passed through to the python-level install
    :param ignore_index: if True, never download/build, reuse cache only
    :raises RequirementException: when configure/make/make install fails
    """
    cwd = os.getcwd()
    os.chdir(robustus.cache)

    install_dir = os.path.join(robustus.cache, 'protobuf-%s' % requirement_specifier.version)
    if not os.path.isdir(install_dir) and not ignore_index:
        archive_name = 'protobuf-%s.tar.gz' % requirement_specifier.version
        subprocess.call(['wget', '-c', 'https://protobuf.googlecode.com/svn/rc/%s' % (archive_name,)])
        subprocess.call(['tar', 'zxvf', archive_name])

        # move sources to a folder in order to use a clean name for installation
        src_dir = 'protobuf-%s' % requirement_specifier.version
        shutil.move(src_dir, src_dir + '_src')
        src_dir += '_src'

        os.chdir(src_dir)
        os.mkdir(install_dir)
        # Pass -fPIC through configure arguments instead of mutating
        # os.environ: the previous code read os.environ['CFLAGS'], which
        # raises KeyError whenever CFLAGS is unset, leaked the override to
        # unrelated subprocesses, and missed CXXFLAGS entirely even though
        # protobuf is a C++ codebase.
        retcode = run_shell(['./configure', '--disable-shared',
                             'CFLAGS=-fPIC',
                             'CXXFLAGS=-fPIC',
                             '--prefix', install_dir],
                            verbose=robustus.settings['verbosity'] >= 1)
        if retcode:
            raise RequirementException('Failed to configure protobuf compilation')

        retcode = run_shell('make', shell=True,
                            verbose=robustus.settings['verbosity'] >= 1)
        if retcode:
            raise RequirementException('Failed compile protobuf')

        retcode = run_shell('make install', shell=True)
        if retcode:
            raise RequirementException('Failed install protobuf')

        os.chdir(robustus.cache)
        shutil.rmtree(src_dir)

    # Refresh the venv copy of the cached build and expose protoc.
    venv_install_folder = os.path.join(robustus.env, 'protobuf')
    if os.path.exists(venv_install_folder):
        shutil.rmtree(venv_install_folder)
    shutil.copytree(install_dir, venv_install_folder)
    executable_path = os.path.join(install_dir, 'bin', 'protoc')
    ln(executable_path, os.path.join(robustus.env, 'bin', 'protoc'), force=True)
    os.chdir(cwd)

    # now install python part
    robustus.install_through_wheeling(requirement_specifier, rob_file, ignore_index)
|
Python
| 0.000001
|
@@ -753,37 +753,31 @@
ion%0A
-subprocess.ca
+run_she
ll(%5B'wget',
@@ -848,63 +848,183 @@
e,)%5D
-)%0A subprocess.call(%5B'tar', 'zxvf', archive_name%5D
+,%0A verbose=robustus.settings%5B'verbosity'%5D %3E= 1)%0A run_shell(%5B'tar', 'zxvf', archive_name%5D,%0A verbose=robustus.settings%5B'verbosity'%5D %3E= 1
)%0A%0A
@@ -1308,144 +1308,184 @@
-old_cflags = os.environ%5B'CFLAGS'%5D%0A os.environ%5B'CFLAGS'%5D = '-fPIC'%0A retcode = run_shell(%5B'./configure', '--disable-shared',
+retcode = run_shell(%5B'./configure', '--disable-shared',%0A 'CFLAGS=-fPIC',%0A 'CXXFLAGS=-fPIC',%0A
'--
@@ -1583,50 +1583,8 @@
= 1)
-%0A os.environ%5B'CFLAGS'%5D = old_cflags
%0A%0A
|
ae2981b26fce2641a9bae5af68a3d5043fdd8b46
|
Fix disapear exception message (#31)
|
ovh/exceptions.py
|
ovh/exceptions.py
|
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2013-2016, OVH SAS.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of OVH SAS nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY OVH SAS AND CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL OVH SAS AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
All exceptions used in OVH SDK derives from `APIError`
"""
class APIError(Exception):
    """Base OVH API exception, all specific exceptions inherits from it."""

    def __init__(self, *args, **kwargs):
        """Store the raw HTTP ``response`` and forward everything else.

        ``response`` is ``pop()``ed (not merely read) before delegating to
        ``Exception.__init__``: leaving it in ``kwargs`` would hand the base
        constructor an unexpected keyword, and never calling ``super()`` let
        the exception message get lost.
        """
        self.response = kwargs.pop('response', None)
        super(APIError, self).__init__(*args, **kwargs)
class HTTPError(APIError):
    """Raised when the request fails at a low level (DNS, network, ...)"""

class InvalidKey(APIError):
    """Raised when trying to sign request with invalid key"""

class InvalidCredential(APIError):
    """Raised when trying to sign request with invalid consumer key"""

class InvalidResponse(APIError):
    """Raised when api response is not valid json"""

class InvalidRegion(APIError):
    """Raised when region is not in `REGIONS`."""

class ReadOnlyError(APIError):
    """Raised when attempting to modify readonly data."""

class ResourceNotFoundError(APIError):
    """Raised when requested resource does not exist."""

class BadParametersError(APIError):
    """Raised when request contains bad parameters."""

class ResourceConflictError(APIError):
    """Raised when trying to create an already existing resource."""

class NetworkError(APIError):
    """Raised when there is an error from network layer."""

class NotGrantedCall(APIError):
    """Raised when the credentials do not grant the attempted API call
    (docstring was a copy-paste of NetworkError's -- wording inferred from
    the class name, verify against the OVH API error catalogue)."""

class NotCredential(APIError):
    """Raised when the supplied consumer key is not a valid credential
    (wording inferred from the class name, verify against the OVH API)."""

class Forbidden(APIError):
    """Raised when the API denies access to the requested resource
    (wording inferred from the class name, verify against the OVH API)."""
|
Python
| 0
|
@@ -1772,11 +1772,11 @@
rgs.
-get
+pop
('re
@@ -1782,16 +1782,78 @@
esponse'
+, None)%0A super(APIError, self).__init__(*args, **kwargs
)%0A%0Aclass
|
63f6637228153b1f77ca860c297ff3554d802ce9
|
Fix order history sorting logic, #sort() should be called before #reverse().
|
model/orderbook.py
|
model/orderbook.py
|
# -*- encoding:utf8 -*-
import os
from model.oandapy import oandapy
class OrderBook(object):
    """Fetch order book snapshots from the OANDA practice API."""

    def get_latest_orderbook(self, instrument, period, history):
        """Return the order book snapshot *history* steps back from the newest.

        :param instrument: instrument name passed to the OANDA API
        :param period: unused (kept for interface compatibility)
        :param history: 0 for the newest snapshot, 1 for the one before, ...
        :return: the snapshot dict with its timestamp under 'time',
                 or None when the data is unavailable or *history* is out of range
        """
        oanda_token = os.environ.get('OANDA_TOKEN')
        oanda = oandapy.API(environment="practice", access_token=oanda_token)
        orders = oanda.get_orderbook(instrument=instrument)

        try:
            # Dict key order is arbitrary: sort chronologically first, then
            # take newest-first. Reversing the unsorted keys (as before)
            # returned an essentially random snapshot as "latest".
            timeset = sorted(orders.keys(), reverse=True)
            target_time = timeset[history]
        except (AttributeError, IndexError, TypeError):
            # orders missing/malformed, or history out of range / not an int
            return None

        order = orders[target_time]
        order['time'] = target_time
        return order
|
Python
| 0.000019
|
@@ -394,16 +394,43 @@
.keys()%0A
+ timeset.sort()%0A
|
6741c59d726f1ceaf6edba82b6e97f501fc265ee
|
fix zero shape bug!
|
src/scripts/make_parts_dataset.py
|
src/scripts/make_parts_dataset.py
|
import sys
import os
# Make sibling modules and the configured Caffe build importable before the
# imports below -- order matters here.
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
import settings
sys.path.append(settings.CAFFE_PYTHON_PATH)
import skimage.io
import caffe
import numpy as np
import click
from glob import glob

import utils
from dataset import CUB_200_2011
from parts import Parts
@click.command()
@click.argument('out-path', type=click.Path(exists=True))
def main(out_path):
cub = CUB_200_2011(settings.CUB_ROOT)
cub_images = cub.get_all_images()
for image in cub_images:
image_path = image['img_file']
image_id = image['img_id']
cub_parts = cub.get_parts()
rel_image_path = image_path[len(settings.CUB_IMAGES_FOLDER):]
o_image = caffe.io.load_image(image_path)
parts = cub_parts.for_image(image_id)
head_parts = parts.filter_by_name(Parts.HEAD_PART_NAMES)
if len(head_parts) <= 2:
print "#parts:%d \tID:%d \tName:%s" % (len(head_parts), int(image_id), rel_image_path)
if len(head_parts) <= 1:
continue
part_image = head_parts.get_rect(o_image)
out_image_path = os.path.join(out_path, rel_image_path)
utils.ensure_dir(os.path.dirname(out_image_path))
skimage.io.imsave(out_image_path, part_image)
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -1085,16 +1085,183 @@
_image)%0A
+ if 0 in part_image.shape:%0A print %22#parts:%25d %5CtID:%25d %5CtName:%25s + Shape:%25s%22 %25 (len(head_parts), int(image_id), rel_image_path, str(part_image.shape))%0A
|
2466a9b3e0c95c86bf3b2d220d7635a6061eeadd
|
Fix some moments in roots.py
|
module1-2/roots.py
|
module1-2/roots.py
|
# Кириллов Алексей, ИУ7-22
# Уточнение корней методом brenth
from math import sin, cos, ceil, log
from scipy.optimize import brenth
from time import perf_counter as perfc
from prettytable import PrettyTable
from numpy import arange
import matplotlib.pyplot as plt
# NOTE(review): the triple-quoted block below is disabled code kept around as
# an alternative test function set (y = sin x); as written it is a no-op
# module-level string literal.
'''
def f(x):
    return sin(x)
def fs(x):
    return -1*cos(x)
def fss(x):
    return -1*sin(x)
'''
def f(x):
    """Target function whose roots are located: sin(x) scaled by 2**(x/10)."""
    scale = 2 ** (x / 10)
    return sin(x) * scale
def fs(x):
    """Analytic first derivative of f."""
    trig_part = 10 * cos(x) + log(2) * sin(x)
    return 2 ** (x / 10 - 2) * trig_part
def fss(x):
    """Analytic second derivative of f."""
    ln2 = log(2)
    inner = (ln2 * ln2 - 100) * sin(x) + 20 * ln2 * cos(x)
    return (2 ** (x / 10 - 3) * inner) / 5
# Interactive setup: interval [a, b], tolerance, and subdivision count/step.
print('Вывод первой строки занял {:4.2g}s\n'.format(perfc()))
print('Уточнение корней методом brenth, функция y=sin(x)\n')
# Bounds are real numbers: the old map(int, ...) needlessly restricted the
# interval to integer endpoints even though eps_x and h are floats.
a, b = map(float, input('Задайте границы интервала: ').split())
eps_x = float(input('Укажите требуемую точность по X (>=0): '))
n = int(input('Введите число разбиений (0, если нужен шаг): '))
if n:
    h = (b-a)/n
else:
    # Step mode: derive the number of subdivisions from the step size.
    h = float(input('Введите шаг (больше нуля): '))
    n = ceil((b-a+eps_x)/h)
max_iter = int(input('Задайте максимальное число итераций: '))
print('\n')
start_time = perfc()
x_points = []       # roots of f
x_points_fs = []    # roots of f' (extrema of f)
x_points_fss = []   # roots of f'' (inflection points of f)
table = PrettyTable(["№", "x1", "x2", "x", "f(x)", "error"])

# Locate zeros of the function and of its two derivatives, one bracketing
# subinterval [x_left, x_right] at a time.
for i in range(n):
    try:
        x_left = a+h*i
        x_right = a+h*(i+1)
        if x_right > b:
            x_right = b
        x = brenth(f, x_left, x_right, xtol= eps_x, maxiter= max_iter)
        x_points.append(x)
        table.add_row([i+1,"{: 5.5f}".format(x_left),"{: 5.5f}".\
            format(x_right),"{: 9.6f}".format(x),"{: 2.1g}".format(f(x)), 0])
    except ValueError:
        # Same sign at both ends: no bracketed root in this subinterval.
        #table.add_row([i+1, "{: 5.5f}".format(x_left), "{: 5.5f}".\
        #    format(x_right), "-", "-", 1])
        pass
    except RuntimeError:
        # Did not converge within max_iter iterations (error code 2).
        table.add_row([i+1, "{: 5.5f}".format(x_left), "{: 5.5f}".\
            format(x_right), "-", "-", 2])
        pass
    # Derivative roots are best-effort only: failures are silently skipped.
    try:
        x_points_fs.append(brenth(fs, (x_left), (x_right), xtol= eps_x,\
            maxiter= 1000))
    except:
        pass
    try:
        x_points_fss.append(brenth(fss, (x_left), (x_right), xtol= eps_x,\
            maxiter= 1000))
    except:
        pass
print(table)
print("\n\nОбщее время вычислений и вывода - {:5.2g}с"\
    .format(perfc() - start_time))

# Plot the function with the located special points.
#plt.xkcd(scale=1, length=100, randomness=3)
plt.axhline(0, color='black')
plt.axvline(0, color='black')
x_graph = arange(a-eps_x, b+eps_x, abs(a-b)/300)
y_graph = [f(i) for i in x_graph]
plt.plot(x_graph, y_graph)

if len(x_points) > 0:
    # Zero crossings of f.
    y_points = [f(i) for i in x_points]
    plt.plot(x_points, y_points, marker='x', ls='',\
        label='zero points', markersize=15, color='r')

if len(x_points_fs) > 0:
    # Extrema (roots of f').
    y_points_fs = [f(i) for i in x_points_fs]
    plt.plot(x_points_fs, y_points_fs, marker='o', ls='',\
        label='extreme points', color='g')

if len(x_points_fss) > 0:
    # Inflection points (roots of f'').
    y_points_fss = [f(i) for i in x_points_fss]
    plt.plot(x_points_fss, y_points_fss, marker='o', ls='',\
        label='bench points', color='y')

if len(x_points_fs) > 0:
    # Highlight the extreme points with the largest/smallest f value.
    plt.plot([x_points_fs[y_points_fs.index(max(y_points_fs))]],\
        [max(y_points_fs)], marker='v', ls='',\
        label='max point', color='c', markersize=9)
    plt.plot([x_points_fs[y_points_fs.index(min(y_points_fs))]],\
        [min(y_points_fs)], marker='^', ls='',\
        label='min point', color='m', markersize=9)

plt.legend(loc=0)
plt.xlim(a, b)
plt.ylim(-2, 2)
plt.show()

print("\nОбщее время работы программы - {:5.2g}с".format(perfc()))
|
Python
| 0.004045
|
@@ -719,12 +719,15 @@
map(
-in
+floa
t,
+
inpu
@@ -3170,48 +3170,8 @@
g')%0D
-%0A # %D0%A2%D0%BE%D1%87%D0%BA%D0%B8 %D0%BC%D0%B0%D0%BA%D1%81%D0%B8%D0%BC%D1%83%D0%BC%D0%B0 %D0%B8 %D0%BC%D0%B8%D0%BD%D0%B8%D0%BC%D1%83%D0%BC%D0%B0%0D%0A %0D
%0A%0D%0Ai
|
4e57f73597f8d5dc3ccee9d815657a774dc52d62
|
Handle package dirs correctly
|
src/setuptools_epydoc/__init__.py
|
src/setuptools_epydoc/__init__.py
|
import os
import sys
import re
from setuptools import Command
class EpydocCommand(Command):
    '''
    Setuptools command used to build an API documentation with epydoc.

    @author: jwienke
    '''

    user_options = [('format=', 'f',
                     'the output format to use (html and pdf)'),
                    ('config=', 'c',
                     'Epydoc configuration file'),
                    ('names=', None,
                     'Names of packages to document. Defaults to all '
                     'configured packages in the project. Comma-separated.'),
                    ('output-dir=', 'o',
                     'Folder for generated output. Default: docs'),
                    ('verbose', 'v', 'print verbose warnings')]
    description = 'Generates an API documentation using epydoc.'

    FORMAT_HTML = 'html'
    FORMAT_PDF = 'pdf'

    def initialize_options(self):
        '''Set the defaults for all supported options.'''
        self.format = None
        self.verbose = False
        self.config = None
        self.names = ''
        self.output_dir = 'docs'

    def finalize_options(self):
        '''Normalize options: default/validate format, split names on commas.'''
        if self.format is None:
            self.format = self.FORMAT_HTML
        # Unknown formats silently fall back to HTML.
        if not self.format in [self.FORMAT_HTML, self.FORMAT_PDF]:
            self.format = self.FORMAT_HTML
        self.names = [module.strip()
                      for module in re.split('[\s,]+', self.names)
                      if len(module.strip()) > 0]

    def run(self):
        '''Assemble an epydoc command line and invoke epydoc's CLI in-process.'''
        # ensure that everything that's needed is built
        self.run_command('build')

        outdir = os.path.join(self.output_dir, self.format)
        try:
            os.makedirs(outdir)
        except OSError:
            # directory already exists
            pass

        # build the argument string
        # 'foo' is a dummy argv[0] placeholder -- presumably epydoc's CLI
        # parses sys.argv[1:] only; verify before removing it.
        cmdline = ['foo']
        cmdline.append('--' + self.format)
        cmdline.append('-o')
        cmdline.append(outdir)
        if self.verbose:
            cmdline.append('-v')
        if self.config is not None:
            cmdline.append('--config')
            cmdline.append(self.config)

        base = self.get_finalized_command('build_py')
        if self.names is None or len(self.names) == 0:
            # No explicit names: document every configured package/module.
            for package, _, _ in base.find_all_modules():
                cmdline.append(package)
        else:
            cmdline = cmdline + self.names

        import copy
        import epydoc.cli as ep
        # epydoc.cli reads sys.argv directly, so swap it in and always
        # restore the original afterwards.
        argv = copy.copy(sys.argv)
        try:
            sys.argv = cmdline
            ep.cli()
        finally:
            sys.argv = argv
|
Python
| 0
|
@@ -1725,13 +1725,8 @@
= %5B
-'foo'
%5D%0A
@@ -2054,16 +2054,35 @@
ld_py')%0A
+ names = %5B%5D%0A
@@ -2206,30 +2206,127 @@
-cmdline.append(package
+pdir = base.get_package_dir(package)%0A names.append(pdir)%0A cmdline = cmdline + list(set(names)
)%0A
|
581dfd3190ccfabf43e6aebdedd1f022aa146b0e
|
Add default bedtools if not specified in the system file.
|
bcbio/variation/bedutils.py
|
bcbio/variation/bedutils.py
|
"""Utilities for manipulating BED files.
"""
import os
import shutil
import sys
import subprocess
import toolz as tz
from bcbio import utils
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils
from bcbio.provenance import do
from bcbio.variation import vcfutils
def get_sort_cmd():
    """Retrieve GNU coreutils sort command, using version-sort if available.

    Recent versions of sort have alpha-numeric sorting, which provides
    more natural sorting of chromosomes (chr1, chr2) instead of (chr1, chr10).
    This also fixes versions of sort, like 8.22 in CentOS 7.1, that have broken
    sorting without version sorting specified.

    https://github.com/chapmanb/bcbio-nextgen/issues/624
    https://github.com/chapmanb/bcbio-nextgen/issues/1017
    """
    # Probe the local sort's help text; `exit 0` keeps check_output from
    # raising when grep finds nothing.
    probe = subprocess.check_output("sort --help | grep version-sort; exit 0", shell=True)
    return "sort -V" if probe.strip() else "sort"
def clean_file(in_file, data, prefix="", bedprep_dir=None):
    """Prepare a clean sorted input BED file without headers.

    Strips track/browser/comment lines, drops non-increasing intervals via
    remove_bad, sorts by chromosome and start, and bgzips+indexes the result.
    Returns the cleaned path, or None when in_file is falsy.
    """
    if in_file:
        if not bedprep_dir:
            bedprep_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "bedprep"))
        out_file = os.path.join(bedprep_dir, "%s%s" % (prefix, os.path.basename(in_file))).replace(".gz", "")
        if not utils.file_uptodate(out_file, in_file):
            with file_transaction(data, out_file) as tx_out_file:
                py_cl = os.path.join(os.path.dirname(sys.executable), "py")
                cat_cmd = "zcat" if in_file.endswith(".gz") else "cat"
                sort_cmd = get_sort_cmd()
                # cmd is filled via locals() below: the local names
                # (cat_cmd, in_file, py_cl, sort_cmd, tx_out_file) are part
                # of the template and must not be renamed.
                cmd = ("{cat_cmd} {in_file} | grep -v ^track | grep -v ^browser | "
                       "grep -v ^# | "
                       "{py_cl} -x 'bcbio.variation.bedutils.remove_bad(x)' | "
                       "{sort_cmd} -k1,1 -k2,2n > {tx_out_file}")
                do.run(cmd.format(**locals()), "Prepare cleaned BED file", data)
        vcfutils.bgzip_and_index(out_file, data.get("config", {}), remove_orig=False)
        return out_file
def sort_merge(in_file, data):
    """Sort and merge a BED file, collapsing gene names.

    Writes <in_file>-sort.bed next to the input (only if out of date) and
    returns its path; column 4 values of merged intervals are joined with
    `bedtools merge -o distinct`.
    """
    out_file = "%s-sort.bed" % os.path.splitext(in_file)[0]
    if not utils.file_uptodate(out_file, in_file):
        with file_transaction(data, out_file) as tx_out_file:
            cat_cmd = "zcat" if in_file.endswith(".gz") else "cat"
            sort_cmd = get_sort_cmd()
            # cmd is filled via locals(): do not rename cat_cmd, in_file,
            # sort_cmd or tx_out_file.
            cmd = ("{cat_cmd} {in_file} | {sort_cmd} -k1,1 -k2,2n | "
                   "bedtools merge -i - -c 4 -o distinct > {tx_out_file}")
            do.run(cmd.format(**locals()), "Sort BED file", data)
    return out_file
def remove_bad(line):
    """Remove non-increasing BED lines which will cause variant callers to choke.

    Returns the line unchanged when end > start, otherwise None (dropping
    zero-length and inverted intervals).
    """
    fields = line.strip().split("\t")
    start, end = int(fields[1]), int(fields[2])
    return line if end > start else None
def merge_overlaps(in_file, data, distance=None, out_dir=None):
    """Merge bed file intervals to avoid overlapping regions.

    Overlapping regions (1:1-100, 1:90-100) cause issues with callers like FreeBayes
    that don't collapse BEDs prior to using them.

    :param in_file: BED file to merge; falsy input returns None
    :param data: sample data dict carrying config and work directories
    :param distance: optional max gap (bp) to merge across (`bedtools -d`)
    :param out_dir: optional explicit output directory
    """
    config = data["config"]
    if in_file:
        # Fall back to a plain "bedtools" on the PATH when the system
        # configuration does not pin the program explicitly.
        bedtools = config_utils.get_program("bedtools", config,
                                            default="bedtools")
        work_dir = tz.get_in(["dirs", "work"], data)
        if out_dir:
            bedprep_dir = out_dir
        elif work_dir:
            bedprep_dir = utils.safe_makedir(os.path.join(work_dir, "bedprep"))
        else:
            bedprep_dir = os.path.dirname(in_file)
        out_file = os.path.join(bedprep_dir, "%s-merged.bed" % (utils.splitext_plus(os.path.basename(in_file))[0]))
        if not utils.file_exists(out_file):
            with file_transaction(data, out_file) as tx_out_file:
                distance = "-d %s" % distance if distance else ""
                # cmd is filled via locals(): bedtools, distance, in_file and
                # tx_out_file names are part of the template.
                cmd = "{bedtools} merge {distance} -i {in_file} > {tx_out_file}"
                do.run(cmd.format(**locals()), "Prepare merged BED file", data)
        vcfutils.bgzip_and_index(out_file, config, remove_orig=False)
        return out_file
def clean_inputs(data):
    """Clean BED input files to avoid overlapping segments that cause downstream issues.

    Per-merges inputs to avoid needing to call multiple times during later parallel steps.
    """
    vr_key = ("config", "algorithm", "variant_regions")
    cleaned = clean_file(utils.get_in(data, vr_key), data)
    merge_overlaps(cleaned, data)
    data["config"]["algorithm"]["variant_regions"] = cleaned
    return data
def combine(in_files, out_file, config):
    """Combine multiple BED files into a single output.

    Concatenates in_files into out_file (via a transactional temp file);
    a pre-existing out_file is returned untouched.
    """
    if utils.file_exists(out_file):
        return out_file
    with file_transaction(config, out_file) as tx_out_file:
        with open(tx_out_file, "w") as out_handle:
            for fname in in_files:
                with open(fname) as in_handle:
                    shutil.copyfileobj(in_handle, out_handle)
    return out_file
|
Python
| 0
|
@@ -3247,32 +3247,60 @@
g them.%0A %22%22%22%0A
+ config = data%5B%22config%22%5D%0A
if in_file:%0A
@@ -3355,30 +3355,86 @@
tools%22,
-data%5B%22config%22%5D
+config,%0A default=%22bedtools%22
)%0A
|
48cd6af0e138dd28b18ca3a71f41976c71483445
|
Add --forceuninstall option
|
Python/brewcaskupgrade.py
|
Python/brewcaskupgrade.py
|
#! /usr/bin/env python3
# -*- coding: utf8 -*-
import argparse
import shutil
from subprocess import check_output, run
# Command-line options: --pretend only prints the commands it would run.
parser = argparse.ArgumentParser(description='Update every entries found in cask folder.')
parser.add_argument('--pretend', dest='pretend', action='store_true',
                    help='Pretend to take action.')
parser.set_defaults(pretend=False)
args = parser.parse_args()

brew_bin = 'brew'
if not shutil.which(brew_bin):
    # FileNotFoundError: the binary is *missing* (the old FileExistsError
    # asserted the opposite condition).
    raise FileNotFoundError(brew_bin + ' not exists')

list_command = [
    brew_bin,
    'cask',
    'list'
]

# One cask name per line; drop empty entries (the old `i is not ''` compared
# object identity rather than equality, a SyntaxWarning on Python 3.8+).
list_installed = str.split(check_output(list_command).decode(), '\n')
list_installed = [i for i in list_installed if i]
print(str(len(list_installed)) + ' cask(s) installed')
# Walk every installed cask; reinstall the ones whose installed version no
# longer matches the latest version reported by `brew cask info`.
updated_count = 0
for cask in list_installed:
    info_command = [
        brew_bin,
        'cask',
        'info',
        cask
    ]
    try:
        install_status = str.splitlines(check_output(info_command).decode())
    except Exception:
        # `brew cask info` failed. The old code stored the *string*
        # 'Not installed' here; the version parse below then evaluated
        # 'N'.split(':')[1] and crashed with IndexError. Skip the cask.
        continue

    # First info line looks like "<cask>: <version>".
    version = str.strip(str.split(install_status[0], ':')[1])

    # The version is installed iff some non-header line mentions both the
    # cask name and that version.
    is_version_installed = False
    for line in install_status:
        if not line.startswith(cask) and cask in line and version in line:
            is_version_installed = True

    if not is_version_installed:
        print('Installing', cask)
        install_command = [
            brew_bin,
            'cask',
            'install',
            '--force',
            cask
        ]
        if args.pretend:
            print(' '.join(install_command))
        else:
            run(install_command)
        updated_count += 1

print(str(updated_count) + ' cask(s) updated')
|
Python
| 0
|
@@ -338,28 +338,194 @@
ser.
-set_defaults(pretend
+add_argument('--forceuninstall', dest='forceuninstall', action='store_true',%0A help='Force uninstall before install.')%0Aparser.set_defaults(pretend=False, forceuninstall
=Fal
@@ -1242,41 +1242,8 @@
1%5D)%0A
- is_version_installed = False%0A
@@ -1386,16 +1386,49 @@
= True%0A%0A
+ is_version_installed = False%0A
if n
@@ -1713,16 +1713,294 @@
else:%0A
+ if args.forceuninstall:%0A uninstall_command = %5B%0A brew_bin,%0A 'cask',%0A 'uninstall',%0A '--force',%0A cask%0A %5D%0A run(uninstall_command)%0A
@@ -2052,16 +2052,16 @@
t += 1%0A%0A
-
print(st
@@ -2094,12 +2094,13 @@
s) updated')
+%0A
|
1e0f2f61976c61aace85a05dc21181fe06065560
|
version bump; delta-mag fitting now available for BinaryStarModel
|
isochrones/__init__.py
|
isochrones/__init__.py
|
__version__ = '0.8.1'

try:
    # Presumably injected by setup.py so heavy submodule imports can be
    # skipped during installation (numpy-style guard) -- TODO confirm.
    __ISOCHRONES_SETUP__
except NameError:
    __ISOCHRONES_SETUP__ = False

if not __ISOCHRONES_SETUP__:
    __all__ = ['dartmouth','basti','padova',
               'Isochrone', 'StarModel', 'BinaryStarModel',
               'TripleStarModel']
    from .isochrone import Isochrone
    from .starmodel import StarModel, BinaryStarModel, TripleStarModel
|
Python
| 0
|
@@ -16,9 +16,9 @@
0.8.
-1
+2
'%0A%0At
|
a4bc6c0c4d13629dbdfef30edcba262efce0eaff
|
fix up config for heroku
|
colorsearchtest/settings.py
|
colorsearchtest/settings.py
|
# -*- coding: utf-8 -*-
import os
os_env = os.environ
class Config(object):
    """Base configuration shared by all environments."""
    SECRET_KEY = os_env.get('COLORSEARCHTEST_SECRET', 'secret-key')  # TODO: Change me
    APP_DIR = os.path.abspath(os.path.dirname(__file__))  # This directory
    PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))
    SQLALCHEMY_DATABASE_URI = os_env.get(
        'COLORSEARCHTEST_DATABASE_URI',
        'postgresql://localhost/example')  # TODO: Change me
    BCRYPT_LOG_ROUNDS = 13
    ASSETS_DEBUG = False
    DEBUG_TB_ENABLED = False  # Disable Debug toolbar
    DEBUG_TB_INTERCEPT_REDIRECTS = False
    CACHE_TYPE = 'simple'  # Can be "memcached", "redis", etc.
    # Presumably toggles between the two delta-E computation paths
    # (colormath in Python vs. a DB-side query) -- confirm against callers.
    IS_DELTA_E_COLORMATH_ENABLED = False
    IS_DELTA_E_DBQUERY_ENABLED = True
    MAX_COLORS = 100
class ProdConfig(Config):
    """Production configuration."""
    ENV = 'prod'
    DEBUG = False
    DEBUG_TB_ENABLED = False  # Disable Debug toolbar
class DevConfig(Config):
    """Development configuration."""
    ENV = 'dev'
    DEBUG = True
    DEBUG_TB_ENABLED = True
    ASSETS_DEBUG = True  # Don't bundle/minify static assets
    CACHE_TYPE = 'simple'  # Can be "memcached", "redis", etc.
class TestConfig(Config):
    """Configuration used by the test suite."""
    TESTING = True
    DEBUG = True
    BCRYPT_LOG_ROUNDS = 1  # For faster tests
    WTF_CSRF_ENABLED = False  # Allows form testing
|
Python
| 0.000001
|
@@ -337,20 +337,173 @@
I =
-os_env.get(%0A
+((os.environ.get('HEROKU') is not None)%0A and os_env.get(%0A 'DATABASE_URL',%0A 'postgresql://localhost/example')%0A or os_env.get(%0A
@@ -538,16 +538,20 @@
E_URI',%0A
+
@@ -583,17 +583,17 @@
xample')
-
+)
# TODO:
|
ef1c1bb39a99555428df014c7ad5035c2746c35a
|
Version 1.3.2
|
moirai/__init__.py
|
moirai/__init__.py
|
# -*- coding: utf-8; -*-
#
# Copyright (c) 2016 Álan Crístoffer
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Package metadata consumed by setup tooling and introspection.
__author__ = 'Álan Crístoffer'
__copyright__ = 'Copyright 2016, Álan Crístoffer'
__credits__ = ['Álan Crístoffer']
__license__ = 'MIT'
__version__ = '1.3.1'
__maintainer__ = 'Álan Crístoffer'
__email__ = 'acristoffers@gmail.com'
__status__ = 'Development'
|
Python
| 0
|
@@ -1269,17 +1269,17 @@
= '1.3.
-1
+2
'%0A__main
|
f20eb91dcf04bc8e33fbb48ebfbef1b56acbf02d
|
Make functions that pull a number of tweets and pics
|
web.py
|
web.py
|
""" Heroku/Python Quickstart: https://blog.heroku.com/archives/2011/9/28/python_and_django"""
import os
from flask import Flask
app = Flask(__name__)


@app.route('/')
def home_page():
    """Landing page: a static greeting."""
    greeting = 'Hello from the SPARK learn-a-thon!'
    return greeting


if __name__ == '__main__':
    # Heroku injects the listening port via $PORT; default to 5000 locally.
    port = int(os.environ.get("PORT", 5000))
    app.run(host='0.0.0.0', port=port)
Python
| 0.000002
|
@@ -102,31 +102,94 @@
os%0A
-from flask import Flask
+import random%0Aimport requests%0Afrom flask import Flask%0A%0Aimport tweepy%0A%0Aimport settings%0A
%0A%0Aap
@@ -209,16 +209,17 @@
ame__)%0A%0A
+%0A
@app.rou
@@ -292,16 +292,1020 @@
thon!'%0A%0A
+%0Adef get_instagram_image():%0A instagram_api_url = 'https://api.instagram.com/v1/tags/spark/media/recent?client_id=%7B%7D'.format(settings.CLIENT_ID)%0A%0A data = requests.get(instagram_api_url).json()%5B'data'%5D%0A number_of_images = choose_number_of_images()%0A%0A images_returned = %5B%5D%0A for image in number_of_images:%0A images_returned.append(random.choice(data)%5B'images'%5D%5B'low_resolution'%5D%5B'url'%5D)%0A%0A return images_returned%0A%0A%0Adef get_tweets():%0A auth = tweepy.OAuthHandler(settings.CONSUMER_KEY, settings.CONSUMER_SECRET)%0A auth.set_access_token(settings.ACCESS_KEY, settings.ACCESS_SECRET)%0A api = tweepy.API(auth)%0A%0A number_of_tweets = choose_number_of_tweets()%0A%0A tweets_text = %5B%5D%0A tweets = tweepy.Cursor(api.search, q='#spark')%0A for tweet in tweets.items(limit=number_of_tweets):%0A tweets_text.append(tweet.text)%0A%0A return tweets_text%0A%0A%0Adef choose_number_of_images():%0A number = 3%0A return number%0A%0A%0Adef choose_number_of_tweets():%0A number = 3%0A return number%0A%0A%0A
if __nam
|
2a87ed1772a530b07c69e1d2086cd54160dd440a
|
fix sample test
|
samples/snippets/speech_adaptation_beta.py
|
samples/snippets/speech_adaptation_beta.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DO NOT EDIT! This is a generated sample ("Request", "speech_adaptation_beta")
# To install the latest published package dependency, execute the following:
# pip install google-cloud-speech
# sample-metadata
# title: Speech Adaptation (Cloud Storage)
# description: Transcribe a short audio file with speech adaptation.
# usage: python3 samples/v1p1beta1/speech_adaptation_beta.py [--storage_uri "gs://cloud-samples-data/speech/brooklyn_bridge.mp3"] [--phrase "Brooklyn Bridge"]
# [START speech_adaptation_beta]
from google.cloud import speech_v1p1beta1
from google.cloud.speech_v1p1beta1 import enums
def sample_recognize(storage_uri, phrase):
    """
    Transcribe a short audio file with speech adaptation.

    Args:
      storage_uri URI for audio file in Cloud Storage, e.g. gs://[BUCKET]/[FILE]
      phrase Phrase "hints" help recognize the specified phrases from your audio.
    """
    client = speech_v1p1beta1.SpeechClient()

    # storage_uri = 'gs://cloud-samples-data/speech/brooklyn_bridge.mp3'
    # phrase = 'Brooklyn Bridge'
    phrases = [phrase]

    # Hint Boost. This value increases the probability that a specific
    # phrase will be recognized over other similar sounding phrases.
    # The higher the boost, the higher the chance of false positive
    # recognition as well. Can accept wide range of positive values.
    # Most use cases are best served with values between 0 and 20.
    # Using a binary search approach may help you find the optimal value.
    boost = 20.0
    speech_contexts_element = {"phrases": phrases, "boost": boost}
    speech_contexts = [speech_contexts_element]

    # Sample rate in Hertz of the audio data sent
    sample_rate_hertz = 44100

    # The language of the supplied audio
    language_code = "en-US"

    # Encoding of audio data sent. This sample sets this explicitly.
    # This field is optional for FLAC and WAV audio formats.
    encoding = enums.RecognitionConfig.AudioEncoding.MP3
    config = {
        "speech_contexts": speech_contexts,
        "sample_rate_hertz": sample_rate_hertz,
        "language_code": language_code,
        "encoding": encoding,
    }
    audio = {"uri": storage_uri}

    # Synchronous (short-audio) recognition; prints each top transcript.
    response = client.recognize(config, audio)
    for result in response.results:
        # First alternative is the most probable result
        alternative = result.alternatives[0]
        print(u"Transcript: {}".format(alternative.transcript))


# [END speech_adaptation_beta]
def main():
    """Command-line entry point: parse flags and run the recognition sample."""
    import argparse

    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--storage_uri",
        type=str,
        default="gs://cloud-samples-data/speech/brooklyn_bridge.mp3",
    )
    arg_parser.add_argument("--phrase", type=str, default="Brooklyn Bridge")
    parsed = arg_parser.parse_args()
    sample_recognize(parsed.storage_uri, parsed.phrase)


if __name__ == "__main__":
    main()
|
Python
| 0.000729
|
@@ -3068,16 +3068,36 @@
n_beta%5D%0A
+ return response%0A
%0A%0Adef ma
|
149d1ba75bfda348afb150ab0aa6d8785f850884
|
Remove wavefront sampler
|
demosys/loaders/scene/wavefront.py
|
demosys/loaders/scene/wavefront.py
|
import numpy
import pywavefront
from pywavefront import cache
from pywavefront.obj import ObjParser
import moderngl
from demosys.loaders.scene.base import SceneLoader
from demosys.opengl import VAO
from demosys.resources import textures
from demosys.resources.meta import SceneDescription, TextureDescription
from demosys.scene import Material, MaterialTexture, Mesh, Node, Scene
def translate_buffer_format(vertex_format):
    """Translate a pywavefront vertex format string into VAO metadata.

    Returns a 3-tuple: the space-separated buffer format string, the list
    of attribute names, and the list of (mesh name, attribute, size) tuples.
    Position data ("V3F") is always present and always comes last.
    """
    optional_components = (
        ("T2F", "2f", "in_uv", ("TEXCOORD_0", "in_uv", 2)),
        ("C3F", "3f", "in_color", ("NORMAL", "in_color", 3)),
        ("N3F", "3f", "in_normal", ("NORMAL", "in_normal", 3)),
    )

    buffer_format = []
    attributes = []
    mesh_attributes = []

    for token, fmt, attr_name, mesh_attr in optional_components:
        if token in vertex_format:
            buffer_format.append(fmt)
            attributes.append(attr_name)
            mesh_attributes.append(mesh_attr)

    # Position is unconditional and always the trailing component.
    buffer_format.append("3f")
    attributes.append("in_position")
    mesh_attributes.append(("POSITION", "in_position", 3))

    return " ".join(buffer_format), attributes, mesh_attributes
class VAOCacheLoader(cache.CacheLoader):
    """Load geometry data directly into vaos"""

    def load_vertex_buffer(self, fd, material, length):
        """Read ``length`` bytes of vertex data from ``fd`` straight into a VAO.

        The VAO and its format metadata are attached to ``material`` so that
        ObjLoader.load() can pick them up via the ``elif hasattr(mat, 'vao')``
        branch instead of re-parsing the geometry.
        """
        buffer_format, attributes, mesh_attributes = translate_buffer_format(material.vertex_format)

        vao = VAO(material.name, mode=moderngl.TRIANGLES)
        # buffer = context.ctx().buffer(fd.read(length))
        vao.buffer(fd.read(length), buffer_format, attributes)

        # Stash results on the material for later retrieval.
        setattr(material, 'vao', vao)
        setattr(material, 'buffer_format', buffer_format)
        setattr(material, 'attributes', attributes)
        setattr(material, 'mesh_attributes', mesh_attributes)


# Install our loader as pywavefront's binary cache loader.
ObjParser.cache_loader_cls = VAOCacheLoader
class ObjLoader(SceneLoader):
    """Load Wavefront .obj files (plain, gzipped, or binary-cached)."""
    file_extensions = [
        ['.obj'],
        ['.obj', '.gz'],
        ['.bin'],
    ]

    def __init__(self, meta: SceneDescription):
        super().__init__(meta)

    def load(self):
        """Deferred loading: build and return a prepared Scene.

        Raises:
            ValueError: if the scene path cannot be resolved.
        """
        path = self.find_scene(self.meta.path)
        if not path:
            raise ValueError("Scene '{}' not found".format(self.meta.path))

        # A .bin path refers to pywavefront's cache; strip the suffix so the
        # library resolves the original file name itself.
        if path.suffix == '.bin':
            path = path.parent / path.stem

        data = pywavefront.Wavefront(str(path), create_materials=True, cache=True)
        scene = Scene(self.meta.resolved_path)
        texture_cache = {}

        for _, mat in data.materials.items():
            mesh = Mesh(mat.name)

            # Traditional loader: vertices were parsed into Python lists.
            if mat.vertices:
                buffer_format, attributes, mesh_attributes = translate_buffer_format(mat.vertex_format)
                vbo = numpy.array(mat.vertices, dtype='f4')

                vao = VAO(mat.name, mode=moderngl.TRIANGLES)
                vao.buffer(vbo, buffer_format, attributes)
                mesh.vao = vao

                for attrs in mesh_attributes:
                    mesh.add_attribute(*attrs)

            # Binary cache loader: VAOCacheLoader already attached a VAO.
            elif hasattr(mat, 'vao'):
                mesh = Mesh(mat.name)
                mesh.vao = mat.vao
                for attrs in mat.mesh_attributes:
                    mesh.add_attribute(*attrs)
            else:
                # Empty
                continue

            scene.meshes.append(mesh)

            mesh.material = Material(mat.name)
            mesh.material.color = mat.diffuse
            if mat.texture:
                # A texture can be referenced multiple times, so we need to cache loaded ones
                texture = texture_cache.get(mat.texture.path)
                if not texture:
                    print("Loading:", mat.texture.path)
                    texture = textures.load(TextureDescription(
                        label=mat.texture.path,
                        path=mat.texture.path,
                    ))
                    texture_cache[mat.texture.path] = texture

                mesh.material.mat_texture = MaterialTexture(
                    texture=texture,
                    # sampler=samplers.create(
                    #     wrap_s=GL.GL_CLAMP_TO_EDGE,
                    #     wrap_t=GL.GL_CLAMP_TO_EDGE,
                    #     anisotropy=8,
                    # )
                )

            node = Node(mesh=mesh)
            scene.root_nodes.append(node)

        # Not supported yet for obj
        # self.calc_scene_bbox()
        scene.prepare()

        return scene
|
Python
| 0.000001
|
@@ -4186,18 +4186,16 @@
- #
sampler
@@ -4199,196 +4199,13 @@
ler=
-samplers.create(%0A # wrap_s=GL.GL_CLAMP_TO_EDGE,%0A # wrap_t=GL.GL_CLAMP_TO_EDGE,%0A # anisotropy=8,%0A # )
+None,
%0A
|
08539b462d6b875a754d1ec03fb86232e788754e
|
enable secure cookie
|
app/flask_loginmanager/__init__.py
|
app/flask_loginmanager/__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from functools import wraps
from flask import request, abort, g
import time
from .user_mixin import UserMixin
class LoginManager(object):
    """Cookie-based login manager supporting multiple roles per application.

    Cookies have the form ``<uid>-<expires>-<hash>``; the hash is produced by
    a per-role ``hash_generator`` callback and re-checked on every request.
    """

    # Markers for the pending cookie operation stored on flask.g.
    __SET = 'set'
    __CLEAR = 'clear'

    # role -> callable(uid) returning the user object (shared across instances).
    __user_loader_dict = {}
    # Response callbacks queued for the current request.
    __after_request_func_list = []
    # role -> callable(user) returning the cookie validation hash.
    __hash_generators = {}

    def __init__(self, app=None, role='default', expires=3600, salt=''):
        if app is not None:
            app.context_processor(LoginManager.__user_context_processor)
            app.after_request(LoginManager.__after_requset_funcs)
        self.__role = role
        self.__expires = expires
        self.__salt = salt
        self.__failure_handler = None

    def init_app(self, app):
        """Register the context processor and after-request hook on *app*."""
        if app is None:
            raise ValueError('app must be set')
        app.context_processor(LoginManager.__user_context_processor)
        app.after_request(LoginManager.__after_requset_funcs)

    # After password verification succeeds, call this to load the user info
    # and queue the login cookie for the current response.
    def login(self, user):
        g.option = LoginManager.__SET
        g.uid = LoginManager.get_id(user)
        g.start_time = int(time.time()) + self.__expires
        LoginManager.__after_request_func_list.append(self.__set_cookie)

    # Log out the current user (queues cookie deletion on the response).
    def logout(self):
        g.option = LoginManager.__CLEAR
        LoginManager.__after_request_func_list.append(self.__set_cookie)

    def __set_cookie(self, resp):
        # Apply the pending cookie operation (set or clear) to the response.
        if g.option is not None:
            if g.option == LoginManager.__SET:
                user = LoginManager.__user_loader_dict.get(self.__role)(g.uid)
                if user is not None:
                    info = [str(LoginManager.get_id(user)), str(g.start_time), LoginManager.__hash_generators.get(self.__role)(user)]
                    value = '-'.join(info)
                    resp.set_cookie(key=self.__role, value=value, expires=g.start_time, httponly=True)
            elif g.option == LoginManager.__CLEAR:
                resp.delete_cookie(key=self.__role)
        return resp

    @property
    def role(self):
        return self.__role

    @role.setter
    def role(self, role):
        self.__role = role

    @property
    def expires(self):
        return self.__expires

    @expires.setter
    def expires(self, expires):
        self.__expires = expires

    @property
    def salt(self):
        return self.__salt

    @salt.setter
    def salt(self, salt):
        self.__salt = salt

    # Get the current user (None when the cookie is absent or invalid).
    @property
    def current_user(self):
        return LoginManager.__load_user_from_cookie(self.__role)

    # Decorator: registers the handler invoked when a login_required check
    # fails, so requests from WEB and API clients can be handled differently.
    def failure_handler(self, handler):
        self.__failure_handler = handler
        return handler

    # Decorator: registers the callback that loads user info for this role.
    def user_loader(self, loader):
        LoginManager.__user_loader_dict[self.__role] = loader
        return loader

    # Decorator: registers the callback producing the cookie hash value.
    def hash_generator(self, generator):
        LoginManager.__hash_generators[self.__role] = generator
        return generator

    # Decorator that adds login (and optional permission) checks to a route.
    def login_required(self, *permissions):
        def decorator(func):
            @wraps(func)
            def wrapper(*args, **kw):
                if self.has_permissions(*permissions):
                    return func(*args, **kw)
                if self.__failure_handler is not None:
                    return self.__failure_handler()
                abort(401)
            return wrapper
        return decorator

    permissions_required = login_required

    def has_permissions(self, *permissions):
        # True when a user is logged in and holds every requested permission bit.
        user = self.current_user
        if user is not None:
            for p in permissions:
                if (user.get_permissions() & p) != p:
                    return False
            return True
        return False

    @staticmethod
    def get_id(user):
        # Users must implement get_id() (see UserMixin).
        try:
            func = getattr(user, "get_id")
        except AttributeError:
            raise NotImplementedError
        else:
            return func()

    @staticmethod
    def __load_user_from_cookie(role):
        # Validate the "<uid>-<expires>-<hash>" cookie; cache the user on g.
        if g.get('user'):
            return g.user
        if LoginManager.__user_loader_dict.get(role) is None:
            raise NotImplementedError('user_loader must be set!')
        if LoginManager.__hash_generators.get(role, None) is None:
            raise NotImplementedError('hash_generator must be set!')
        cookie = request.cookies.get(role)
        if cookie is not None:
            info = cookie.split('-')
            if len(info) == 3:
                uid = info[0]
                expires = info[1]
                hash_client = info[2]
                if int(expires) > time.time():
                    user = LoginManager.__user_loader_dict.get(role)(uid)
                    if user is not None:
                        hash_server = LoginManager.__hash_generators.get(role)(user)
                        if hash_client == hash_server:
                            g.user = user
                            return user
        return None

    @staticmethod
    def __user_context_processor():
        # Exposes current_user to templates; templates call it with a role.
        return dict(current_user=LoginManager.__load_user_from_cookie)

    @staticmethod
    def __after_requset_funcs(resp):
        # Run and clear all queued per-request response callbacks.
        for func in LoginManager.__after_request_func_list:
            func(resp)
        LoginManager.__after_request_func_list.clear()
        return resp
# Public API of this package.
__all__ = [
    LoginManager.__name__,
    UserMixin.__name__,
]
|
Python
| 0.000001
|
@@ -103,16 +103,29 @@
abort, g
+, current_app
%0Aimport
@@ -1753,16 +1753,167 @@
(info)%0A%0A
+ httponly = current_app.config%5B'SESSION_COOKIE_HTTPONLY'%5D%0A secure = current_app.config%5B'SESSION_COOKIE_SECURE'%5D%0A%0A
@@ -2001,19 +2001,38 @@
ttponly=
-Tru
+httponly, secure=secur
e)%0A%0A
|
74a5836d41386a847d2e69e2335e0825fb64972f
|
Add CPU_ONLY tag for sparse_feature_hash layer
|
caffe2/python/layers/sparse_feature_hash.py
|
caffe2/python/layers/sparse_feature_hash.py
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
## @package sparse_feature_hash
# Module caffe2.python.layers.sparse_feature_hash
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import (
ModelLayer,
IdList,
IdScoreList,
)
import numpy as np
class SparseFeatureHash(ModelLayer):
    """Layer that remaps sparse id features into [0, modulo).

    Accepts an IdList or IdScoreList record and produces a record of the same
    shape whose ids are either hashed (IndexHash) or reduced modulo the table
    size (Mod), depending on ``use_hashing``.
    """

    def __init__(self, model, input_record, seed=0, modulo=None,
                 use_hashing=True, name='sparse_feature_hash', **kwargs):
        super(SparseFeatureHash, self).__init__(model, name, input_record, **kwargs)
        self.seed = seed
        self.use_hashing = use_hashing
        if schema.equal_schemas(input_record, IdList):
            # Hash the list items; lengths pass through unchanged.
            self.modulo = modulo or self.extract_hash_size(input_record.items.metadata)
            metadata = schema.Metadata(
                categorical_limit=self.modulo,
                feature_specs=input_record.items.metadata.feature_specs,
            )
            hashed_indices = schema.Scalar(
                np.int64,
                self.get_next_blob_reference("hashed_idx")
            )
            hashed_indices.set_metadata(metadata)
            self.output_schema = schema.List(
                values=hashed_indices,
                lengths_blob=input_record.lengths,
            )
        elif schema.equal_schemas(input_record, IdScoreList):
            # Hash the map keys; scores and lengths pass through unchanged.
            self.modulo = modulo or self.extract_hash_size(input_record.keys.metadata)
            metadata = schema.Metadata(
                categorical_limit=self.modulo,
                feature_specs=input_record.keys.metadata.feature_specs,
            )
            hashed_indices = schema.Scalar(
                np.int64,
                self.get_next_blob_reference("hashed_idx")
            )
            hashed_indices.set_metadata(metadata)
            self.output_schema = schema.Map(
                keys=hashed_indices,
                values=input_record.values,
                lengths_blob=input_record.lengths,
            )
        else:
            assert False, "Input type must be one of (IdList, IdScoreList)"

        assert self.modulo >= 1, 'Unexpected modulo: {}'.format(self.modulo)

    def extract_hash_size(self, metadata):
        """Pick the hash table size: desired_hash_size wins over categorical_limit."""
        if metadata.feature_specs and metadata.feature_specs.desired_hash_size:
            return metadata.feature_specs.desired_hash_size
        elif metadata.categorical_limit is not None:
            return metadata.categorical_limit
        else:
            assert False, "desired_hash_size or categorical_limit must be set"

    def add_ops(self, net):
        """Emit IndexHash (when use_hashing) or Mod into *net*."""
        if schema.equal_schemas(self.output_schema, IdList):
            input_blob = self.input_record.items()
            output_blob = self.output_schema.items()
        elif schema.equal_schemas(self.output_schema, IdScoreList):
            input_blob = self.input_record.keys()
            output_blob = self.output_schema.keys()
        else:
            raise NotImplementedError()

        if self.use_hashing:
            net.IndexHash(
                input_blob, output_blob, seed=self.seed, modulo=self.modulo
            )
        else:
            net.Mod(
                input_blob, output_blob, divisor=self.modulo, sign_follow_divisor=True
            )
|
Python
| 0.000001
|
@@ -1017,16 +1017,67 @@
eList,%0A)
+%0Afrom caffe2.python.layers.tags import (%0A Tags%0A)
%0A%0Aimport
@@ -2953,16 +2953,288 @@
odulo)%0A%0A
+ # operators in this layer do not have CUDA implementation yet.%0A # In addition, since the sparse feature keys that we are hashing are%0A # typically on CPU originally, it makes sense to have this layer on CPU.%0A self.tags.update(%5BTags.CPU_ONLY%5D)%0A%0A
def
|
0d58c2ffc8ec6afc353a242f942f668b0b7f362c
|
Correct shipping repository method calls
|
sandbox/apps/shipping/repository.py
|
sandbox/apps/shipping/repository.py
|
from decimal import Decimal as D
from oscar.apps.shipping.methods import Free, FixedPrice
from oscar.apps.shipping.repository import Repository as CoreRepository
class Repository(CoreRepository):
    """
    This class is included so that there is a choice of shipping methods.
    Oscar's default behaviour is to only have one which means you can't test the
    shipping features of PayPal.
    """

    def get_shipping_methods(self, user, basket, shipping_addr=None, **kwargs):
        # One free method plus two fixed-price ones, so the UI has a real choice.
        prices = (D('10.00'), D('20.00'))
        methods = [Free()] + [FixedPrice(price) for price in prices]
        return self.add_basket_to_methods(basket, methods)
|
Python
| 0
|
@@ -354,16 +354,16 @@
test
- the
%0A
+ the
shi
@@ -576,21 +576,13 @@
elf.
-add_basket_to
+prime
_met
|
edec18a82d6027c8a011fbef84c8aa3b80e18826
|
Update forward_device1.py
|
Server/forward_device1.py
|
Server/forward_device1.py
|
import zmq


# NOTE: Python 2 source (print statements and `except Exception, e` syntax).
def main():
    """Run a ZeroMQ FORWARDER device bridging SUB (port 5559) to PUB (5560)."""
    print "\nServer for ProBot is running..."

    try:
        context = zmq.Context(1)
        # Socket facing clients
        frontend = context.socket(zmq.SUB)
        frontend.bind("tcp://*:5559")
        # Empty subscription filter: forward every message.
        frontend.setsockopt(zmq.SUBSCRIBE, "")

        # Socket facing services
        backend = context.socket(zmq.PUB)
        backend.bind("tcp://*:5560")

        # Blocks until interrupted, shuttling messages frontend -> backend.
        zmq.device(zmq.FORWARDER, frontend, backend)
    except Exception, e:
        print e
        print "bringing down zmq device"
    finally:
        pass
        # NOTE(review): if socket creation failed, these names are unbound
        # and this cleanup itself raises — confirm intended.
        frontend.close()
        backend.close()
        context.term()

if __name__ == "__main__":
    main()
|
Python
| 0.000006
|
@@ -218,17 +218,16 @@
:5559%22)%0A
-%0A
@@ -651,10 +651,8 @@
main()%0A
-%0A%0A
|
2100b512ffb188374e1d883cd2f359586182596b
|
ADD migration name
|
packages/grid/backend/alembic/versions/2021-09-20_916812f40fb4.py
|
packages/grid/backend/alembic/versions/2021-09-20_916812f40fb4.py
|
"""empty message
Revision ID: 916812f40fb4
Revises: 5796f6ceb314
Create Date: 2021-09-20 01:07:37.239186
"""
# third party
from alembic import op # type: ignore
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "916812f40fb4"
down_revision = "5796f6ceb314"
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Add the ``daa_document`` string column to the ``setup`` table."""
    # ``default=""`` is a client-side (SQLAlchemy) default; existing rows are
    # left NULL in the database.
    op.add_column("setup", sa.Column("daa_document", sa.String(255), default=""))
def downgrade() -> None:
    # Intentionally a no-op: this migration does not drop the column on downgrade.
    pass
|
Python
| 0.000003
|
@@ -1,19 +1,44 @@
%22%22%22
-empty messag
+ADD daa_document column at setup tabl
e%0A%0AR
|
80ede493f698395176d3c67dd1e4f3723b0d5859
|
Add initial pass at writing the git commit hook
|
mothermayi/hook.py
|
mothermayi/hook.py
|
import logging
import os
LOGGER = logging.getLogger(__name__)
class NoRepoFoundError(Exception):
    """Raised when no .git directory exists in the cwd or any of its parents."""
    pass
def find_git_repo():
    """Walk upward from the current directory and return the first .git path.

    Raises:
        NoRepoFoundError: when the filesystem root is reached without a hit.
    """
    current = os.path.abspath('.')
    while current != '/':
        candidate = os.path.join(current, '.git')
        if os.path.exists(candidate) and os.path.isdir(candidate):
            return candidate
        current = os.path.dirname(current)
    raise NoRepoFoundError("Could not find a git repository (.git) in {}".format(os.path.abspath('.')))
def install():
    """Locate the enclosing git repository and log where it was found."""
    repo_path = find_git_repo()
    LOGGER.debug("Found git repo at %s", repo_path)
|
Python
| 0
|
@@ -102,16 +102,60 @@
pass%0A%0A
+class PreCommitExists(Exception):%0A pass%0A%0A
def find
@@ -515,93 +515,515 @@
))%0A%0A
-def install():%0A repo = find_git_repo()%0A LOGGER.debug(%22Found git repo at %25s%22, repo
+HOOK_CONTENT = %22%22%22%0Amothermayi run%0A%22%22%22%0A%0Adef write_hook(pre_commit):%0A with open(pre_commit, 'w') as f:%0A f.write(HOOK_CONTENT)%0A%0Adef install():%0A repo = find_git_repo()%0A LOGGER.debug(%22Found git repo at %25s%22, repo)%0A hooks = os.path.join(repo, 'hooks')%0A pre_commit = os.path.join(hooks, 'pre-commit')%0A if os.path.exists(pre_commit):%0A raise PreCommitExists(%22A git hook already exists at %7B%7D. Refusing to overwrite. Please remove it manually%22.format(pre_commit))%0A write_hook(pre_commit
)%0A
|
5d5f73ac411873c0ec82e233b74ce70f4de4ab03
|
Optimize migration process
|
openprocurement/planning/api/migration.py
|
openprocurement/planning/api/migration.py
|
# -*- coding: utf-8 -*-
import logging
from openprocurement.planning.api.traversal import Root
from openprocurement.planning.api.models import Plan
LOGGER = logging.getLogger(__name__)
# Schema version this code expects the database to be migrated to.
SCHEMA_VERSION = 1
# _id of the document that tracks the deployed schema version.
SCHEMA_DOC = 'openprocurement_plans_schema'


def get_db_schema_version(db):
    """Return the schema version stored in *db*, or SCHEMA_VERSION - 1 if unset."""
    doc = db.get(SCHEMA_DOC, {"_id": SCHEMA_DOC})
    return doc.get("version", SCHEMA_VERSION - 1)


def set_db_schema_version(db, version):
    """Persist *version* into the schema-tracking document in *db*."""
    doc = db.get(SCHEMA_DOC, {"_id": SCHEMA_DOC})
    doc["version"] = version
    db.save(doc)
def migrate_data(registry, destination=None):
    """Run pending schema migration steps up to *destination* (or SCHEMA_VERSION).

    Each step is a module-level function named ``from<N>to<N+1>`` resolved via
    ``globals()``; missing steps only bump the recorded version.
    """
    # Skip entirely when a plugin list is configured without 'planning'.
    if registry.settings.get('plugins') and 'planning' not in registry.settings['plugins'].split(','):
        return
    cur_version = get_db_schema_version(registry.db)
    if cur_version == SCHEMA_VERSION:
        return cur_version
    for step in xrange(cur_version, destination or SCHEMA_VERSION):  # Python 2 builtin
        LOGGER.info("Migrate openprocurement plans schema from {} to {}".format(step, step + 1), extra={'MESSAGE_ID': 'migrate_data'})
        migration_func = globals().get('from{}to{}'.format(step, step + 1))
        if migration_func:
            migration_func(registry)
        # Record progress after every step so a crash resumes mid-way.
        set_db_schema_version(registry.db, step + 1)
def from0to1(registry):
    """Migration step 0 -> 1: re-serialize plans that have documents.

    Plans carrying a 'documents' field are re-run through the Plan model and
    written back in batches of 128.
    """
    class Request(object):
        # Minimal stand-in for a request object; only .registry is needed here.
        def __init__(self, registry):
            self.registry = registry

    # Presumably forces the view index to refresh before the stale iterview
    # below — confirm against couchdb behavior.
    len(registry.db.view('plans/all', limit=1))
    results = registry.db.iterview('plans/all', 2 ** 10, include_docs=True, stale='update_after')
    docs = []
    request = Request(registry)
    root = Root(request)
    for i in results:
        doc = i.doc
        if doc.get('documents'):
            plan = Plan(doc)
            plan.__parent__ = root
            doc = plan.to_primitive()
            docs.append(doc)
        # Flush in batches of 128 to bound memory use.
        if len(docs) >= 2 ** 7:
            registry.db.update(docs)
            docs = []
    if docs:
        registry.db.update(docs)
|
Python
| 0.000004
|
@@ -1626,16 +1626,87 @@
if
+not all(%5Bi.get('url', '').startswith(registry.docservice_url) for i in
doc.get(
@@ -1716,16 +1716,22 @@
cuments'
+, %5B%5D)%5D
):%0A
@@ -1827,16 +1827,72 @@
itive()%0A
+ doc%5B'dateModified'%5D = get_now().isoformat()%0A
|
7d254f6b7cebf8883fa314afbf4aa3f2f8afaf14
|
Create hard links instead of copying
|
pw_docgen/py/pw_docgen/docgen.py
|
pw_docgen/py/pw_docgen/docgen.py
|
# Copyright 2019 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Renders HTML documentation using Sphinx."""
# TODO(frolv): Figure out a solution for installing all library dependencies
# to run Sphinx and build RTD docs.
import argparse
import collections
import json
import os
import shutil
import subprocess
import sys
from typing import Dict, List, Tuple
SCRIPT_HEADER: str = '''
██████╗ ██╗ ██████╗ ██╗ ██╗███████╗███████╗██████╗ ██████╗ ██████╗ ██████╗███████╗
██╔══██╗██║██╔════╝ ██║ ██║██╔════╝██╔════╝██╔══██╗ ██╔══██╗██╔═══██╗██╔════╝██╔════╝
██████╔╝██║██║ ███╗██║ █╗ ██║█████╗ █████╗ ██║ ██║ ██║ ██║██║ ██║██║ ███████╗
██╔═══╝ ██║██║ ██║██║███╗██║██╔══╝ ██╔══╝ ██║ ██║ ██║ ██║██║ ██║██║ ╚════██║
██║ ██║╚██████╔╝╚███╔███╔╝███████╗███████╗██████╔╝ ██████╔╝╚██████╔╝╚██████╗███████║
╚═╝ ╚═╝ ╚═════╝ ╚══╝╚══╝ ╚══════╝╚══════╝╚═════╝ ╚═════╝ ╚═════╝ ╚═════╝╚══════╝
'''
def parse_args() -> argparse.Namespace:
    """Parses command-line arguments."""
    parser = argparse.ArgumentParser(description=__doc__)

    # Required path flags share the same shape; register them from a table.
    required_flags = (
        ('--sphinx-build-dir', 'Directory in which to build docs'),
        ('--conf', 'Path to conf.py file for Sphinx'),
        ('--gn-root', 'Root of the GN build tree'),
        ('--gn-gen-root', 'Root of the GN gen tree'),
    )
    for flag, help_text in required_flags:
        parser.add_argument(flag, required=True, help=help_text)

    parser.add_argument('sources',
                        nargs='+',
                        help='Paths to the root level rst source files')
    parser.add_argument('--out-dir',
                        required=True,
                        help='Output directory for rendered HTML docs')
    parser.add_argument('--metadata',
                        required=True,
                        type=argparse.FileType('r'),
                        help='Metadata JSON file')
    return parser.parse_args()
def build_docs(src_dir: str, dst_dir: str) -> int:
    """Runs Sphinx to render HTML documentation from a doc tree."""

    # TODO(frolv): Specify the Sphinx script from a prebuilts path instead of
    # requiring it in the tree.
    sphinx_command = ['sphinx-build', '-W', '-b', 'html']
    sphinx_command += ['-d', f'{dst_dir}/help', src_dir, f'{dst_dir}/html']
    return subprocess.call(sphinx_command)
def mkdir(dirname: str, exist_ok: bool = False) -> None:
    """Create a directory tree, echoing the operation to stdout first."""
    message = f'MKDIR {dirname}'
    print(message)
    os.makedirs(dirname, exist_ok=exist_ok)
def copy(src: str, dst: str) -> None:
    """Copy a file, echoing the operation to stdout first."""
    message = f'COPY {src} -> {dst}'
    print(message)
    shutil.copy(src, dst)
def copy_doc_tree(args: argparse.Namespace) -> None:
    """Copies doc source and input files into a build tree."""
    def build_path(path):
        """Converts a source path to a filename in the build directory."""
        if path.startswith(args.gn_root):
            path = os.path.relpath(path, args.gn_root)
        elif path.startswith(args.gn_gen_root):
            path = os.path.relpath(path, args.gn_gen_root)

        return os.path.join(args.sphinx_build_dir, path)

    source_files = json.load(args.metadata)
    copy_paths = [build_path(f) for f in source_files]

    mkdir(args.sphinx_build_dir)
    # Root-level rst sources and conf.py land directly in the build dir.
    for source_path in args.sources:
        copy(source_path, f'{args.sphinx_build_dir}/')
    copy(args.conf, f'{args.sphinx_build_dir}/conf.py')

    # Map of directory path to list of source and destination file paths.
    dirs: Dict[str, List[Tuple[str, str]]] = collections.defaultdict(list)

    for source_file, copy_path in zip(source_files, copy_paths):
        dirname = os.path.dirname(copy_path)
        dirs[dirname].append((source_file, copy_path))

    # Create each destination directory once, then copy its files into it.
    for directory, file_pairs in dirs.items():
        mkdir(directory, exist_ok=True)
        for src, dst in file_pairs:
            copy(src, dst)
def main() -> int:
    """Script entry point: rebuild the doc tree and run Sphinx.

    Returns the Sphinx exit code (0 on success).
    """
    args = parse_args()

    # Clear out any existing docs for the target.
    if os.path.exists(args.sphinx_build_dir):
        shutil.rmtree(args.sphinx_build_dir)

    # TODO(pwbug/164): Printing the header causes unicode problems on Windows.
    # Disabled for now; re-enable once the root issue is fixed.
    # print(SCRIPT_HEADER)
    copy_doc_tree(args)

    # Flush all script output before running Sphinx.
    print('-' * 80, flush=True)

    return build_docs(args.sphinx_build_dir, args.out_dir)


if __name__ == '__main__':
    sys.exit(main())
|
Python
| 0
|
@@ -840,16 +840,41 @@
rt sys%0A%0A
+from pathlib import Path%0A
from typ
@@ -3283,173 +3283,8 @@
)%0A%0A%0A
-def copy(src: str, dst: str) -%3E None:%0A %22%22%22Wrapper around shutil.copy that prints the operation.%22%22%22%0A print(f'COPY %7Bsrc%7D -%3E %7Bdst%7D')%0A shutil.copy(src, dst)%0A%0A%0A
def
@@ -3328,24 +3328,24 @@
e) -%3E None:%0A
+
%22%22%22Copie
@@ -3937,20 +3937,23 @@
-copy
+os.link
(source_
@@ -3957,16 +3957,32 @@
ce_path,
+%0A
f'%7Bargs
@@ -4004,19 +4004,46 @@
ir%7D/
-')%0A copy
+%7BPath(source_path).name%7D')%0A os.link
(arg
@@ -4538,20 +4538,23 @@
-copy
+os.link
(src, ds
|
47e2f60c8e10b6b2c87f2df40f362b70cb09fade
|
this should be a tuple
|
cyder/core/system/models.py
|
cyder/core/system/models.py
|
from django.db import models
from cyder.base.mixins import ObjectUrlMixin
from cyder.base.models import BaseModel
from cyder.cydhcp.keyvalue.models import KeyValue
class System(BaseModel, ObjectUrlMixin):
    """A named system record that key-value pairs can attach to."""
    name = models.CharField(max_length=255, unique=False)

    # Fix: ('name') is just the string 'name' — a one-element tuple needs a
    # trailing comma, otherwise search-field iteration walks the characters.
    search_fields = ('name',)
    display_fields = ('name', 'pk')

    def __str__(self):
        # Renders as "<name> : <pk>" using display_fields in order.
        return "{0} : {1}".format(*(str(getattr(self, f))
                                    for f in self.display_fields))

    class Meta:
        db_table = 'system'

    def details(self):
        """For tables."""
        data = super(System, self).details()
        data['data'] = [
            ('Name', 'name', self),
        ]
        return data

    def eg_metadata(self):
        """EditableGrid metadata."""
        return {'metadata': [
            {'name': 'name', 'datatype': 'string', 'editable': True},
        ]}
class SystemKeyValue(KeyValue):
    # Arbitrary key/value pair attached to a System.
    system = models.ForeignKey(System, null=False)

    class Meta:
        db_table = 'system_key_value'
        # The same key/value pair may not be attached to a system twice.
        unique_together = ('key', 'value', 'system')
|
Python
| 1
|
@@ -287,16 +287,17 @@
('name'
+,
)%0A di
|
310f1d32bf4edcd3046d6648d5133c8ef7a4a8d6
|
Fix issue with system ctnr change not propagating correctly to its interfaces
|
cyder/core/system/models.py
|
cyder/core/system/models.py
|
from django.db import models
from django.db.models import Q
from django.db.models.loading import get_model
from cyder.base.eav.constants import ATTRIBUTE_INVENTORY
from cyder.base.eav.fields import EAVAttributeField
from cyder.base.eav.models import Attribute, EAVBase
from cyder.base.mixins import ObjectUrlMixin
from cyder.base.models import BaseModel
from cyder.base.utils import transaction_atomic
from cyder.core.system.validators import validate_no_spaces
class System(BaseModel, ObjectUrlMixin):
    """A named, container-scoped system owning static/dynamic interfaces."""
    name = models.CharField(
        max_length=255, unique=False, null=False, blank=False,
        validators=[validate_no_spaces])
    ctnr = models.ForeignKey("cyder.Ctnr", null=False,
                             verbose_name="Container")

    search_fields = ('name',)
    sort_fields = ('name',)

    def __unicode__(self):
        return self.name

    class Meta:
        app_label = 'cyder'
        db_table = 'system'

    @staticmethod
    def filter_by_ctnr(ctnr, objects=None):
        # Restrict a queryset (defaults to all systems) to one container.
        objects = objects if objects is not None else System.objects
        return objects.filter(ctnr=ctnr)

    def check_in_ctnr(self, ctnr):
        return self.ctnr == ctnr

    def details(self):
        """For tables."""
        data = super(System, self).details()
        data['data'] = [
            ('Name', 'name', self),
            ('Ctnr', 'ctnr', self.ctnr),
        ]
        return data

    @transaction_atomic
    def delete(self, *args, **kwargs):
        # Delete attached interfaces first. delete_system=False presumably
        # prevents the interface delete from recursing back into this system
        # — confirm against the interface models.
        DynamicInterface = get_model('cyder', 'dynamicinterface')
        for interface in DynamicInterface.objects.filter(system=self):
            interface.delete(delete_system=False, commit=False)
        StaticInterface = get_model('cyder', 'staticinterface')
        for interface in StaticInterface.objects.filter(system=self):
            interface.delete(delete_system=False, commit=False)

        super(System, self).delete(*args, **kwargs)

    @staticmethod
    def eg_metadata():
        """EditableGrid metadata."""
        return {'metadata': [
            {'name': 'name', 'datatype': 'string', 'editable': True},
        ]}

    @transaction_atomic
    def save(self, *args, **kwargs):
        self.full_clean()
        super(System, self).save(*args, **kwargs)

    def clean(self, *args, **kwargs):
        # Propagate a container change to every attached interface.
        # NOTE(review): saving related objects inside clean() is unusual —
        # confirm it is intended to run on every validation pass.
        for i in (list(self.staticinterface_set.all()) +
                  list(self.dynamicinterface_set.all())):
            if self.ctnr != i.ctnr:
                i.ctnr = self.ctnr
                i.save()

        super(System, self).clean(*args, **kwargs)
class SystemAV(EAVBase):
    """Entity-attribute-value row attached to a System (inventory attributes)."""
    class Meta(EAVBase.Meta):
        app_label = 'cyder'
        db_table = 'system_av'

    entity = models.ForeignKey(System)
    attribute = EAVAttributeField(Attribute,
                                  type_choices=(ATTRIBUTE_INVENTORY,))
|
Python
| 0
|
@@ -2242,47 +2242,8 @@
rgs)
-%0A%0A def clean(self, *args, **kwargs):
%0A
@@ -2454,59 +2454,8 @@
e()%0A
- super(System, self).clean(*args, **kwargs)%0A
%0A%0Acl
|
e09e7148a3b94fe883839257f8b972351466eb93
|
fix docstring
|
py/_plugin/pytest_monkeypatch.py
|
py/_plugin/pytest_monkeypatch.py
|
"""
safely patch object attributes, dicts and environment variables.
Usage
----------------
Use the `monkeypatch funcarg`_ to tweak your global test environment
for running a particular test. You can safely set/del an attribute,
dictionary item or environment variable by respective methods
on the monkeypatch funcarg. If you want e.g. to set an ENV1 variable
and have os.path.expanduser return a particular directory, you can
write it down like this:
.. sourcecode:: python
def test_mytest(monkeypatch):
monkeypatch.setenv('ENV1', 'myval')
monkeypatch.setattr(os.path, 'expanduser', lambda x: '/tmp/xyz')
... # your test code that uses those patched values implicitly
After the test function finished all modifications will be undone,
because the ``monkeypatch.undo()`` method is registered as a finalizer.
``monkeypatch.setattr/delattr/delitem/delenv()`` all
by default raise an Exception if the target does not exist.
Pass ``raising=False`` if you want to skip this check.
prepending to PATH or other environment variables
---------------------------------------------------------
To prepend a value to an already existing environment parameter:
.. sourcecode:: python
def test_mypath_finding(monkeypatch):
monkeypatch.setenv('PATH', 'x/y', prepend=":")
# in bash language: export PATH=x/y:$PATH
calling "undo" finalization explicitely
-----------------------------------------
At the end of function execution py.test invokes
a teardown hook which undoes all monkeypatch changes.
If you do not want to wait that long you can call
finalization explicitly::
monkeypatch.undo()
This will undo previous changes. This call consumes the
undo stack. Calling it a second time has no effect unless
you start monkeypatching after the undo call.
.. _`monkeypatch blog post`: http://tetamap.wordpress.com/2009/03/03/monkeypatching-in-unit-tests-done-right/
"""
import py, os, sys
def pytest_funcarg__monkeypatch(request):
    """The returned ``monkeypatch`` funcarg provides these
    helper methods to modify objects, dictionaries or os.environ::

        monkeypatch.setattr(obj, name, value, raising=True)
        monkeypatch.delattr(obj, name, raising=True)
        monkeypatch.setitem(mapping, name, value)
        monkeypatch.delitem(obj, name, raising=True)
        monkeypatch.setenv(name, value, prepend=False)
        monkeypatch.delenv(name, value, raising=True)
        monkeypatch.syspath_prepend(path)

    All modifications will be undone when the requesting
    test function finished its execution. For the ``del``
    methods the ``raising`` parameter determines if a
    KeyError or AttributeError will be raised if the
    deletion has no target.
    """
    # Register undo() as a finalizer so every patch is reverted after the test.
    patcher = MonkeyPatch()
    request.addfinalizer(patcher.undo)
    return patcher
# Sentinel meaning "the attribute/item did not exist before patching".
notset = object()

class MonkeyPatch:
    """Records modifications to objects, dicts, os.environ and sys.path,
    and restores the original state in undo()."""

    def __init__(self):
        # Undo stacks; the most recent change is always at index 0.
        self._setattr = []
        self._setitem = []

    def setattr(self, obj, name, value, raising=True):
        """Set ``obj.name = value``, remembering the previous value."""
        previous = getattr(obj, name, notset)
        if raising and previous is notset:
            raise AttributeError("%r has no attribute %r" %(obj, name))
        self._setattr.insert(0, (obj, name, previous))
        setattr(obj, name, value)

    def delattr(self, obj, name, raising=True):
        """Delete ``obj.name``; a missing attribute raises unless raising=False."""
        if hasattr(obj, name):
            self._setattr.insert(0, (obj, name, getattr(obj, name, notset)))
            delattr(obj, name)
        elif raising:
            raise AttributeError(name)

    def setitem(self, dic, name, value):
        """Set ``dic[name] = value``, remembering the previous value."""
        self._setitem.insert(0, (dic, name, dic.get(name, notset)))
        dic[name] = value

    def delitem(self, dic, name, raising=True):
        """Delete ``dic[name]``; a missing key raises unless raising=False."""
        if name in dic:
            self._setitem.insert(0, (dic, name, dic.get(name, notset)))
            del dic[name]
        elif raising:
            raise KeyError(name)

    def setenv(self, name, value, prepend=None):
        """Set an environment variable, optionally prepending with a separator."""
        new_value = str(value)
        if prepend and name in os.environ:
            new_value = new_value + prepend + os.environ[name]
        self.setitem(os.environ, name, new_value)

    def delenv(self, name, raising=True):
        """Delete an environment variable (delegates to delitem)."""
        self.delitem(os.environ, name, raising=raising)

    def syspath_prepend(self, path):
        """Prepend *path* to sys.path, saving the original list once."""
        if not hasattr(self, '_savesyspath'):
            self._savesyspath = list(sys.path)
        sys.path.insert(0, str(path))

    def undo(self):
        """Revert all recorded changes, most recent first; consumes the stacks."""
        while self._setattr:
            obj, name, value = self._setattr.pop(0)
            if value is notset:
                delattr(obj, name)
            else:
                setattr(obj, name, value)
        while self._setitem:
            dictionary, name, value = self._setitem.pop(0)
            if value is notset:
                del dictionary[name]
            else:
                dictionary[name] = value
        if hasattr(self, '_savesyspath'):
            sys.path[:] = self._savesyspath
|
Python
| 0.000002
|
@@ -2608,38 +2608,9 @@
n.
-For the %60%60del%60%60 %0A methods t
+T
he %60
@@ -2620,16 +2620,21 @@
ising%60%60
+%0A
paramete
@@ -2650,20 +2650,16 @@
nes if a
-%0A
KeyErro
@@ -2678,16 +2678,21 @@
teError
+%0A
will be
@@ -2708,17 +2708,27 @@
the
-%0A dele
+ set/deletion opera
tion
|
d0a9d10d0df25de670e8bf9a1e603ed1fbe5ca29
|
use helpers
|
py3status/modules/taskwarrior.py
|
py3status/modules/taskwarrior.py
|
# -*- coding: utf-8 -*-
"""
Display tasks currently running in taskwarrior.
Configuration parameters:
cache_timeout: how often we refresh this module in seconds (default 5)
format: display format for taskwarrior (default '{task}')
Format placeholders:
{task} active tasks
Requires
task: https://taskwarrior.org/download/
@author James Smith http://jazmit.github.io/
@license BSD
"""
# import your useful libs here
from subprocess import check_output
import json
import shlex
class Py3status:
"""
"""
# available configuration parameters
cache_timeout = 5
format = '{task}'
def taskWarrior(self):
command = 'task start.before:tomorrow status:pending export'
taskwarrior_output = check_output(shlex.split(command))
tasks_json = json.loads(taskwarrior_output.decode('utf-8'))
def describeTask(taskObj):
return str(taskObj['id']) + ' ' + taskObj['description']
result = ', '.join(map(describeTask, tasks_json))
return {
'cached_until': self.py3.time_in(self.cache_timeout),
'full_text': self.py3.safe_format(self.format, {'task': result})
}
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
|
Python
| 0.001805
|
@@ -119,28 +119,28 @@
ut:
-how often we refresh
+refresh interval for
thi
@@ -152,19 +152,8 @@
ule
-in seconds
(def
@@ -192,26 +192,26 @@
at for t
-askwarrior
+his module
(defaul
@@ -390,99 +390,19 @@
%22%22%0A%0A
-# import your useful libs here%0Afrom subprocess import check_output%0Aimport json%0Aimport shlex
+import json
%0A%0A%0Ac
@@ -558,315 +558,262 @@
-command = 'task start.before:tomorrow status:pending export'%0A taskwarrior_output = check_output(shlex.split(command))%0A tasks_json = json.loads(taskwarrior_output.decode('utf-8'))%0A%0A def describeTask(taskObj):%0A return str(taskObj%5B'id'%5D) + ' ' + taskObj%5B'description'%5D%0A%0A
+def describeTask(taskObj):%0A return str(taskObj%5B'id'%5D) + ' ' + taskObj%5B'description'%5D%0A%0A task_command = 'task start.before:tomorrow status:pending export'%0A task_json = json.loads(self.py3.command_output(task_command))%0A task_
resu
@@ -849,17 +849,16 @@
sk, task
-s
_json))%0A
@@ -1008,16 +1008,21 @@
'task':
+task_
result%7D)
|
49acdec58b21ce23b5e20200249bbae7aaf4b49f
|
upgrade logging
|
src/main/python/pybuilder_aws_plugin/upload_zip_task.py
|
src/main/python/pybuilder_aws_plugin/upload_zip_task.py
|
import os
import subprocess
import zipfile
from pybuilder.core import depends, task
from pybuilder.plugins.python.install_dependencies_plugin import (
as_pip_argument)
from .helpers import upload_helper, teamcity_helper
def zip_recursive(archive, directory, folder=''):
"""Zip directories recursively"""
for item in os.listdir(directory):
if os.path.isfile(os.path.join(directory, item)):
archive.write(
os.path.join(directory, item), os.path.join(folder, item),
zipfile.ZIP_DEFLATED)
elif os.path.isdir(os.path.join(directory, item)):
zip_recursive(
archive, os.path.join(directory, item),
folder=os.path.join(folder, item))
def prepare_dependencies_dir(project, target_directory, excludes=None):
"""Get all dependencies from project and install them to given dir"""
excludes = excludes or []
dependencies = map(lambda dep: as_pip_argument(dep), project.dependencies)
index_url = project.get_property('install_dependencies_index_url')
if index_url:
index_url = "--index-url {0}".format(index_url)
pip_cmd = 'pip install --target {0} {1} {2}'
for dependency in dependencies:
if dependency in excludes:
continue
cmd = pip_cmd.format(target_directory, index_url, dependency).split()
process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
stdout, _ = process.communicate()
def get_path_to_zipfile(project):
return os.path.join(
project.expand_path('$dir_target'), '{0}.zip'.format(project.name))
def write_version(project, archive):
"""Get the current version and write it to a version file"""
filename = os.path.join(project.expand_path('$dir_target'), 'VERSION')
with open(filename, 'w') as version_file:
version_file.write(project.version)
archive.write(filename, 'VERSION')
@task('package_lambda_code',
description='Package the modules, dependencies and scripts into a '
'lambda-zip')
@depends('package')
def package_lambda_code(project, logger):
dir_target = project.expand_path('$dir_target')
lambda_dependencies_dir = os.path.join(dir_target, 'lambda_dependencies')
excludes = ['boto', 'boto3']
logger.info('Going to prepare dependencies.')
prepare_dependencies_dir(
project, lambda_dependencies_dir, excludes=excludes)
logger.info('Going to assemble the lambda-zip.')
path_to_zipfile = get_path_to_zipfile(project)
archive = zipfile.ZipFile(path_to_zipfile, 'w')
if os.path.isdir(lambda_dependencies_dir):
zip_recursive(archive, lambda_dependencies_dir)
sources = project.expand_path('$dir_source_main_python')
zip_recursive(archive, sources)
scripts = project.expand_path('$dir_source_main_scripts')
zip_recursive(archive, scripts)
write_version(project, archive)
archive.close()
logger.info('Lambda zip is available at: "{0}".'.format(path_to_zipfile))
@task('upload_zip_to_s3', description='Upload a packaged lambda-zip to S3')
@depends('package_lambda_code')
def upload_zip_to_s3(project, logger):
path_to_zipfile = get_path_to_zipfile(project)
with open(path_to_zipfile, 'rb') as fp:
data = fp.read()
bucket_prefix = project.get_property('bucket_prefix')
bucket_name = project.get_mandatory_property('bucket_name')
keyname_version = '{0}v{1}/{2}.zip'.format(
bucket_prefix, project.version, project.name)
keyname_latest = '{0}latest/{1}.zip'.format(bucket_prefix, project.name)
acl = project.get_property('lambda_file_access_control')
upload_helper(logger, bucket_name, keyname_version, data, acl)
upload_helper(logger, bucket_name, keyname_latest, data, acl)
tc_param = project.get_property('teamcity_parameter')
if project.get_property("teamcity_output") and tc_param:
teamcity_helper(tc_param, keyname_version)
|
Python
| 0.000001
|
@@ -2936,17 +2936,17 @@
('Lambda
-
+-
zip is a
@@ -3187,24 +3187,95 @@
le(project)%0A
+ logger.info('Found lambda-zip at: %22%7B0%7D%22.'.format(path_to_zipfile))%0A
with ope
|
c9b7e886f9276079fc79fbe394f5b15595f04603
|
Test fixes
|
ownblock/ownblock/apps/messaging/tests.py
|
ownblock/ownblock/apps/messaging/tests.py
|
from unittest.mock import Mock
from django.test import TestCase
from rest_framework import serializers
from apps.accounts.tests import ResidentFactory
from apps.buildings.tests import ApartmentFactory
from .serializers import MessageSerializer
class SerializerTests(TestCase):
def test_validate_recipient_if_same_as_sender(self):
apt = ApartmentFactory.create()
req = Mock()
req.user = ResidentFactory.create(apartment=apt)
serializer = MessageSerializer(context={'request': req})
attrs = {'recipient': req.user}
self.assertRaises(serializers.ValidationError,
serializer.validate_recipient, attrs, 'recipient')
def test_validate_recipient_if_does_not_exist(self):
pass
def test_validate_recipient_if_ok(self):
pass
|
Python
| 0.000001
|
@@ -755,20 +755,408 @@
-pass
+apt = ApartmentFactory.create()%0A req = Mock()%0A req.user = ResidentFactory.create(apartment=apt)%0A recipient = ResidentFactory.create()%0A serializer = MessageSerializer(context=%7B'request': req%7D)%0A attrs = %7B'recipient': recipient%7D%0A self.assertRaises(serializers.ValidationError,%0A serializer.validate_recipient, attrs, 'recipient')
%0A%0A de
|
5c7dcc4fbe5a80f1f53dd48ab19b557976ef64d8
|
Fix help string on override file
|
paasta_tools/monitoring/check_capacity.py
|
paasta_tools/monitoring/check_capacity.py
|
#!/usr/bin/env python
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
import argparse
import json
import sys
from collections import defaultdict
from paasta_tools.api.client import get_paasta_api_client
from paasta_tools.utils import load_system_paasta_config
from paasta_tools.utils import paasta_print
def parse_capacity_check_options():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
'type', choices=['cpus', 'mem', 'disk'], type=str,
help='The resource to check.',
)
parser.add_argument(
'--warn', '-w', dest='warn', type=float, default=90,
help='Level to emit a warning status.',
)
parser.add_argument(
'--crit', '-c', dest='crit', type=float, default=90,
help='Level to emit a critical status.',
)
parser.add_argument(
'--overrides', dest='overrides', type=str,
help='json file of per-attribute overrides.\n'
'In the format {attribute_name: {value: {cpus: num, disk: num, mem: num}}}',
)
parser.add_argument(
'--cluster', dest='cluster', type=str,
help='Cluster to check. Defaults to looking for the current cluster.',
)
parser.add_argument(
'--attributes', dest='attributes', type=str, default='pool',
help='Comma separated list of attributes to check.\n'
'By default, only checks attributes individually; see --cross-product-check',
)
parser.add_argument(
'--cross-product-check', dest='cpc', action='store_true',
help='When specified, --cross-product-check will check combinations of '
'attributes instead of individual attributes. \n'
'eg if attributes is \'pool,region\', this will cause the check to fail '
'if any region-pool combination is below the threashold, or either\'s override',
)
options = parser.parse_args()
return options
def calc_percent_usage(resource_item, value_to_check):
values = resource_item[value_to_check]
if values['total'] == 0:
return 0
return 100 * (values['used'] / values['total'])
def error_message(failures, level, cluster, value_to_check):
result = "%s cluster %s %s usage:\n" % (level, cluster, value_to_check)
results = []
for f in failures:
attrs = ", ".join(["%s: %s" % (e['attr'], e['value']) for e in f['attrs']])
results.append(
" %s is at %s percent %s, maximum %s percent" % (
attrs, f['current'], value_to_check,
f['maximum'],
),
)
result += "\n".join(results)
return result
def run_capacity_check():
options = parse_capacity_check_options()
system_paasta_config = load_system_paasta_config()
cluster = options.cluster if options.cluster is not None else system_paasta_config.get_cluster()
value_to_check = options.type
client = get_paasta_api_client(cluster=options.cluster)
if client is None:
paasta_print('UNKNOWN Failed to load paasta api client')
sys.exit(3)
if options.overrides:
with open(options.overrides, 'r') as f:
overrides = json.loads(f.read())
else:
overrides = {}
attributes = options.attributes.split(',')
if options.cpc:
resource_use = {'all': client.resources.resources(groupings=attributes).result()}
else:
resource_use = {a: client.resources.resources(groupings=[a]).result() for a in attributes}
default_check = {
'warn': {
'cpus': options.warn,
'mem': options.warn,
'disk': options.warn,
},
'crit': {
'cpus': options.crit,
'mem': options.crit,
'disk': options.crit,
},
}
failures = defaultdict(list)
for attribute, values in resource_use.items():
for usage_value in values:
attribute_value = usage_value['groupings'].get(attribute, 'unknown')
check = overrides.get('attribute', {}).get(attribute_value, default_check)
usage_percent = calc_percent_usage(usage_value, value_to_check)
for c in ['crit', 'warn']:
if usage_percent > check[c][value_to_check]:
failures[c].append({
'attrs': [{'attr': a, 'value': v} for a, v in usage_value['groupings'].items()],
'maximum': check[c][value_to_check], 'current': usage_percent,
})
break
return_value = [0]
if len(failures['crit']) > 0:
result = error_message(failures['crit'], 'CRITICAL', cluster, value_to_check)
paasta_print(result)
return_value.append(2)
if len(failures['warn']) > 0:
result = error_message(failures['warn'], 'WARNING', cluster, value_to_check)
paasta_print(result)
return_value.append(1)
if max(return_value) == 0:
paasta_print("OK cluster %s is below critical capacity in %s" % (cluster, value_to_check))
sys.exit(max(return_value))
if __name__ == "__main__":
run_capacity_check()
|
Python
| 0.000002
|
@@ -1258,33 +1258,33 @@
=float, default=
-9
+8
0,%0A help=
@@ -1643,16 +1643,23 @@
%7Bvalue:
+ %7Bwarn:
%7Bcpus:
@@ -1683,16 +1683,68 @@
em: num%7D
+, '%0A 'crit: %7Bcpus: num, disk: num, mem: num%7D%7D
%7D%7D',%0A
|
5e9eda407832d9b97e7f78219f20236e04306a32
|
fix test, probably broken by a epydoc change this code is dead though so i don't much care
|
pydoctor/test/test_formatting.py
|
pydoctor/test/test_formatting.py
|
from pydoctor import html, model
from py import test
def test_signatures():
argspec = [['a', 'b', 'c'], None, None, (1,2)]
assert html.getBetterThanArgspec(argspec) == (['a'], [('b', 1), ('c', 2)])
def test_strsig():
argspec = [['a', 'b', 'c'], None, None, (1,2)]
assert html.signature(argspec) == "a, b=1, c=2"
def test_strsigvar():
argspec = [['a', 'b', 'c'], 'args', 'kk', (1,2)]
assert html.signature(argspec) == "a, *args, b=1, c=2, **kk"
def test_strsigargh():
argspec = [['a', ['b','c']], None, None, ()]
assert html.signature(argspec) == 'a, (b, c)'
def test_link():
doc0 = model.Documentable(None, 'twisted', None)
docco = model.Documentable(None, 'threadz', None, doc0)
assert html.link(docco) == 'twisted.threadz.html'
def test_summaryDoc():
docco = model.Documentable(None, 'threadz', 'Woot\nYeah')
assert html.summaryDoc(docco) == html.doc2html(docco, 'Woot')
def test_boringDocstring():
assert html.boringDocstring('Woot\nYeah') == '<pre>Woot\nYeah</pre>'
def test_reallyBoringDocstring():
undocced = '<pre class="undocumented">Undocumented</pre>'
assert html.boringDocstring('') == undocced
assert html.boringDocstring(None) == undocced
def test_doc2htmlEpy():
if not html.EPYTEXT:
test.skip("Epytext not available")
assert html.doc2html(None, 'Woot\nYeah') == '<div>Woot Yeah\n</div>'
class TestEpyHackers:
def setup_method(self, meth):
self.orig = html.EPYTEXT
def teardown_method(self, meth):
html.EPYTEXT = self.orig
def test_doc2htmlBoring(self):
if html.EPYTEXT:
html.EPYTEXT = False
assert html.doc2html(object(), 'Woot\nYeah') == '<pre>Woot\nYeah</pre>'
def test_generateModuleIndex(self):
#This test is a bit un-unity
# And *damnit* how do I write teardowners
html.EPYTEXT = False
sysw = html.SystemWriter(None)
pack = model.Package(None, 'twisted', None)
mod = model.Module(None, 'threadz', 'Woot\nYeah', pack)
fun = model.Function(None, 'blat', 'HICKY HECK\nYEAH', mod)
fun.argspec = [(), None, None, ()]
out = sysw.getHTMLFor(fun)
assert 'blat()' in out
assert 'HICKY HECK\nYEAH' in out
|
Python
| 0
|
@@ -1368,16 +1368,19 @@
= '%3Cdiv%3E
+%3Cp%3E
Woot Yea
@@ -1380,16 +1380,20 @@
oot Yeah
+%3C/p%3E
%5Cn%3C/div%3E
|
d9189f91370abd1e20e5010bb70d9c47efd58215
|
Change read_chrom_sizes to read from a FAIDX index if available
|
muver/reference.py
|
muver/reference.py
|
from wrappers import bowtie2, picard, samtools
def create_reference_indices(ref_fn):
'''
For a given reference FASTA file, generate several indices.
'''
bowtie2.build(ref_fn)
samtools.faidx_index(ref_fn)
picard.create_sequence_dictionary(ref_fn)
def read_chrom_sizes(reference_assembly_fn):
'''
Iterate through a FASTA file to find the length of each chromosome.
'''
chrom_sizes = dict()
last_chromosome = None
with open(reference_assembly_fn) as f:
for line in f:
if line.startswith('>'):
last_chromosome = line.split('>')[1].strip()
chrom_sizes[last_chromosome] = 0
else:
chrom_sizes[last_chromosome] += len(line.strip())
return chrom_sizes
def read_chrom_sizes_from_file(chrom_sizes_fn):
'''
Read chromosome sizes from a UCSC chrom_sizes file.
'''
chrom_sizes = dict()
with open(chrom_sizes_fn) as f:
for line in f:
chromosome, size = line.strip().split()
chrom_sizes[chromosome] = int(size)
return chrom_sizes
|
Python
| 0
|
@@ -1,12 +1,25 @@
+import os%0D%0A%0D%0A
from wrapper
@@ -422,45 +422,394 @@
ome.
-%0D%0A '''%0D%0A chrom_sizes = dict()%0D%0A
+ If a%0D%0A FAIDX index is available, it will read the lengths from there.%0D%0A '''%0D%0A chrom_sizes = dict()%0D%0A%0D%0A if os.path.exists(reference_assembly_fn + '.fai'):%0D%0A with open(reference_assembly_fn + '.fai') as f:%0D%0A for line in f:%0D%0A chromosome, size = line.strip().split('%5Ct')%5B:2%5D%0D%0A chrom_sizes%5Bchromosome%5D = int(size)%0D%0A else:%0D%0A
@@ -832,22 +832,24 @@
= None%0D%0A
-%0D%0A
+
+
with ope
@@ -876,26 +876,28 @@
_fn) as f:%0D%0A
-%0D%0A
+
for
@@ -908,18 +908,18 @@
in f:%0D%0A
-%0D%0A
+
@@ -922,16 +922,18 @@
+
if line.
@@ -950,18 +950,20 @@
('%3E'):%0D%0A
-%0D%0A
+
@@ -1012,24 +1012,28 @@
1%5D.strip()%0D%0A
+
@@ -1070,18 +1070,20 @@
e%5D = 0%0D%0A
-%0D%0A
+
@@ -1093,18 +1093,20 @@
else:%0D%0A
-%0D%0A
+
|
8e1610570a50282594a5516ee473cf13bec2ce71
|
fix typo
|
core/drivers/count/count.py
|
core/drivers/count/count.py
|
keywords = ['SET', 'INSERT', 'UPDATE', 'DELETE']
def count_query(queries):
ret = {}
for keyword in keywords:
ret[keyword] = 0
for query in queries:
for keyword in keywords:
if query.startswith(keyword):
ret[keyword] += 1
break
return ret
|
Python
| 0.999991
|
@@ -8,16 +8,19 @@
s = %5B'SE
+LEC
T', 'INS
|
4b6911cbe2fefceda4e24f5c3092cfbb15a40982
|
Remove unused
|
jet/dashboard/views.py
|
jet/dashboard/views.py
|
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.forms.formsets import formset_factory
from django.shortcuts import redirect
from django.views.decorators.http import require_POST, require_GET
from jet.dashboard.forms import UpdateDashboardModulesForm, AddUserDashboardModuleForm, \
UpdateDashboardModuleCollapseForm, RemoveDashboardModuleForm, ResetDashboardForm
from jet.dashboard.models import UserDashboardModule
from jet.utils import JsonResponse, get_app_list, SuccessMessageMixin
from django.views.generic import UpdateView
from django.utils.translation import ugettext_lazy as _
class UpdateDashboardModuleView(SuccessMessageMixin, UpdateView):
model = UserDashboardModule
fields = ('title',)
template_name = 'jet.dashboard/update_module.html'
success_message = _('Widget was successfully updated')
object = None
module = None
def get_success_url(self):
if self.object.app_label:
return reverse('admin:app_list', kwargs={'app_label': self.object.app_label})
else:
return reverse('admin:index')
def get_module(self):
object = self.object if getattr(self, 'object', None) is not None else self.get_object()
return object.load_module()
def get_settings_form_kwargs(self):
kwargs = {
'initial': self.module.settings
}
if self.request.method in ('POST', 'PUT'):
kwargs.update({
'data': self.request.POST,
'files': self.request.FILES,
})
return kwargs
def get_settings_form(self):
if self.module.settings_form:
form = self.module.settings_form(**self.get_settings_form_kwargs())
if hasattr(form, 'set_module'):
form.set_module(self.module)
return form
def get_children_formset_kwargs(self):
kwargs = {
'initial': self.module.children,
'prefix': 'children',
}
if self.request.method in ('POST', 'PUT'):
kwargs.update({
'data': self.request.POST,
'files': self.request.FILES,
})
return kwargs
def get_children_formset(self):
if self.module.child_form:
return formset_factory(self.module.child_form, can_delete=True, extra=1)(**self.get_children_formset_kwargs())
def clean_children_data(self, children):
children = list(filter(
lambda item: isinstance(item, dict) and item and item.get('DELETE') is not True,
children
))
for item in children:
item.pop('DELETE')
return children
def get_current_app(self):
app_list = get_app_list({'request': self.request})
for app in app_list:
if app.get('app_label', app.get('name')) == self.object.app_label:
return app
def get_context_data(self, **kwargs):
data = super(UpdateDashboardModuleView, self).get_context_data(**kwargs)
data['title'] = _('Change')
data['module'] = self.module
data['settings_form'] = self.get_settings_form()
data['children_formset'] = self.get_children_formset()
data['child_name'] = self.module.child_name if self.module.child_name else _('Items')
data['child_name_plural'] = self.module.child_name_plural if self.module.child_name_plural else _('Items')
data['app'] = self.get_current_app()
return data
def dispatch(self, request, *args, **kwargs):
self.object = self.get_object()
self.module = self.get_module()(model=self.object)
return super(UpdateDashboardModuleView, self).dispatch(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
settings_form = self.get_settings_form()
children_formset = self.get_children_formset()
data = request.POST.copy()
if settings_form:
if settings_form.is_valid():
settings = settings_form.cleaned_data
data['settings'] = self.module.dump_settings(settings)
else:
return self.form_invalid(self.get_form(self.get_form_class()))
if children_formset:
if children_formset.is_valid():
self.module.children = self.clean_children_data(children_formset.cleaned_data)
data['children'] = self.module.dump_children()
else:
return self.form_invalid(self.get_form(self.get_form_class()))
request.POST = data
return super(UpdateDashboardModuleView, self).post(request, *args, **kwargs)
def form_valid(self, form):
if 'settings' in form.data:
form.instance.settings = form.data['settings']
if 'children' in form.data:
form.instance.children = form.data['children']
return super(UpdateDashboardModuleView, self).form_valid(form)
@require_POST
def update_dashboard_modules_view(request):
result = {'error': False}
form = UpdateDashboardModulesForm(request, request.POST)
if form.is_valid():
form.save()
else:
result['error'] = True
return JsonResponse(result)
@require_POST
def add_user_dashboard_module_view(request):
result = {'error': False}
form = AddUserDashboardModuleForm(request, request.POST)
if form.is_valid():
module = form.save()
result['id'] = module.pk
messages.success(request, _('Widget has been successfully added'))
if module.app_label:
result['success_url'] = reverse('admin:app_list', kwargs={'app_label': module.app_label})
else:
result['success_url'] = reverse('admin:index')
else:
result['error'] = True
return JsonResponse(result)
@require_POST
def update_dashboard_module_collapse_view(request):
result = {'error': False}
try:
instance = UserDashboardModule.objects.get(pk=request.POST.get('id'))
form = UpdateDashboardModuleCollapseForm(request, request.POST, instance=instance)
if form.is_valid():
module = form.save()
result['collapsed'] = module.collapsed
else:
result['error'] = True
except UserDashboardModule.DoesNotExist:
result['error'] = True
return JsonResponse(result)
@require_POST
def remove_dashboard_module_view(request):
result = {'error': False}
try:
instance = UserDashboardModule.objects.get(pk=request.POST.get('id'))
form = RemoveDashboardModuleForm(request, request.POST, instance=instance)
if form.is_valid():
form.save()
else:
result['error'] = True
except UserDashboardModule.DoesNotExist:
result['error'] = True
return JsonResponse(result)
@require_GET
def load_dashboard_module_view(request, pk):
result = {'error': False}
try:
instance = UserDashboardModule.objects.get(pk=pk)
module_cls = instance.load_module()
module = module_cls(model=instance, context={'request': request})
result['html'] = module.render()
except UserDashboardModule.DoesNotExist:
result['error'] = True
return JsonResponse(result)
@require_POST
def reset_dashboard_view(request):
result = {'error': False}
form = ResetDashboardForm(request, request.POST)
if form.is_valid():
form.save()
else:
result['error'] = True
return JsonResponse(result)
|
Python
| 0.000349
|
@@ -128,46 +128,8 @@
ory%0A
-from django.shortcuts import redirect%0A
from
|
49ab63133924b4789751d3e2806ff1516c0ee167
|
Move commented import
|
cxroots/IterativeMethods.py
|
cxroots/IterativeMethods.py
|
from __future__ import division
# import scipy.optimize
def iterateToRoot(x0, f, df=None, steptol=1e-8, roottol=1e-12, maxIter=20):
# iterate to a root using initial point x0
if df is not None:
try:
# uses Newton-Raphson method if f and df are given.
# SciPy implementation
# root = scipy.optimize.newton(f, x0, df, tol=steptol, maxiter=maxIter)
# err = abs(f(root))
root, err = newton(x0, f, df, steptol, roottol, maxIter)
except (RuntimeError, OverflowError):
return None
else:
# Secant method:
x1, x2 = x0, x0*(1 + 1e-4) + 1e-4
root, err = secant(x1, x2, f, steptol, roottol, maxIter)
# XXX: Secant method is very slow to converge. Use Muller's method instead?
# Muller's method: uses 3 initial points
# import mpmath # XXX: mpmath insists on functions accepting mpc which is inconvenient
# x1, x2, x3 = x0, x0*(1 + 1e-4) + 1e-4, x0*(1 + 2e-4) + 2e-4
# root = mpmath.findroot(f, (x1, x2, x3), solver='muller', tol=roottol, verbose=False, verify=False)
# err = abs(root)
if err < roottol:
return root
def newton(x0, f, df, steptol=1e-8, roottol=1e-12, maxIter=20, callback=None):
"""
Find an approximation to a point xf such that f(xf)=0 for a
scalar function f using Newton-Raphson iteration starting at
the point x0.
Parameters
----------
x0 : float or complex
Initial point for Newton iteration, should be as close as
possible to a root of f
f : function
Function of a single variable f(x)
df : function
Function of a single variable, df(x), providing the
derivative of the function f(x) at the point x
steptol: float, optional
Routine will end if the step size, dx, between sucessive
iterations of x satisfies abs(dx) < steptol
roottol: float, optional
The routine will end if abs(f(x)) < roottol
maxIter : int, optional
Routine ends after maxIter iterations
callback : function, optional
After each iteration the supplied function
callback(x, dx, f(x), iteration) will be called where 'x' is the current iteration
of the estimated root, 'dx' is the step size between the previous
and current 'x' and 'iteration' the number of iterations that have been taken.
If the callback function evaluates to True then the routine will end
Returns
-------
xf : float
The approximation to a root of f
rooterr : float
The error of the original function at xf, abs(f(xf))
"""
# XXX: Could use deflated polynomials to ensure that known roots are not found again?
x, y = x0, f(x0)
for iteration in range(maxIter):
dx = -y/df(x)
x += dx
y = f(x)
if callback is not None and callback(x, dx, y, iteration+1):
break
if abs(dx) < steptol or abs(y) < roottol:
break
return x, abs(y)
def secant(x1, x2, f, steptol=1e-10, roottol=1e-14, maxIter=30, callback=None):
"""
Find an approximation to a point xf such that f(xf)=0 for a
scalar function f using the secant method. The method requires
two initial points x1 and x2, ideally close to a root
and proceeds iteratively.
Parameters
----------
x1 : float or complex
An initial point for iteration, should be close to a
root of f.
x2 : float or complex
An initial point for iteration, should be close to a
root of f. Should not equal x1.
f : function
Function of a single variable f(x)
steptol: float, optional
Routine will end if the step size, dx, between sucessive
iterations of x satisfies abs(dx) < steptol
roottol: float, optional
The routine will end if abs(f(x)) < roottol
maxIter : int, optional
Routine ends after maxIter iterations
callback : function, optional
After each iteration the supplied function
callback(x, dx, f(x), iteration) will be called where 'x' is the current iteration
of the estimated root, 'dx' is the step size between the previous
and current 'x' and 'iteration' the number of iterations that have been taken.
If the callback function evaluates to True then the routine will end
Returns
-------
xf : float
The approximation to a root of f
rooterr : float
The error of the original function at xf, abs(f(xf))
"""
# As in "Numerical Recipies 3rd Edition" pick the bound with the
# smallest function value as the most recent guess
y1, y2 = f(x1), f(x2)
if abs(y1) < abs(y2):
x1, x2 = x2, x1
y1, y2 = y2, y1
for iteration in range(maxIter):
dx = -(x2-x1)*y2/(y2-y1)
x1, x2 = x2, x2 + dx
y1, y2 = y2, f(x2)
if callback is not None and callback(x2, dx, y2, iteration+1):
break
if abs(dx) < steptol or abs(y2) < roottol:
break
return x2, abs(y2)
|
Python
| 0
|
@@ -28,32 +28,8 @@
sion
-%0A# import scipy.optimize
%0A%0Ade
@@ -255,16 +255,43 @@
ntation%0A
+%09%09%09# import scipy.optimize%0A
%09%09%09# roo
|
0e180dfc0c6c4716408f9d74819edfc1d3a638b6
|
Fix pep8 on views/page.py.
|
myuw/views/page.py
|
myuw/views/page.py
|
import re
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render_to_response
from django.contrib.auth.decorators import login_required
from django.template import RequestContext
from django.conf import settings
import logging
from userservice.user import UserService
from myuw.dao.term import get_current_quarter
from myuw.dao.pws import is_student
from myuw.dao.affiliation import get_all_affiliations
from myuw.dao.affiliation import is_mandatory_switch_user
from myuw.dao.affiliation import is_optin_switch_user, has_legacy_preference
from myuw.logger.timer import Timer
from myuw.logger.logresp import log_data_not_found_response
from myuw.logger.logresp import log_invalid_netid_response
from myuw.logger.logresp import log_success_response_with_affiliation
from myuw.views.rest_dispatch import invalid_session
from myuw.dao.uwemail import get_email_forwarding_for_current_user
from myuw.dao.card_display_dates import get_card_visibilty_date_values
from myuw.logger.session_log import log_session
@login_required
def index(request,
year=None,
quarter=None,
summer_term=None):
netid = UserService().get_user()
if not netid:
log_invalid_netid_response(logger, timer)
return invalid_session()
if _is_mobile(request):
# On mobile devices, all students get the current myuw. Non-students
# are sent to the legacy site.
if not is_student():
return redirect_to_legacy_site()
else:
# On the desktop, we're migrating users over. There are 2 classes of
# users - mandatory and opt-in switchers. The mandatory users, who
# are users who haven't been at the UW long enough to be accustomed to
# the existing myuw.
# The other class of users can opt to use the legacy myuw instead.
# Check to see if they have a set preference, and if not, keep them on
# the new version
if not is_mandatory_switch_user():
if is_optin_switch_user():
if has_legacy_preference():
return redirect_to_legacy_site()
else:
return redirect_to_legacy_site()
timer = Timer()
logger = logging.getLogger('myuw.views.page.index')
context = {
"year": year,
"quarter": quarter,
"summer_term": summer_term,
"home_url": "/",
"err": None,
"user": {
"netid": None,
"affiliations": get_all_affiliations(request)
},
"card_display_dates": get_card_visibilty_date_values(request),
}
context["user"]["session_key"] = request.session.session_key
log_session(netid, request.session.session_key, request)
my_uwemail_forwarding = get_email_forwarding_for_current_user()
if my_uwemail_forwarding is not None and my_uwemail_forwarding.is_active():
c_user = context["user"]
c_user["email_is_uwgmail"] = my_uwemail_forwarding.is_uwgmail()
c_user["email_is_uwlive"] = my_uwemail_forwarding.is_uwlive()
context["user"]["netid"] = netid
if year is None or quarter is None:
cur_term = get_current_quarter(request)
if cur_term is None:
context["err"] = "No current quarter data!"
log_data_not_found_response(logger, timer)
else:
context["year"] = cur_term.year
context["quarter"] = cur_term.quarter
else:
pass
log_success_response_with_affiliation(logger, timer, request)
return render_to_response("index.html",
context,
context_instance=RequestContext(request))
def _is_mobile(request):
user_agent = request.META.get("HTTP_USER_AGENT")
# This is the check we were doing in our apache config...
if re.match('.*iPhone.*', user_agent):
return True
if re.match('.*Android.*Mobile.*', user_agent):
return True
return False
def redirect_to_legacy_site():
legacy_url = getattr(settings,
"MYUW_USER_SERVLET_URL",
"https://myuw.washington.edu/servlet/user")
return HttpResponseRedirect(legacy_url)
def logout(request):
# Expires current myuw session
request.session.flush()
logout_url = "https://weblogin.washington.edu/?logout_action=1&two=myuw&one=myuw.washington.edu"
# Redirects to weblogin logout page
return HttpResponseRedirect(logout_url)
|
Python
| 0
|
@@ -4235,16 +4235,17 @@
y_url)%0A%0A
+%0A
def logo
@@ -4337,16 +4337,26 @@
t_url =
+%22%25s%25s%22 %25 (
%22https:/
@@ -4380,16 +4380,47 @@
ton.edu/
+%22,%0A %22
?logout_
@@ -4461,16 +4461,17 @@
ton.edu%22
+)
%0A%0A #
@@ -4543,12 +4543,13 @@
(logout_url)
+%0A
|
d58bee9caa30f67e808a0a203fc88febacdc6186
|
Fix parentheses in Rectangle in plot_hull_distances.
|
stability/analysis.py
|
stability/analysis.py
|
import os
from pymatgen.phasediagram.pdanalyzer import PDAnalyzer
from pymatgen.phasediagram.pdmaker import PhaseDiagram
from pymatgen.core.structure import Structure
from pymatgen.io.vasp.outputs import Vasprun
from pymatgen.entries.computed_entries import ComputedEntry
from pymatgen.matproj.rest import MPRester
from monty.serialization import loadfn
import twod_materials.standard as st
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
MPR = MPRester(
loadfn(os.path.join(os.path.expanduser('~'), 'dbauth.yaml'))['mp_api']
)
def get_competing_species(directories):
total_competing_species = []
for directory in directories:
os.chdir(directory)
composition = Structure.from_file('POSCAR').composition
try:
energy = Vasprun('vasprun.xml').final_energy
except:
energy = 100
my_entry = ComputedEntry(composition, energy) # 2D material
entries = MPR.get_entries_in_chemsys(
[elt.symbol for elt in composition]
)
entries.append(my_entry) # 2D material
pda = PDAnalyzer(PhaseDiagram(entries))
decomp = pda.get_decomp_and_e_above_hull(my_entry, allow_negative=True)
competing_species = [
(entry.composition.reduced_formula,
entry.entry_id) for entry in decomp[0]
]
# Keep a running list of all unique competing species, since in
# high throughput 2D searches there is usually some overlap in
# competing species for different materials.
for specie in competing_species:
if specie not in total_competing_species:
total_competing_species.append(specie)
os.chdir('../')
return total_competing_species
def get_hull_distances(directories):
hull_distances = {}
finished_competitors = {}
# Determine which competing species have been relaxed in the current
# framework and store them in a dictionary ({formula: entry}).
if os.path.isdir('all_competitors'):
os.chdir('all_competitors')
for comp_dir in [
dir for dir in os.listdir(os.getcwd()) if os.path.isdir(dir) and
st.is_converged(dir)
]:
os.chdir(comp_dir)
composition = Structure.from_file('POSCAR').composition
energy = Vasprun('vasprun.xml').final_energy
finished_competitors[comp_dir] = ComputedEntry(composition, energy)
os.chdir('../')
os.chdir('../')
for directory in directories:
os.chdir(directory)
composition = Structure.from_file('POSCAR').composition
try:
energy = Vasprun('vasprun.xml').final_energy
except:
energy = 100
my_entry = ComputedEntry(composition, energy) # 2D material
entries = MPR.get_entries_in_chemsys(
[elt.symbol for elt in composition]
)
# If the energies of competing species have been calculated in
# the current framework, put them in the phase diagram instead
# of the MP energies.
for i in range(len(entries)):
formula = entries[i].composition.reduced_formula
if formula in finished_competitors:
entries[i] = finished_competitors[formula]
else:
entries[i] = ComputedEntry(entries[i].composition, 100)
entries.append(my_entry) # 2D material
pda = PDAnalyzer(PhaseDiagram(entries))
decomp = pda.get_decomp_and_e_above_hull(my_entry, allow_negative=True)
hull_distances[composition.reduced_formula] = decomp[1]
os.chdir('../')
return hull_distances
def plot_hull_distances(hull_distances):
ax = plt.figure().gca()
ax.set_ylim(0, 1000)
ax.set_xlim(0, len(hull_distances))
x_ticklabels = []
i = 0
for compound in hull_distances:
x_ticklabels.append(compound)
if hull_distances[compound] < 100:
color_code = 0.5
elif hull_distances[compound] < 200:
color_code = 0.71
else:
color_code = 0.92
ax.add_patch(plt.Rectangle(i, 0), height=hull_distances[compound],
width=1, linewidth=0, facecolor=plt.cm.jet(color_code))
ax.set_xticklabels(x_ticklabels)
plt.savefig('stability_plot.pdf', transparent=True)
|
Python
| 0.000385
|
@@ -4183,16 +4183,17 @@
ctangle(
+(
i, 0), h
@@ -4300,16 +4300,17 @@
r_code))
+)
%0A%0A ax
|
4657acf6408b2fb416e2c9577ac09d18d81f8a68
|
Remove unused NHS database mockup
|
nameless/config.py
|
nameless/config.py
|
import os
_basedir = os.path.abspath(os.path.dirname(__file__))
# Plugin settings
DATABASE_NAMES = ['atc', 'nhs', 'sms']
# Using sqlite for local development, will be SQL on production.
SQLALCHEMY_BINDS = {
'atc': 'sqlite:///' + os.path.join(_basedir, 'db/atc.db'),
'nhs': 'sqlite:///' + os.path.join(_basedir, 'db/nhs.db'),
'sms': 'sqlite:///' + os.path.join(_basedir, 'db/sms.db')
}
# TxtLocal SMS settings
SENDER = '447786202240'
INBOX_ID = '498863'
API_KEY = 'Sap3A0EaE2k-xL6d4nLJuQdZriNxBByUjRhOCHM5X0'
API_URI = 'https://api.txtlocal.com/'
API_SEND_URI = API_URI + 'send/?'
API_RECEIVE_URI = API_URI + 'get_messages/?'
TEST_MODE = 1 # 1 (True) to enable test mode & 0 to disable.
|
Python
| 0
|
@@ -103,15 +103,8 @@
tc',
- 'nhs',
'sm
@@ -260,71 +260,8 @@
'),%0A
- 'nhs': 'sqlite:///' + os.path.join(_basedir, 'db/nhs.db'),%0A
|
6da7171e1e7c3c46103a0d0310fae1f9cbe8c055
|
add MACHO LAZY_BINDINGS test to test-security-check.py
|
contrib/devtools/test-security-check.py
|
contrib/devtools/test-security-check.py
|
#!/usr/bin/env python3
# Copyright (c) 2015-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Test script for security-check.py
'''
import subprocess
import unittest
def write_testcode(filename):
with open(filename, 'w', encoding="utf8") as f:
f.write('''
#include <stdio.h>
int main()
{
printf("the quick brown fox jumps over the lazy god\\n");
return 0;
}
''')
def call_security_check(cc, source, executable, options):
subprocess.check_call([cc,source,'-o',executable] + options)
p = subprocess.Popen(['./security-check.py',executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
(stdout, stderr) = p.communicate()
return (p.returncode, stdout.rstrip())
class TestSecurityChecks(unittest.TestCase):
def test_ELF(self):
source = 'test1.c'
executable = 'test1'
cc = 'gcc'
write_testcode(source)
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-zexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE']),
(1, executable+': failed PIE NX RELRO Canary'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE']),
(1, executable+': failed PIE RELRO Canary'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-no-pie','-fno-PIE']),
(1, executable+': failed PIE RELRO'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-pie','-fPIE']),
(1, executable+': failed RELRO'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE']),
(0, ''))
def test_PE(self):
source = 'test1.c'
executable = 'test1.exe'
cc = 'x86_64-w64-mingw32-gcc'
write_testcode(source)
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--no-nxcompat','-Wl,--no-dynamicbase','-Wl,--no-high-entropy-va']),
(1, executable+': failed DYNAMIC_BASE HIGH_ENTROPY_VA NX'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--no-dynamicbase','-Wl,--no-high-entropy-va']),
(1, executable+': failed DYNAMIC_BASE HIGH_ENTROPY_VA'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--dynamicbase','-Wl,--no-high-entropy-va']),
(1, executable+': failed HIGH_ENTROPY_VA'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--dynamicbase','-Wl,--high-entropy-va']),
(0, ''))
def test_MACHO(self):
source = 'test1.c'
executable = 'test1'
cc = 'clang'
write_testcode(source)
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-Wl,-allow_stack_execute','-fno-stack-protector']),
(1, executable+': failed PIE NOUNDEFS NX Canary'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-Wl,-allow_stack_execute','-fstack-protector-all']),
(1, executable+': failed PIE NOUNDEFS NX'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-fstack-protector-all']),
(1, executable+': failed PIE NOUNDEFS'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-fstack-protector-all']),
(1, executable+': failed PIE'))
self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-pie','-fstack-protector-all']),
(0, ''))
if __name__ == '__main__':
unittest.main()
|
Python
| 0.000002
|
@@ -3362,16 +3362,30 @@
DEFS NX
+LAZY_BINDINGS
Canary')
@@ -3598,16 +3598,30 @@
NDEFS NX
+ LAZY_BINDINGS
'))%0A
@@ -3798,16 +3798,30 @@
NOUNDEFS
+ LAZY_BINDINGS
'))%0A
@@ -3919,32 +3919,221 @@
otector-all'%5D),%0A
+ (1, executable+': failed PIE LAZY_BINDINGS'))%0A self.assertEqual(call_security_check(cc, source, executable, %5B'-Wl,-no_pie','-Wl,-bind_at_load','-fstack-protector-all'%5D),%0A
(1,
@@ -4239,24 +4239,44 @@
-Wl,-pie','-
+Wl,-bind_at_load','-
fstack-prote
|
10801bca03c03d6b6bb7b6108733178dcf5a8b53
|
Revert 87dbc5eb9665b5a145a3c2a190f64e2ce4c09fd4^..HEAD
|
shop/views.py
|
shop/views.py
|
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.views.generic.simple import direct_to_template
from shop.forms import OrderForm
from shop.models import EmailEntry, Order
from datetime import datetime
import urllib
from xml.dom import minidom
def index(request):
print request.META['HTTP_HOST']
if request.META['HTTP_HOST'] == 'www.opkikker.nl':
return HttpResponseRedirect('/opkikker')
else:
return HttpResponseRedirect('/rustgever')
def opkikker(request):
if request.POST:
form = EmailEntry.Form(request.POST)
if form.is_valid():
email = form.cleaned_data['email']
EmailEntry.objects.get_or_create(email=email, date_added=datetime.now())
form.clean()
return direct_to_template(request, 'opkikker.html', extra_context={'succes': True})
else:
return direct_to_template(request, 'opkikker.html', extra_context={'error': True, 'form': form,})
else:
form = EmailEntry.Form()
return direct_to_template(request, 'opkikker.html', extra_context={'form': form})
def rustgever(request):
return direct_to_template(request, 'rustgever.html')
def order(request):
if request.POST:
form = OrderForm(request.POST)
if form.is_valid():
order = form.save()
order.save()
total_amount = int(((order.product_price * order.product_amount) + order.product_shipment_cost) * 100)
return HttpResponseRedirect(get_payment_url(total_amount, order.id))
else:
return direct_to_template(request, 'bestel-rustgever.html', extra_context={'error': True, 'form': form,})
else:
form = OrderForm()
return direct_to_template(request, 'bestel-rustgever.html', extra_context={'form': form})
def get_payment_url(amount, id):
URL = "https://secure.mollie.nl/xml/ideal?a=create-link&partnerid=705747&amount="+str(amount)+"&description=Zen%20Garden%20Rustgever(tm)%20order_id%20"+str(id)+"&profile_key=e510805f"
print URL
result = urllib.urlopen(URL).read()
splits = result.split("<URL>")
return splits[1].split("</URL>")[0]
|
Python
| 0.000001
|
@@ -365,23 +365,22 @@
== '
-www.opkikker.nl
+localhost:8000
':%0A
|
c81f4d0659366e1512a4b64f0cce65d50de25927
|
update to 3.29.0
|
packages/dependencies/sqlite3.py
|
packages/dependencies/sqlite3.py
|
{
'repo_type' : 'archive',
'custom_cflag' : '-O2', # make sure we build it without -ffast-math
'download_locations' : [
{ 'url' : 'https://www.sqlite.org/2019/sqlite-autoconf-3280000.tar.gz', 'hashes' : [ { 'type' : 'sha256', 'sum' : 'd61b5286f062adfce5125eaf544d495300656908e61fca143517afcc0a89b7c3' }, ], },
{ 'url' : 'https://fossies.org/linux/misc/sqlite-autoconf-3280000.tar.gz', 'hashes' : [ { 'type' : 'sha256', 'sum' : 'd61b5286f062adfce5125eaf544d495300656908e61fca143517afcc0a89b7c3' }, ], },
],
'cflag_addition' : '-fexceptions -DSQLITE_ENABLE_COLUMN_METADATA=1 -DSQLITE_USE_MALLOC_H=1 -DSQLITE_USE_MSIZE=1 -DSQLITE_DISABLE_DIRSYNC=1 -DSQLITE_ENABLE_RTREE=1 -fno-strict-aliasing',
'configure_options': '--host={target_host} --prefix={target_prefix} --disable-shared --enable-static --enable-threadsafe --disable-editline --enable-readline --enable-json1 --enable-fts5 --enable-session',
'depends_on': (
'zlib',
),
'update_check' : { 'url' : 'https://www.sqlite.org/index.html', 'type' : 'httpregex', 'regex' : r'<a href="releaselog/.*\.html">Version (?P<version_num>[\d.]+)<\/a>' },
'_info' : { 'version' : '3.28.0', 'fancy_name' : 'libsqlite3' },
}
|
Python
| 0
|
@@ -167,33 +167,33 @@
lite-autoconf-32
-8
+9
0000.tar.gz', 'h
@@ -237,72 +237,72 @@
: '
-d61b5286f062adfce5125eaf544d495300656908e61fca143517afcc0a89b7c3
+8e7c1e2950b5b04c5944a981cb31fffbf9d2ddda939d536838ebc854481afd5b
' %7D,
@@ -370,17 +370,17 @@
oconf-32
-8
+9
0000.tar
@@ -432,72 +432,72 @@
: '
-d61b5286f062adfce5125eaf544d495300656908e61fca143517afcc0a89b7c3
+8e7c1e2950b5b04c5944a981cb31fffbf9d2ddda939d536838ebc854481afd5b
' %7D,
@@ -1131,17 +1131,17 @@
' : '3.2
-8
+9
.0', 'fa
|
0fee973ea7a4ca7b79c84ed55fa1d327c754beee
|
Add tests and some fixes for class extension pattern
|
readthedocs/core/utils/extend.py
|
readthedocs/core/utils/extend.py
|
"""Patterns for extending Read the Docs"""
from django.conf import settings
from django.utils.module_loading import import_by_path
from django.utils.functional import LazyObject
class SettingsOverrideObject(LazyObject):
"""Base class for creating class that can be overridden
This is used for extension points in the code, where we want to extend a
class without monkey patching it. This abstract class allows for lazy
inheritance, creating a class from the specified class or from a setting,
but only once the class is called.
Default to an instance of the class defined by :py:cvar:`_default_class`.
Next, look for an override setting class path in
``settings.CLASS_OVERRIDES``, which should be a dictionary of class paths.
The setting should be a dictionary keyed by the object path name::
CLASS_OVERRIDES = {
'readthedocs.core.resolver.Resolver': 'something.resolver.Resolver',
}
Lastly, if ``settings.CLASS_OVERRIDES`` is missing, or the key is not found,
attempt to pull the key :py:cvar:`_override_setting` from ``settings``.
"""
_default_class = None
_override_setting = None
def _setup(self):
"""Set up wrapped object
This is called when attributes are accessed on :py:cls:`LazyObject`
and the underlying wrapped object does not yet exist.
"""
cls = self._default_class
cls_path = (getattr(settings, 'CLASS_OVERRIDES', {})
.get(self.get_class_id()))
if cls_path is None:
cls_path = getattr(settings, self._override_setting, None)
if cls_path is not None:
cls = import_by_path(cls_path)
self._wrapped = cls()
def get_class_id(self):
# type() here, because LazyObject overrides some attribute access
return '.'.join([__name__, type(self).__name__])
|
Python
| 0
|
@@ -37,16 +37,32 @@
ocs%22%22%22%0A%0A
+import inspect%0A%0A
from dja
@@ -1519,16 +1519,17 @@
et(self.
+_
get_clas
@@ -1560,16 +1560,55 @@
path is
+None and self._override_setting is not
None:%0A
@@ -1791,16 +1791,17 @@
def
+_
get_clas
@@ -1915,17 +1915,72 @@
in(%5B
-__name__,
+inspect.getmodule(type(self)).__name__,%0A
typ
|
252da1473643916dd10e7a250d64c8bedb8ae5a9
|
Use username as id too; #35
|
judge/views/select2.py
|
judge/views/select2.py
|
from django.db.models import Q
from django.http import JsonResponse
from django.utils.encoding import smart_text
from django.views.generic.list import BaseListView
from judge.models import Profile, Organization, Problem, Comment, Contest
from judge.templatetags.gravatar import get_gravatar_url
class Select2View(BaseListView):
paginate_by = 20
def get(self, request, *args, **kwargs):
self.request = request
self.term = kwargs.get('term', request.GET.get('term', ''))
self.object_list = self.get_queryset()
context = self.get_context_data()
return JsonResponse({
'results': [
{
'text': smart_text(self.get_name(obj)),
'id': obj.pk,
} for obj in context['object_list']],
'more': context['page_obj'].has_next(),
})
def get_name(self, obj):
return unicode(obj)
class UserSelect2View(Select2View):
def get_queryset(self):
return Profile.objects.filter(Q(user__username__icontains=(self.term)) | Q(name__icontains=(self.term))) \
.select_related('user')
class OrganizationSelect2View(Select2View):
def get_queryset(self):
return Organization.objects.filter(Q(key__icontains=self.term) | Q(name__icontains=self.term))
class ProblemSelect2View(Select2View):
def get_queryset(self):
queryset = Problem.objects.filter(Q(code__icontains=self.term) | Q(name__icontains=self.term))
if not self.request.user.has_perm('judge.see_private_problem'):
filter = Q(is_public=True)
if self.request.user.is_authenticated():
filter |= Q(authors=self.request.user.profile)
queryset = queryset.filter(filter)
return queryset
class ContestSelect2View(Select2View):
def get_queryset(self):
queryset = Contest.objects.filter(Q(key__icontains=self.term) | Q(name__icontains=self.term))
if not self.request.user.has_perm('judge.see_private_contest'):
queryset = queryset.filter(is_public=True)
if not self.request.user.has_perm('judge.edit_all_contest'):
q = Q(is_private=False)
if self.request.user.is_authenticated():
q |= Q(organizations__in=self.request.user.profile.organizations.all())
queryset = queryset.filter(q)
return queryset
class CommentSelect2View(Select2View):
def get_queryset(self):
return Comment.objects.filter(Q(title__icontains=self.term) | Q(page__icontains=self.term))
class UserSearchSelect2View(BaseListView):
paginate_by = 20
def get(self, request, *args, **kwargs):
self.request = request
self.term = kwargs.get('term', request.GET.get('term', ''))
self.gravatar_size = request.GET.get('gravatar_size', 128)
self.gravatar_default = request.GET.get('gravatar_default', None)
self.object_list = (Profile.objects.filter(Q(user__username__icontains=(self.term)) |
Q(name__icontains=(self.term)))
.values_list('pk', 'user__username', 'user__email'))
context = self.get_context_data()
return JsonResponse({
'results': [
{
'text': username, 'id': pk,
'gravatar_url': get_gravatar_url(email, self.gravatar_size, self.gravatar_default),
} for pk, username, email in context['object_list']],
'more': context['page_obj'].has_next(),
})
def get_name(self, obj):
return unicode(obj)
|
Python
| 0.00001
|
@@ -3341,17 +3341,43 @@
ame,
- 'id': pk
+%0A 'id': username
,%0A
|
f1c49d33c829c56f0dff12a20563ca7a1b3fbc41
|
Print the other way around (makes more sense)
|
Toolkit/AimlessSurface.py
|
Toolkit/AimlessSurface.py
|
from __future__ import division
def work():
from scitbx import math
from scitbx.array_family import flex
N=15
lfg = math.log_factorial_generator(N)
nsssphe = math.nss_spherical_harmonics(6,50000,lfg)
l = 2
m = 1
t = 1
p = 1
print nsssphe.spherical_harmonic(2, 1, 1, 1)
def n_terms():
orders = {}
for j in range(10):
nterms = 0
for k in range(1, j+1):
for l in range(-k, k+1):
nterms += 1
orders[nterms] = j
print orders
def order_from_nterm(n):
return {0: 0, 80: 8, 3: 1, 8: 2, 15: 3, 48: 6, 99: 9, 35: 5, 24: 4, 63: 7}[n]
def evaluate_1degree(ClmList, png_filename):
from scitbx import math
from scitbx.array_family import flex
import math as pymath
import numpy
d2r = pymath.pi / 180.0
order = order_from_nterm(len(ClmList))
lfg = math.log_factorial_generator(2 * order + 1)
nsssphe = math.nss_spherical_harmonics(order,50000,lfg)
Clm = { }
idx = 0
for l in range(1, order+1):
for m in range(-l, l+1):
Clm[(l,m)] = ClmList[idx]
idx += 1
abscor = numpy.empty((1+360//1, 1+180//1), float, 'C')
sqrt2 = pymath.sqrt(2)
for t in range(0, 361, 1):
for p in range(0, 181, 1):
a = 1.0
for l in range(1, order+1):
for m in range(-l, l+1):
# Ylm = nsssphe.spherical_harmonic(l, m, t*d2r, p*d2r)
# Convert from complex to real according to
# http://en.wikipedia.org/wiki/Spherical_harmonics#Real_form
Ylm = nsssphe.spherical_harmonic(l, abs(m), t*d2r, p*d2r)
if m < 0:
a += Clm[(l,m)] * sqrt2 * ((-1) ** m) * Ylm.imag
elif m == 0:
assert(Ylm.imag == 0.0)
a += Clm[(l,m)] * Ylm.real
else:
a += Clm[(l,m)] * sqrt2 * ((-1) ** m) * Ylm.real
abscor[(t//1, p//1)] = a
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot
plot = pyplot.imshow(abscor)
pyplot.colorbar()
pyplot.savefig(png_filename)
return
def scrape_coefficients(log_file_name):
Clm = { }
c = 0
l = 0
coefficients = []
for record in open(log_file_name):
if 'Coefficient(Sd)' in record:
for token in record.split()[1:]:
coefficients.append(float(token.split('(')[0]))
return coefficients
if __name__ == '__main__':
import sys
evaluate_1degree(scrape_coefficients(sys.argv[1]), sys.argv[2])
|
Python
| 0.000078
|
@@ -1063,18 +1063,18 @@
((1+
-36
+18
0//1, 1+
180/
@@ -1069,18 +1069,18 @@
0//1, 1+
-18
+36
0//1), f
@@ -1788,15 +1788,15 @@
or%5B(
-t
+p
//1,
-p
+t
//1)
|
f1e071957214e787521c7de887ca1fe369671bc7
|
Add constants
|
UI/resources/constants.py
|
UI/resources/constants.py
|
# -*- coding: utf-8 -*-
SAVE_PASSWORD_HASHED = True
MAX_RETRIES_DOWNLOAD_FROM_SAME_FARMER = 3
MAX_RETRIES_UPLOAD_TO_SAME_FARMER = 3
MAX_RETRIES_NEGOTIATE_CONTRACT = 1000
MAX_RETRIES_GET_FILE_POINTERS = 100
GET_DEFAULT_TMP_PATH_FROM_ENV_VARIABLES = True
GET_HOME_PATH_FROM_ENV_VARIABLES = True
FILE_POINTERS_REQUEST_DELAY = 1
FILE_POINTERS_ITERATION_DELAY = 0.2
CONTRACT_NEGOTIATION_ITERATION_DELAY = 0.2
MAX_POINTERS_RESOLVED_IN_ONE_PART = 50
MINIMAL_ALLOWED_BRIDGE_REQUEST_TIMEOUT = 5
# int: file pointers request delay, file pointers iteration delay, max pointers resolved in one part,
# minimal bridge request timeout, in seconds.
MAX_DOWNLOAD_REQUEST_BLOCK_SIZE = 4 * 1024
MAX_UPLOAD_REQUEST_BLOCK_SIZE = 4096
MAX_UPLOAD_CONNECTIONS_AT_SAME_TIME = 4
MAX_DOWNLOAD_CONNECTIONS_AT_SAME_TIME = 4
CONCURRENT_UPLOADING = False
DEFAULT_MAX_BRIDGE_REQUEST_TIMEOUT = 90
DEFAULT_MAX_FARMER_CONNECTION_TIMEOUT = 7
DEFAULT_MAX_FARMER_DOWNLOAD_READ_TIMEOUT = 17
# int: maximum bridge request timeout, in seconds.
MAX_ALLOWED_UPLOAD_CONCURRENCY = 9999
MAX_ALLOWED_DOWNLOAD_CONCURRENCY = 9999
DEFAULT_BRIDGE_API_URL = 'api.storj.io'
# DESIGN
DISPLAY_FILE_CREATION_DATE_IN_MAIN = True
DISPLAY_FILE_ID_IN_MAIN = True
FILE_LIST_SORTING_MAIN_ENABLED = True
AUTO_SCROLL_UPLOAD_DOWNLOAD_QUEUE = True
SHOW_TRAY_ICON = False
BUCKETS_LIST_SORTING_ENABLED = True
MIRRORS_TREE_SORTING_ENABLED = True
FIXED_WINDOWS_SIZE = True
ALLOW_DOWNLOAD_FARMER_POINTER_CANCEL_BY_USER = True
ALLOW_UPLOAD_FARMER_CANCEL_BY_USER = True
DATA_TABLE_EDIT_ENABLED = False
# BLACKLISTING
FARMER_NODES_EXCLUSION_FOR_UPLOAD_ENABLED = True
FARMER_NODES_EXCLUSION_FOR_DOWNLOAD_ENABLED = True
BLACKLIST_MAX_LENGTH = 300
BLACKLISTING_MODE = 2
# 1 - blacklist all farmers to which shard have been recently uploaded
# 2 - blacklist only farmers to which transfer failed
# PATHS
USE_USER_ENV_PATH_FOR_TEMP = False
DEFAULT_ENCRYPTION_KEYS_DIRECTORY = ""
# SHARDING
DEFAULT_MAX_SHARD_SIZE = 4294967296 # 4Gb
DEFAULT_SHARD_SIZE = 2 * (1024 * 1024) # 8Mb
# UPLOAD
REED_SOLOMON_ENCODING_ENABLED = True
|
Python
| 0.000228
|
@@ -1123,16 +1123,61 @@
torj.io'
+%0ACONFIG_FILE_NAME = 'storj_client_config.xml'
%0A%0A# DESI
|
f2a0bbee61a144bf0d1de77dd4b41393fe7428bf
|
fix Ntests in simuNtests
|
simuNtests.py
|
simuNtests.py
|
# lance simulations pour different nombre d'electeurs
import multiprocessing
import os, sys
import shutil
import time
import numpy as np
from randomSets import *
def worker(((Ncandidats,q, Nwinners))):
"""worker function"""
sys.stdout.write('\nSTART -- %i candidats -- \n' % Ncandidats)
sys.stdout.flush()
time.sleep(0.01) # being sure that simulation are differently initialized
minNvoters = simulate(Ncandidats, q =q, Nwinners = Nwinners)
with open('nmin-candidates-%i' % Ncandidats,'a') as f_handle:
np.savetxt(f_handle,minNvoters)
return
if __name__ == '__main__':
print "Cette fois, c'est la bonne !"
print (time.strftime("%H:%M:%S"))
root = "simulations/"
try:
os.mkdir(root)
except OSError:
pass
Ncandidates = int(sys.argv[1])
Ntests = [sys.argv[1] if len(sys.argv) == 3 else 1000]
Nwinners = 1
args = []
print Ncandidates
for i in range(Ncandidates):
arg = [Ncandidates,100,1]
args.append(arg)
if args == []:
print "Rien a faire!"
pool = multiprocessing.Pool(processes=1)
pool.map(worker, args)
print "Alors, ca marche ? :)"
|
Python
| 0.000006
|
@@ -941,25 +941,20 @@
range(N
-candida
te
+st
s):%0A
@@ -1105,17 +1105,18 @@
ocesses=
-1
+20
)%0A po
|
3bf9ab0da4b06b8b0383fb6db64947886742899c
|
Add newline in log of builds after successful rebuild of website.
|
site/build.py
|
site/build.py
|
#!/usr/bin/env python
# -*- coding: ascii -*-
"""
This script can be used to build the website.
It is also run on each commit to github.
Example: ./build public_html
"""
import datetime
import os
import shutil
import subprocess
import sys
import time
BUILD_DIR = 'build'
def get_build_dir():
try:
build_dir = sys.argv[1]
except IndexError:
build_dir = BUILD_DIR
basedir = os.path.abspath(os.path.curdir)
build_dir = os.path.join(basedir, build_dir)
return build_dir
def build(dest):
source = os.path.split(os.path.abspath(__file__))[0]
source = os.path.join(source, 'src')
# We aren't doing anything fancy yet.
shutil.copytree(source, dest)
def update_gitrepo():
source = os.path.split(os.path.abspath(__file__))[0]
initial = os.getcwd()
try:
os.chdir(source)
subprocess.call(['git', 'pull'])
finally:
os.chdir(initial)
def main():
try:
min_delay = int(sys.argv[2]) * 60
except:
min_delay = 0
# Build only if enough time has passed.
build_dir = get_build_dir()
if os.path.exists(build_dir):
elapsed = time.time() - os.path.getmtime(build_dir)
if elapsed < min_delay:
print "Not enough time has elapsed since last build."
sys.exit(0)
else:
# Delete it all!
if os.path.islink(build_dir):
os.unlink(build_dir)
else:
shutil.rmtree(build_dir)
elif os.path.islink(build_dir):
# Then its a bad symlink.
os.unlink(build_dir)
#update_gitrepo()
build(build_dir)
subprocess.call(['touch', build_dir])
print "Done."
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -169,16 +169,55 @@
l%0A%0A%22%22%22%0A%0A
+from __future__ import print_function%0A%0A
import d
@@ -1274,17 +1274,17 @@
print
-
+(
%22Not eno
@@ -1322,16 +1322,17 @@
build.%22
+)
%0A
@@ -1727,16 +1727,19 @@
rint
-
+(
%22Done.
-%22
+%5Cn%22)
%0A%0Aif
|
50a6ac219a3ff9f9b6ed6614c8a54ab5e93b525a
|
set phid_valid to yes since received from phi [skip ci]
|
custom/icds/repeaters/generators/phi.py
|
custom/icds/repeaters/generators/phi.py
|
import json
from django.core.serializers.json import DjangoJSONEncoder
from corehq import toggles
from corehq.apps.hqcase.utils import update_case
from corehq.motech.repeaters.repeater_generators import (
CaseRepeaterJsonPayloadGenerator,
)
class BasePayloadGenerator(CaseRepeaterJsonPayloadGenerator):
@staticmethod
def enabled_for_domain(domain):
return toggles.PHI_CAS_INTEGRATION.enabled(domain)
class SearchByParamsPayloadGenerator(BasePayloadGenerator):
@staticmethod
def _gender(gender):
if gender:
if gender == 'male':
return 'M'
elif gender == 'female':
return 'F'
return ""
def get_payload(self, repeat_record, case):
data = self._setup_names(case)
data.update({
"gender": self._gender(case.get_case_property('gender')),
"villagecode": 442639,
"subdistrictcode": 3318,
"districtcode": 378,
"statecode": 22,
"dateofbirth": case.get_case_property('date_of_birth') or "",
"mobileno": case.get_case_property('mobile_number') or "",
"email": "",
"govt_id_name": "",
"govt_id_number": ""
})
return json.dumps(data, cls=DjangoJSONEncoder)
def _setup_names(self, case):
data = {}
self._setup_name(case.name, 'beneficaryname', 'namelocal', data)
self._setup_name(case.get_case_property('mothers_name'), 'mothername', 'mothernamelocal', data)
self._setup_name(case.get_case_property('fathers_name'), 'fathername', 'fathernamelocal', data)
self._setup_name(case.get_case_property('husbands_name'), 'husbandname', 'husbandnamelocal', data)
return data
def _setup_name(self, name, key_name, key_name_local, data):
data[key_name] = ""
data[key_name_local] = ""
if self._has_special_chars(name):
data[key_name_local] = name
else:
data[key_name] = name
@staticmethod
def _has_special_chars(value):
try:
value.encode(encoding='utf-8').decode('ascii')
except UnicodeDecodeError:
return True
return False
def handle_success(self, response, case, repeat_record):
phi_id = response.json().get('result', [{}])[0].get('phi_id', None)
if phi_id:
update_case(case.domain, case.case_id, {'phid_for_beneficiary': phi_id},
device_id=__name__ + ".search")
class ValidatePHIDPayloadGenerator(BasePayloadGenerator):
def get_payload(self, repeat_record, payload_doc):
data = {'phi_id': payload_doc.get_case_property('phid_for_beneficiary')}
return json.dumps(data, cls=DjangoJSONEncoder)
def handle_success(self, response, case, repeat_record):
case_update = {'phid_validated': 'yes'}
if response.json()['result'] == 'true':
case_update['phid_valid'] = 'yes'
else:
case_update['phid_valid'] = 'no'
update_case(case.domain, case.case_id, case_update,
device_id=__name__ + ".validate")
|
Python
| 0
|
@@ -2430,16 +2430,40 @@
case_id,
+%0A
%7B'phid_
@@ -2486,16 +2486,37 @@
: phi_id
+, 'phid_valid': 'yes'
%7D,%0A
|
a4d089ad89fea16c7d3fa4743355229d163c30c4
|
fix running info in screen module closes #783
|
kivy/modules/screen.py
|
kivy/modules/screen.py
|
'''
Screen
======
This module change some environement and configuration to match the density /
dpi / screensize of a specific devices.
To see a list of the available screenid, just run::
python main.py -m screen
Simulate a medium-density screen as Motolora Droid 2::
python main.py -m screen,droid2
Simulate a high-density screen as HTC One X, in portrait::
python main.py -m screen,onex,portrait
Simulate the iPad 2 screen::
python main.py -m screen,ipad
'''
import sys
from os import environ
from kivy.config import Config
from kivy.logger import Logger
# taken from http://en.wikipedia.org/wiki/List_of_displays_by_pixel_density
devices = {
# device: (name, width, height, dpi, density)
'onex': ('HTC One X', 1280, 720, 312, 2),
's3': ('Galaxy SIII', 1280, 720, 306, 2),
'droid2': ('Motolora Droid 2', 854, 480, 240, 1.5),
'xoom': ('Motolora Xoom', 1280, 800, 149, 1),
'ipad': ('iPad (1 and 2)', 1024, 768, 132, 1),
'ipad3': ('iPad 3', 2048, 1536, 264, 2),
'iphone4': ('iPhone 4', 640, 960, 326, 2),
'iphone5': ('iPhone 5', 640, 1136, 326, 2),
}
def start(win, ctx):
pass
def stop(win, ctx):
pass
def apply_device(device, scale, orientation):
name, width, height, dpi, density = devices[device]
if orientation == 'portrait':
width, height = height, width
Logger.info('Screen: Apply screen settings for {0}'.format(name))
Logger.info('Screen: size={0}x{1} dpi={2} density={3} '
'orientation={4}'.format(width, height, dpi, density, orientation))
environ['KIVY_METRICS_DENSITY'] = str(density)
environ['KIVY_DPI'] = str(dpi)
Config.set('graphics', 'width', str(width))
Config.set('graphics', 'height', str(height))
Config.set('graphics', 'fullscreen', '0')
Config.set('graphics', 'show_mousecursor', '1')
def usage(device=None):
if device:
Logger.error('Screen: The specified device ({0}) is unknow.',
device)
print '\nModule usage: python main.py -m screen,deviceid[,orientation]\n'
print 'Availables devices:\n'
print '{0:12} {1:<22} {2:<8} {3:<8} {4:<5} {5:<8}'.format(
'Device ID', 'Name', 'Width', 'Height', 'DPI', 'Density')
for device, info in devices.iteritems():
print '{0:12} {1:<22} {2:<8} {3:<8} {4:<5} {5:<8}'.format(
device, *info)
print '\n'
print 'Simulate a medium-density screen as Motolora Droid 2:\n'
print ' python main.py -m screen,droid2\n'
print 'Simulate a high-density screen as HTC One X, in portrait:\n'
print ' python main.py -m screen,onex,portrait\n'
print 'Simulate the iPad 2 screen\n'
print ' python main.py -m screen,ipad\n'
sys.exit(1)
def configure(ctx):
scale = ctx.pop('scale', None)
orientation = 'landscape'
ctx.pop('landscape', None)
if ctx.pop('portrait', None):
orientation = 'portrait'
if not ctx:
return usage(None)
device = ctx.keys()[0]
if device not in devices:
return usage('')
apply_device(device, scale, orientation)
|
Python
| 0
|
@@ -294,25 +294,25 @@
py -m screen
-,
+:
droid2%0A%0ASimu
@@ -387,33 +387,33 @@
ain.py -m screen
-,
+:
onex,portrait%0A%0AS
@@ -465,25 +465,25 @@
py -m screen
-,
+:
ipad%0A'''%0A%0Aim
|
c60a238f9d3d46500a823082d64de08a2c2f1ae4
|
Set args and filenames properly
|
kiwi/ui/test/runner.py
|
kiwi/ui/test/runner.py
|
#
# Kiwi: a Framework and Enhanced Widgets for Python
#
# Copyright (C) 2006 Async Open Source
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
# Author(s): Johan Dahlin <jdahlin@async.com.br>
#
"""
Runner - executes recorded scripts
"""
import doctest
import sys
import time
import gobject
from gtk import gdk
from kiwi.log import Logger
from kiwi.ui.test.common import WidgetIntrospecter
log = Logger('kiwi.ui.test.player')
class NotReadyYet(Exception):
pass
class MissingWidget(KeyError):
pass
class MagicWindowWrapper(object):
def __init__(self, window, ns):
self.window = window
self.ns = ns
def delete(self):
self.window.emit('delete-event', gdk.Event(gdk.DELETE))
def __getattr__(self, attr):
if not attr in self.ns:
raise MissingWidget(attr)
return self.ns[attr]
class Runner(object):
"""
@ivar parser:
"""
def __init__(self, filename):
self._filename = filename
self._pos = 0
self._windows = {}
self._ns = {}
self._source_id = -1
self.parser = doctest.DocTestParser()
self._stmts = self.parser.get_examples(open(filename).read())
wi = WidgetIntrospecter()
wi.register_event_handler()
wi.connect('window-added', self._on_wi__window_added)
wi.connect('window-removed', self._on_wi__window_removed)
# Callbacks
def _on_wi__window_added(self, wi, window, name, ns):
log.info('Window added: %s' % (name,))
self._windows[name] = MagicWindowWrapper(window, ns)
self._iterate()
def _on_wi__window_removed(self, wi, window, name):
log.info('Window removed: %s' % (name,))
del self._windows[name]
self._iterate()
# Private
def _iterate(self):
stmts = self._stmts
while True:
if self._pos == len(stmts):
self.quit()
break
ex = stmts[self._pos]
self._pos += 1
log.info('will now execute %r' % (ex.source[:-1],))
try:
exec compile(ex.source, self._filename,
'single', 0, 1) in self._ns
except NotReadyYet:
self._pos -= 1
break
except (SystemExit, KeyboardInterrupt):
raise SystemExit
except MissingWidget, e:
raise SystemExit(
"ERROR: Could not find widget: %s" % str(e))
except Exception, e:
import traceback
traceback.print_exc()
raise SystemExit
log.info('Executed %r' % (ex.source[:-1],))
self._last = time.time()
# Public API
def quit(self):
print '* Executed successfully'
sys.exit(0)
def start(self):
self._last = time.time()
def sleep(self, duration):
"""
@param duration:
"""
# We don't want to block the interface here which means that
# we cannot use time.sleep.
# Instead we schedule another execute iteration in the future
# and raises NotReadyYet which stops the interpreter until
# iterate is called again.
def _iter():
# Turn ourselves off and allow future calls to wait() to
# queue new waits.
self._source_id = -1
# Iterate, which will call us again
self._iterate()
return False
if self._source_id != -1:
raise NotReadyYet
# The delta is the last time we executed a statement minus
delta = (self._last + duration) - time.time()
if delta > 0:
ms = int(delta * 1000)
self._source_id = gobject.timeout_add(ms, _iter)
raise NotReadyYet
# Okay, we've waited enough, let's go back to business
def waitopen(self, window_name):
"""
@param window_name:
"""
if not window_name in self._windows:
raise NotReadyYet(window_name)
return self._windows[window_name]
def waitclose(self, window_name):
"""
@param window_name:
"""
if window_name in self._windows:
raise NotReadyYet(window_name)
runner = None
def play_file(script, filename=None, args=None):
"""
@param script:
@param filename:
@param args:
"""
global runner
log.info('Running script %s' % script)
runner = Runner(script)
if filename is None:
fd = open(script)
data = fd.readline()[:-1] + fd.readline()[:-1]
# Check for run: lines in the doctests
# run: ....
pos = data.find('run:')
if pos != -1:
rest = data[pos+5:]
# run: foo --arg
if ' ' in rest:
filename, args = rest.split(' ', 1)
# run: foo
else:
filename = rest
else:
if args is None:
args = []
sys.argv = [filename] + args[:]
execfile(sys.argv[0], globals(), globals())
|
Python
| 0.000001
|
@@ -5465,17 +5465,16 @@
pos+5:%5D%0A
-%0A
@@ -5510,17 +5510,16 @@
if '
-
' in re
@@ -5574,16 +5574,46 @@
' ', 1)%0A
+ args = %5Bargs%5D%0A
|
798f80c3efe06869194adf7073af574cc94481b9
|
add to init
|
km3modules/__init__.py
|
km3modules/__init__.py
|
# coding=utf-8
# Filename: __init__.py
# pylint: disable=locally-disabled
"""
A collection of commonly used modules.
"""
from km3modules.common import (Dump, Delete, HitCounter, BlobIndexer, Keep,
StatusBar, MemoryObserver, Wrap)
from km3modules.reco import SvdFit as PrimFit
from km3modules.reco import SvdFit
|
Python
| 0.000006
|
@@ -253,16 +253,21 @@
er, Wrap
+, Cut
)%0Afrom k
|
c3f3b20c24c7d928698ca4e6d6ca197a9d314965
|
Filter out Nones
|
corehq/ex-submodules/casexml/apps/phone/data_providers/case/stock.py
|
corehq/ex-submodules/casexml/apps/phone/data_providers/case/stock.py
|
from collections import defaultdict
from casexml.apps.stock.consumption import compute_default_monthly_consumption, \
ConsumptionConfiguration
from corehq.form_processor.exceptions import LedgerValueNotFound
from corehq.form_processor.interfaces.dbaccessors import LedgerAccessors
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.parsing import json_format_datetime
from datetime import datetime
from casexml.apps.stock.const import COMMTRACK_REPORT_XMLNS
def get_stock_payload(project, stock_settings, case_stub_list):
if project and not project.commtrack_enabled:
return
generator = StockPayloadGenerator(project.name, stock_settings, case_stub_list)
for section in generator.yield_sections():
yield section
class StockPayloadGenerator(object):
def __init__(self, domain_name, stock_settings, case_stub_list):
self.domain_name = domain_name
self.stock_settings = stock_settings
self.case_stub_list = case_stub_list
from lxml.builder import ElementMaker
self.elem_maker = ElementMaker(namespace=COMMTRACK_REPORT_XMLNS)
def yield_sections(self):
case_ids = [case.case_id for case in self.case_stub_list]
all_current_ledgers = LedgerAccessors(self.domain_name).get_current_ledger_state(case_ids)
for case_stub in self.case_stub_list:
case_id = case_stub.case_id
case_ledgers = all_current_ledgers[case_id]
section_timestamp_map = defaultdict(lambda: json_format_datetime(datetime.utcnow()))
for section_id in sorted(case_ledgers.keys()):
state_map = case_ledgers[section_id]
stock_states = sorted(state_map.values(), key=lambda s: s.product_id)
as_of = json_format_datetime(max(txn.last_modified_date for txn in stock_states))
section_timestamp_map[section_id] = as_of
yield self.elem_maker.balance(
*(self._state_to_xml(e) for e in stock_states),
**{'entity-id': case_id, 'date': as_of, 'section-id': section_id}
)
for section in self._consumption_sections(case_stub, case_ledgers, section_timestamp_map):
yield section
def _consumption_sections(self, case_stub, case_ledgers, section_timestamp_map):
case_id = case_stub.case_id
for section_id, consumption_section_id in self.stock_settings.section_to_consumption_types.items():
if section_id in case_ledgers or self.stock_settings.force_consumption_case_filter(case_stub):
consumption_entries = []
current_section_sate = case_ledgers.get(section_id, {})
if self.stock_settings.default_product_list:
for product_id in self.stock_settings.default_product_list:
state = current_section_sate.get(product_id, None)
if not state:
try:
state = LedgerAccessors(self.domain_name).get_ledger_value(
case_id, section_id, product_id
)
except LedgerValueNotFound:
pass
consumption_entries.append(
self._consumption_entry(case_id, product_id, state)
)
else:
for product_id in sorted(current_section_sate.keys()):
state = current_section_sate[product_id]
consumption_entries.append(
self._consumption_entry(case_id, product_id, state)
)
if consumption_entries:
yield self.elem_maker.balance(
*consumption_entries,
**{
'entity-id': case_id,
'date': section_timestamp_map[section_id],
'section-id': consumption_section_id,
}
)
def _entry_xml(self, id, quantity):
return self.elem_maker.entry(
id=id,
quantity=str(int(quantity)),
)
def _state_to_xml(self, state):
return self._entry_xml(state.product_id, state.stock_on_hand)
def _consumption_entry(self, case_id, product_id, state):
if state and state.daily_consumption is not None:
return self._entry_xml(state.product_id, state.daily_consumption)
return self._default_consumption_entry(case_id, product_id)
@memoized
def _default_consumption_entry(self, case_id, product_id):
configuration = self.stock_settings.consumption_config or ConsumptionConfiguration()
default_value = compute_default_monthly_consumption(
case_id,
product_id,
configuration,
)
if default_value is not None:
return self._entry_xml(product_id, default_value)
|
Python
| 0.999635
|
@@ -3758,32 +3758,123 @@
)%0A%0A
+ consumption_entries = filter(lambda e: e is not None, consumption_entries)%0A
|
b5f8e3f8dd8d2d99494be83bdddbc1a6078c3161
|
Test cleanup connectivity test added.
|
package/tests/test_connectivity/test_cleanup_connectivity.py
|
package/tests/test_connectivity/test_cleanup_connectivity.py
|
from unittest import TestCase
from mock import Mock
from cloudshell.cp.azure.domain.services.network_service import NetworkService
from cloudshell.cp.azure.domain.services.tags import TagService
from cloudshell.cp.azure.domain.services.virtual_machine_service import VirtualMachineService
from cloudshell.cp.azure.domain.vm_management.operations.delete_operation import DeleteAzureVMOperation
from tests.helpers.test_helper import TestHelper
class TestCleanupConnectivity(TestCase):
def setUp(self):
self.logger = Mock()
self.vm_service = VirtualMachineService()
self.network_service = NetworkService()
self.tags_service = TagService()
self.delete_operation = DeleteAzureVMOperation(logger=self.logger,
vm_service=self.vm_service,
network_service=self.network_service,
tags_service=self.tags_service)
def test_cleanup(self):
"""
:return:
"""
# Arrange
self.vm_service.delete_resource_group = Mock()
tested_group_name = "test_group"
resource_client = Mock()
# Act
self.delete_operation.delete_resource_group(resource_client=resource_client, group_name=tested_group_name)
# Verify
self.assertTrue(TestHelper.CheckMethodCalledXTimes(self.vm_service.delete_resource_group))
self.vm_service.delete_resource_group.assert_called_with(resource_management_client=resource_client,
group_name=tested_group_name)
|
Python
| 0
|
@@ -1143,32 +1143,87 @@
_group = Mock()%0A
+ self.vm_service.delete_sandbox_subnet = Mock()%0A
tested_g
@@ -1284,108 +1284,733 @@
k()%0A
-%0A # Act%0A self.delete_operation.delete_resource_group(resource_client=resource_client,
+ network_client = Mock()%0A cloud_provider_model = Mock()%0A%0A vnet = Mock()%0A subnet = Mock()%0A subnet.name = tested_group_name%0A vnet.subnets = %5Bsubnet%5D%0A reservation = Mock()%0A reservation.reservation_id = tested_group_name%0A self.network_service.get_sandbox_virtual_network = Mock(return_value=vnet)%0A%0A # Act%0A self.delete_operation.delete_resource_group(resource_client=resource_client, group_name=tested_group_name)%0A self.delete_operation.delete_sandbox_subnet(network_client=network_client,%0A cloud_provider_model=cloud_provider_model,%0A resource_
grou
@@ -2152,16 +2152,126 @@
group))%0A
+ self.assertTrue(TestHelper.CheckMethodCalledXTimes(self.network_service.get_sandbox_virtual_network))%0A
@@ -2446,28 +2446,695 @@
oup_name=tested_group_name)%0A
+%0A def test_delete_sandbox_subnet_on_error(self):%0A # Arrange%0A self.vm_service.delete_resource_group = Mock()%0A self.vm_service.delete_sandbox_subnet = Mock()%0A tested_group_name = %22test_group%22%0A vnet = Mock()%0A subnet = Mock()%0A subnet.name = %22test_group_for_exception%22%0A vnet.subnets = %5Bsubnet%5D%0A reservation = Mock()%0A reservation.reservation_id = tested_group_name%0A self.network_service.get_sandbox_virtual_network = Mock(return_value=vnet)%0A%0A # Act%0A self.assertRaises(Exception,%0A self.delete_operation.delete_sandbox_subnet,%0A )%0A%0A
|
e5ed0e4e6dea58a1412e3c596612e647bd22c619
|
Update __init__.py
|
krempelair/__init__.py
|
krempelair/__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import jinja2
import flask
import views
class Krempelair(flask.Flask):
jinja_options = {
'extensions': ['jinja2.ext.autoescape'],
'undefined': jinja2.StrictUndefined
}
def __init__(self):
"""(See `make_app` for parameter descriptions.)"""
flask.Flask.__init__(self, __name__)
self.setup_routes()
def create_jinja_environment(self):
"""Called by Flask.__init__"""
env = super(Krempelair, self).create_jinja_environment()
for func in [
'force_unicode',
'timesince',
'shorten_sha1',
'shorten_message',
'extract_author_name',
'formattimestamp',
]:
env.filters[func] = getattr(utils, func)
return env
def setup_routes(self):
for endpoint, rule in [
('air_get_status_betrieb', '/'),
('air_get_status_stoerung', '/stoerung'),
('air_set_status', '/<int:pin>/<int:state>'),
('air_set_level', '/lueftung/stufe/<int:level>'),
('air_set_timer', '/lueftung/timer/<int:time>'),
('air_set_temp', '/lueftung/temperatur/<int:temp>'),
('air_set_raucherraum_on', '/raucherraum/on'),
('air_set_raucherraum_off', '/raucherraum/off'),
('air_get_temperaturen', '/lueftung/temperatur'),
]:
self.add_url_rule(rule, view_func=getattr(views, endpoint))
if __name__ == "__main__":
app = Krempelair()
app.run(host="0.0.0.0", debug=True)
else:
application = Krempelair()
|
Python
| 0.000072
|
@@ -1225,16 +1225,105 @@
peratur/
+sollTemp/%3Cint:temp%3E'),%0A ('air_set_tempNAK', '/lueftung/temperatur/sollTempNAK/
%3Cint:tem
|
3f725f25b0896237b71f68993d9ffa24329f47fe
|
Keep the same format with other usage: capitalize the head letter
|
kuryr/common/config.py
|
kuryr/common/config.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Routines for configuring Kuryr
"""
import os
from oslo_config import cfg
from kuryr import i18n
_ = i18n._
core_opts = [
cfg.StrOpt('pybasedir',
default=os.path.abspath(os.path.join(os.path.dirname(__file__),
'../../')),
help=_('Directory where kuryr python module is installed.')),
cfg.StrOpt('bindir',
default='$pybasedir/usr/libexec/kuryr',
help=_('Directory for kuryr vif binding executables.')),
cfg.StrOpt('kuryr_uri',
default='http://127.0.0.1:2377',
help=_('Kuryr URL for accessing Kuryr through json rpc.')),
cfg.StrOpt('capability_scope',
default='global',
choices=['local', 'global'],
help=_('Kuryr plugin scope reported to libnetwork.')),
]
neutron_opts = [
cfg.StrOpt('neutron_uri',
default=os.environ.get('OS_URL', 'http://127.0.0.1:9696'),
help=_('Neutron URL for accessing the network service.')),
cfg.StrOpt('enable_dhcp',
default='False',
help=_('Enable or Disable dhcp for neutron subnets.')),
]
keystone_opts = [
cfg.StrOpt('auth_uri',
default=os.environ.get('IDENTITY_URL',
'http://127.0.0.1:35357'),
help=_('The URL for accessing the identity service.')),
cfg.StrOpt('admin_user',
default=os.environ.get('SERVICE_USER'),
help=_('The admin username.')),
cfg.StrOpt('admin_tenant_name',
default=os.environ.get('SERVICE_TENANT_NAME'),
help=_('The admin username.')),
cfg.StrOpt('admin_password',
default=os.environ.get('SERVICE_PASSWORD'),
help=_('The admin password.')),
cfg.StrOpt('admin_token',
default=os.environ.get('SERVICE_TOKEN'),
help=_('The admin token.')),
]
binding_opts = [
cfg.StrOpt('veth_dst_prefix',
default='eth',
help=('The name prefix of the veth endpoint put inside the '
'container.'))
]
CONF = cfg.CONF
CONF.register_opts(core_opts)
CONF.register_opts(neutron_opts, group='neutron_client')
CONF.register_opts(keystone_opts, group='keystone_client')
CONF.register_opts(binding_opts, 'binding')
def init(args, **kwargs):
cfg.CONF(args=args, project='kuryr',
version='0.1', **kwargs)
|
Python
| 0.999999
|
@@ -906,17 +906,17 @@
y where
-k
+K
uryr pyt
@@ -1061,17 +1061,17 @@
ory for
-k
+K
uryr vif
|
0258b5a219a6c61c5b7cc526a0c9d5a7f9ebf78e
|
print() function for Python 3
|
src/cpu/scheduler_profile.py
|
src/cpu/scheduler_profile.py
|
#!/usr/bin/python
import json
import sys
STATE_COLOURS = {'S': '#acff90', 'R': '#ffaeae', 'D': '#fce94f', 'K': '#c00', 'x': '#0c0', 'U': '#ccc'}
STROKE_COLOURS = {'S': '#679657', 'R': '#b07979', 'D': '#b3a639', 'K': '#600', 'x': '#060', 'U': '#aaa'}
STATE_DESCRIPTORS = {'S': 'Sleeping', 'R': 'Runnable', 'D': 'Blocked I/O', 'K': 'Killed', 'x': 'Dead'}
def write_svg_header(writer, width, height):
writer.write(
'<?xml version="1.0" standalone="no"?>' +
'<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">')
writer.write(
'<svg version="1.1" width="' + str(width) + '" height="' + str(height) + '" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">\n')
writer.write(
'<text text-anchor="middle" x="{}" y="30" font-size="20" font-family="monospace" fill="#000">Thread scheduling states</text>'.format(width / 2))
def write_svg_footer(writer):
writer.write('</svg>\n')
def calculate_number_of_columns(cpu_tenancy_by_pid):
column_count = 0
for pid in cpu_tenancy_by_pid:
column_count += len(cpu_tenancy_by_pid[pid])
return column_count
def get_fill(state):
return STATE_COLOURS[state]
def get_stroke(state):
return STROKE_COLOURS[state]
def get_state_description(state):
if state not in STATE_DESCRIPTORS:
return 'Other'
return STATE_DESCRIPTORS[state]
def write_cell(writer, x_offset, y_offset, width, height, state, thread_name, count, total, text_written):
state_percentage = 100 * (count / float(total))
cell_text = '{}/{} ({:.2f}%)'.format(thread_name, get_state_description(state), state_percentage)
writer.write('<g><title>{}</title>'.format(cell_text))
writer.write(
'<rect x="{}" y="{}" width="{}" height="{}" style="fill: {}; stroke:{}">'.format(x_offset, y_offset, width, height, get_fill(state), get_stroke(state)))
writer.write('</rect>\n')
if not text_written:
writer.write(
'<text x="{}" y="{}" width="{}" font-size="12" font-family="monospace" fill="#000">{}</text>'.format(
x_offset, y_offset + 12, width, cell_text))
writer.write('</g>\n')
def write_svg(width, height, thread_scheduling, max_total, tid_to_thread_name, process_id):
output_filename = 'scheduler-profile-{}.svg'.format(process_id)
writer = open(output_filename, 'w')
write_svg_header(writer, width, height)
row_height = float((height - 60) / len(thread_scheduling))
border = 10
y_offset = 50
for tid in sorted(thread_scheduling.iterkeys()):
x_offset = border
tid_sample_count = thread_scheduling[tid]['total']
single_sample_width = float((width - (2 * border)) / float(thread_scheduling[tid]['total']))
text_written = False
for state in ['S', 'R', 'D', 'U', 'x', 'K']:
sample_count = thread_scheduling[tid][state]
if sample_count > 0:
state_width = sample_count * single_sample_width
write_cell(writer, x_offset, y_offset, state_width, row_height, state, tid_to_thread_name[tid],
sample_count, tid_sample_count, text_written)
x_offset += state_width
text_written = True
y_offset += row_height
write_svg_footer(writer)
writer.close()
print "Wrote {}".format(output_filename)
def get_tid_to_thread_name(jstack_file):
tid_to_thread_name = dict()
for line in open(jstack_file):
if line.find("nid=") != -1:
try:
hex_tid = line.split('nid=')[1].split(" ")[0]
thread_name = line.split('"')[1]
decimal_tid = int(hex_tid, 0)
tid_to_thread_name[str(decimal_tid)] = thread_name
except IndexError:
print "Failed to parse tid from line: " + line
return tid_to_thread_name
def filter_scheduler_info(thread_scheduling_info, threads_to_include):
max_total_value = 0
filtered = dict()
for k in thread_scheduling_info.iterkeys():
if k in threads_to_include.keys():
if thread_scheduling_info[k]["S"] != thread_scheduling_info[k]["total"] and thread_scheduling_info[k]["total"] != 0:
filtered[k] = thread_scheduling_info[k]
if filtered[k]['total'] > max_total_value:
max_total_value = filtered[k]['total']
return (filtered, max_total_value)
if __name__ == "__main__":
tid_to_thread_name = get_tid_to_thread_name(sys.argv[1])
process_id = sys.argv[2]
thread_scheduling = json.load(sys.stdin)
filtered_scheduling, max_total = filter_scheduler_info(thread_scheduling, tid_to_thread_name)
if len(filtered_scheduling) == 0:
print "No samples for pid {}".format(process_id)
else:
write_svg(1200, 660, filtered_scheduling, max_total, tid_to_thread_name, process_id)
|
Python
| 0.999804
|
@@ -3374,17 +3374,17 @@
print
-
+(
%22Wrote %7B
@@ -3409,16 +3409,17 @@
ilename)
+)
%0A%0A%0Adef g
@@ -3850,17 +3850,17 @@
print
-
+(
%22Failed
@@ -3891,16 +3891,17 @@
%22 + line
+)
%0A ret
@@ -4789,17 +4789,17 @@
print
-
+(
%22No samp
@@ -4828,24 +4828,25 @@
(process_id)
+)
%0A else:%0A
|
f751f0bc4ea5466052fdd11a674ddb9a0a3303a4
|
Fix pylint
|
dataset/models/tf/layers/core.py
|
dataset/models/tf/layers/core.py
|
""" Contains common layers """
import numpy as np
import tensorflow as tf
def flatten2d(inputs, name=None):
""" Flatten tensor to two dimensions (batch_size, item_vector_size) """
x = tf.convert_to_tensor(inputs)
dims = tf.reduce_prod(tf.shape(x)[1:])
x = tf.reshape(x, [-1, dims], name=name)
return x
def flatten(inputs, name=None):
""" Flatten tensor to two dimensions (batch_size, item_vector_size) using inferred shape and numpy """
x = tf.convert_to_tensor(inputs)
shape = x.get_shape().as_list()
dim = np.prod(shape[1:])
x = tf.reshape(x, [-1, dim], name=name)
return x
def maxout(inputs, depth, axis=-1, name='max'):
""" Shrink last dimension by making max pooling every ``depth`` channels """
with tf.name_scope(name):
x = tf.convert_to_tensor(inputs)
shape = x.get_shape().as_list()
shape[axis] = -1
shape += [depth]
for i, _ in enumerate(shape):
if shape[i] is None:
shape[i] = tf.shape(x)[i]
out = tf.reduce_max(tf.reshape(x, shape), axis=-1, keep_dims=False)
return out
def mip(inputs, depth, data_format='channels_last', name='mip'):
""" Maximum intensity projection by shrinking the channels dimension with max pooling every ``depth`` channels """
with tf.name_scope(name):
x = tf.convert_to_tensor(inputs)
axis = -1 if data_format == 'channels_last' else 1
num_layers = x.get_shape().as_list()[axis]
split_sizes = [depth] * (num_layers // depth)
if num_layers % depth:
split_sizes += [num_layers % depth]
splits = tf.split(x, split_sizes, axis=axis)
mips = []
for split in splits:
amip = tf.reduce_max(split, axis=axis)
mips.append(amip)
mips = tf.stack(mips, axis=axis)
return mips
def alpha_dropout(inputs, rate=0.5, noise_shape=None, seed=None, training=False, name=None):
""" Alpha dropout layer
Alpha Dropout is a dropout that maintains the self-normalizing property.
For an input with zero mean and unit standard deviation, the output of Alpha Dropout maintains
the original mean and standard deviation of the input.
Klambauer G. et al "`Self-Normalizing Neural Networks <https://arxiv.org/abs/1706.02515>`_"
"""
def dropped_inputs():
return tf.contrib.nn.alpha_dropout(inputs, 1-rate, noise_shape=noise_shape, seed=seed)
return tf.cond(training, dropped_inputs, lambda: tf.identity(inputs))
|
Python
| 0.000099
|
@@ -2331,16 +2331,17 @@
def
+_
dropped_
@@ -2474,16 +2474,17 @@
aining,
+_
dropped_
@@ -2518,9 +2518,21 @@
(inputs)
-)
+, name=name)%0A
|
130663a47fe3c497aedd39acd12de70bab230dec
|
make things login free
|
src/datahub/browser/views.py
|
src/datahub/browser/views.py
|
import json, sys, re, hashlib, smtplib, base64, urllib, os
from auth import *
from django.http import *
from django.shortcuts import render_to_response
from django.views.decorators.csrf import csrf_exempt
from django.core.context_processors import csrf
from django.core.validators import email_re
from django.db.utils import IntegrityError
from django.utils.http import urlquote_plus
'''
@author: Anant Bhardwaj
@date: Mar 21, 2013
Datahub Web Handler
'''
@login_required
def home(request):
try:
user = request.session[kUsername]
return HttpResponseRedirect(user)
except KeyError:
return HttpResponseRedirect('/login')
except Exception, e:
return HttpResponse(
{'error': str(e)},
mimetype="application/json")
def user(request, username):
try:
if(username):
res = manager.list_repos(username)
repos = [t[0] for t in res['tuples']]
return render_to_response("user.html", {
'username': username,
'repos': repos})
except Exception, e:
return HttpResponse(
{'error': str(e)},
mimetype="application/json")
def repo(request, username, repo):
try:
res = manager.list_tables(username, repo)
tables = [t[0] for t in res['tuples']]
return render_to_response("repo.html", {
'username': username,
'repo': repo,
'tables': tables})
except Exception, e:
return HttpResponse(
{'error': str(e)},
mimetype="application/json")
def table(request, username, repo, table):
try:
res = manager.execute_sql(
username=username,
query='SELECT * from %s.%s.%s' %(username, repo, table))
column_names = res['column_names']
tuples = res['tuples']
return render_to_response("table.html", {
'username': username,
'repo': repo,
'table': table,
'column_names': column_names,
'tuples': tuples})
except Exception, e:
return HttpResponse(
{'error': str(e)},
mimetype="application/json")
|
Python
| 0
|
@@ -568,13 +568,22 @@
ect(
+'/%25s' %25(
user)
+)
%0A e
|
8712b50048b3fe42fbeb725f92f20bda08cfcc28
|
update output string formatting
|
sknano/structure_io/_xyz_structure_data.py
|
sknano/structure_io/_xyz_structure_data.py
|
# -*- coding: utf-8 -*-
"""
==============================================================================
XYZ format (:mod:`sknano.structure_io._xyz_structure_data`)
==============================================================================
.. currentmodule:: sknano.structure_io._xyz_structure_data
"""
from __future__ import division, print_function, absolute_import
__docformat__ = 'restructuredtext'
from pksci.chemistry import Atom
from pkshared.tools.fiofuncs import get_fpath
from ._structure_data import StructureReader, StructureReaderError, \
StructureWriter
__all__ = ['XYZDATA', 'XYZReader', 'XYZWriter']
class XYZReader(StructureReader):
"""Class for reading xyz chemical file format.
Parameters
----------
xyzfile : str
xyz structure file
"""
def __init__(self, fname=None):
super(XYZReader, self).__init__(fname=fname)
self._read()
def _read(self):
with open(self._fname, 'r') as f:
self._Natoms = int(f.readline().strip())
self._comment_line = f.readline().strip()
lines = f.readlines()
for line in lines:
s = line.strip().split()
if len(s) != 0:
atom = \
Atom(s[0], x=float(s[1]), y=float(s[2]), z=float(s[3]))
self._atoms.append(atom)
class XYZWriter(StructureWriter):
"""Class for writing xyz chemical file format."""
@classmethod
def write(cls, fname=None, atoms=None, comment_line=None):
"""Write structure data to file.
Parameters
----------
fname : str
atoms : Atoms
:py:class:`~pksci.chemistry.Atoms` instance.
comment_line : str, optional
"""
if fname is None:
raise TypeError('fname argument must be a string!')
elif atoms is None:
raise TypeError('atoms argument must be an Atoms object')
else:
fname = get_fpath(fname=fname, ext='xyz', overwrite=True)
if comment_line is None:
comment_line = fname
atoms.fix_minus_zero_coords()
with open(fname, 'w') as f:
f.write('{:d}\n'.format(atoms.Natoms))
f.write('{}\n'.format(comment_line))
for atom in atoms:
f.write('{:3s} {:10.5f} {:10.5f} {:10.5f}\n'.format(
atom.symbol, atom.x, atom.y, atom.z))
class XYZDATA(XYZReader):
"""Class for reading and writing structure data in XYZ data format.
Parameters
----------
fname : str, optional
"""
def __init__(self, fname=None):
try:
super(XYZDATA, self).__init__(fname=fname)
except StructureReaderError:
pass
def write(self, xyzfile=None):
if xyzfile is None:
xyzfile = self._fname
XYZWriter.write(fname=xyzfile, atoms=self._atoms,
comment_line=self._comment_line)
|
Python
| 0.000003
|
@@ -2379,34 +2379,32 @@
('%7B:
+%3E
3s%7D
-
%7B:1
-0
+5
.5f%7D
-
%7B:1
-0
+5
.5f%7D
-
%7B:1
-0
+5
.5f%7D
|
8758e8ba90e54c35778cd0e2fdbc04869752ee29
|
Revert "Add database and message queue config for prod"
|
stagecraft/settings/production.py
|
stagecraft/settings/production.py
|
# preview/staging/production all use the same config with the exception of the
# environment-specific secrets
#
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
#
import os
import random
from os.path import abspath, dirname, join as pjoin
from .common import *
from .environment_specific_settings import *
# Hardening flags: production never runs with debug output enabled.
DEBUG = False
TEMPLATE_DEBUG = False
CSRF_COOKIE_SECURE = True  # avoid transmitting the CSRF cookie over HTTP
SESSION_COOKIE_SECURE = True  # avoid transmitting the session cookie over HTTP
USE_DEVELOPMENT_USERS = False
ALLOWED_HOSTS = [
    '*',
]
# NOTE(review): ENV_HOSTNAME / PUBLIC_HOSTNAME come from the star imports
# above (environment-specific settings) — confirm against those modules.
APP_HOSTNAME = 'stagecraft{0}'.format(ENV_HOSTNAME)
APP_ROOT = 'https://{0}'.format(APP_HOSTNAME)
GOVUK_WEBSITE_ROOT = os.getenv('GOVUK_WEBSITE_ROOT')
# Static assets are collected under <repo root>/public/stagecraft.
BASE_DIR = abspath(pjoin(dirname(__file__), '..', '..'))
STATIC_URL = '{0}/stagecraft/'.format(os.getenv('GOVUK_ASSET_HOST'))
STATIC_ROOT = abspath(pjoin(BASE_DIR, 'public', 'stagecraft'))
# Backdrop / Signon service endpoints, derived from the GOV.UK app domain.
BACKDROP_PUBLIC_URL = 'https://www{0}'.format(PUBLIC_HOSTNAME)
BACKDROP_READ_URL = 'https://backdrop-read.{0}'.format(
    os.getenv('GOVUK_APP_DOMAIN'))
BACKDROP_WRITE_URL = 'https://backdrop-write.{0}'.format(
    os.getenv('GOVUK_APP_DOMAIN'))
SIGNON_URL = 'https://signon.{0}'.format(os.getenv('GOVUK_APP_DOMAIN'))
# Primary PostgreSQL database; the password is injected via environment.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'stagecraft',
        'USER': 'stagecraft',
        'PASSWORD': os.getenv('DATABASE_PASSWORD'),
        'HOST': 'api-postgresql-primary-1',
        'PORT': '5432',
    }
}
# Three RabbitMQ brokers; the list is shuffled per-process below so that
# connections spread across the cluster.
BROKER_URL = [
    'amqp://stagecraft:{0}@rabbitmq-1.backend:5672//stagecraft'.format(
        os.getenv('MESSAGE_QUEUE_PASSWORD')),
    'amqp://stagecraft:{0}@rabbitmq-2.backend:5672//stagecraft'.format(
        os.getenv('MESSAGE_QUEUE_PASSWORD')),
    'amqp://stagecraft:{0}@rabbitmq-3.backend:5672//stagecraft'.format(
        os.getenv('MESSAGE_QUEUE_PASSWORD'))
]
random.shuffle(BROKER_URL)
VARNISH_CACHES = [
    ('http://frontend-app-1', 7999),
    ('http://frontend-app-2', 7999),
]
# Logging: rotating plain-text log plus logstash-formatted JSON logs;
# audit events go to a dedicated rotating JSON file.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'formatters': {
        'standard': {
            'format': ("[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s]"
                       " %(message)s"),
            'datefmt': "%d-%b-%y %H:%M:%S"
        },
        'logstash_json': {
            '()': 'logstash_formatter.LogstashFormatter',
            'fmt': '{"extra": {"@tags": ["application", "stagecraft"]}}',
        },
    },
    'filters': {
        'additional_fields': {
            '()': 'stagecraft.libs.request_logger.middleware.AdditionalFieldsFilter',  # noqa
        }
    },
    'handlers': {
        'null': {
            'level': 'INFO',
            'class': 'django.utils.log.NullHandler',
        },
        'logfile': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': BASE_DIR + "/log/stagecraft.log",
            'maxBytes': 4 * 1024 * 1024,
            'backupCount': 2,
            'formatter': 'standard',
            'filters': ['additional_fields'],
        },
        'json_log': {
            'level': 'DEBUG',
            'class': 'logging.FileHandler',
            'filename': BASE_DIR + "/log/production.json.log",
            'formatter': 'logstash_json',
        },
        'json_audit_log': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': BASE_DIR + "/log/audit/stagecraft.json.log",
            'maxBytes': 4 * 1024 * 1024,
            'backupCount': 2,
            'formatter': 'logstash_json',
            'filters': ['additional_fields'],
        },
    },
    'loggers': {
        '': {
            'level': 'WARN',
            'handlers': ['logfile', 'json_log'],
        },
        'django.request': {
            'handlers': ['logfile', 'json_log'],
            'level': 'INFO',
            'propagate': True,  # also handle in parent handler
        },
        'stagecraft.apps': {
            'handlers': ['logfile', 'json_log'],
            'level': 'INFO',
            'propagate': True,
        },
        'stagecraft.libs': {
            'handlers': ['logfile', 'json_log'],
            'level': 'INFO',
            'propagate': True,
        },
        'stagecraft.audit': {
            'handlers': ['json_audit_log'],
            'level': 'DEBUG',
            'propagate': True,
        },
    },
}
|
Python
| 0
|
@@ -194,22 +194,8 @@
t os
-%0Aimport random
%0A%0Afr
@@ -1237,686 +1237,8 @@
))%0A%0A
-DATABASES = %7B%0A 'default': %7B%0A 'ENGINE': 'django.db.backends.postgresql_psycopg2',%0A 'NAME': 'stagecraft',%0A 'USER': 'stagecraft',%0A 'PASSWORD': os.getenv('DATABASE_PASSWORD'),%0A 'HOST': 'api-postgresql-primary-1',%0A 'PORT': '5432',%0A %7D%0A%7D%0A%0ABROKER_URL = %5B%0A 'amqp://stagecraft:%7B0%7D@rabbitmq-1.backend:5672//stagecraft'.format(%0A os.getenv('MESSAGE_QUEUE_PASSWORD')),%0A 'amqp://stagecraft:%7B0%7D@rabbitmq-2.backend:5672//stagecraft'.format(%0A os.getenv('MESSAGE_QUEUE_PASSWORD')),%0A 'amqp://stagecraft:%7B0%7D@rabbitmq-3.backend:5672//stagecraft'.format(%0A os.getenv('MESSAGE_QUEUE_PASSWORD'))%0A%5D%0Arandom.shuffle(BROKER_URL)%0A%0A
VARN
|
29a5ec45e76681865c62163e2580c0bfd4a6e241
|
Enhance comments
|
lc0045_jump_game_ii.py
|
lc0045_jump_game_ii.py
|
"""Leetcode 45. Jump Game II
Hard
URL: https://leetcode.com/problems/jump-game-ii/
Given an array of non-negative integers, you are initially positioned at
the first index of the array.
Each element in the array represents your maximum jump length at that position.
Your goal is to reach the last index in the minimum number of jumps.
Example:
Input: [2,3,1,1,4]
Output: 2
Explanation: The minimum number of jumps to reach the last index is 2.
Jump 1 step from index 0 to 1, then 3 steps to the last index.
Note:
You can assume that you can always reach the last index.
"""
class SolutionDPGreedy(object):
    def jump(self, nums):
        """Minimum number of jumps to reach the last index (DP table).

        :type nums: List[int]
        :rtype: int

        Time complexity: O(n^2), where n is length of nums.
        Space complexity: O(n).
        """
        n = len(nums)
        # min_jumps[i]: fewest jumps needed to land on index i.
        min_jumps = [n] * n
        min_jumps[0] = 0
        # `covered` is the rightmost index already relaxed, so each table
        # entry is written from the earliest (hence cheapest) position.
        covered = 0
        for i in range(n):
            far = min(i + nums[i], n - 1)
            for j in range(covered + 1, far + 1):
                if min_jumps[i] + 1 < min_jumps[j]:
                    min_jumps[j] = min_jumps[i] + 1
            covered = max(covered, i + nums[i])
        return min_jumps[-1]
class SolutionBFSGreedy1(object):
    def jump(self, nums):
        """Minimum number of jumps to reach the last index (greedy BFS).

        :type nums: List[int]
        :rtype: int

        Time complexity: O(n), where n is length of nums.
        Space complexity: O(1).
        """
        n = len(nums)
        jumps = 0
        # last_covered: furthest index reachable with `jumps` jumps;
        # frontier: furthest index reachable with one more jump.
        last_covered, frontier = -1, 0
        for idx, step in enumerate(nums):
            # Stop once the frontier already spans the last index.
            if frontier >= n - 1:
                break
            # Current index lies beyond the covered range: spend a jump.
            if last_covered < idx:
                jumps += 1
                last_covered = frontier
            frontier = max(frontier, idx + step)
        return jumps
class SolutionBFSGreedy2(object):
    def jump(self, nums):
        """Minimum number of jumps to reach the last index (level scan).

        :type nums: List[int]
        :rtype: int

        Time complexity: O(n), where n is length of nums.
        Space complexity: O(1).
        """
        n = len(nums)
        jumps = 0
        # boundary: end of the current BFS level; farthest: best reach
        # observed while scanning the level.
        boundary = 0
        farthest = 0
        i = 0
        while i < n - 1:
            # Already able to land on the last index: done counting.
            if boundary >= n - 1:
                break
            if i + nums[i] > farthest:
                farthest = i + nums[i]
            # End of level reached: commit one jump, start next level.
            if i == boundary:
                jumps += 1
                boundary = farthest
            i += 1
        return jumps
def main():
    """Smoke-test all three solutions on two sample inputs.

    Uses print() calls (valid in both Python 2 and 3) instead of the
    original Python-2-only print statements, and fixes the 'Outpout'
    typos in the comments.
    """
    # Expected output: 2
    nums = [2, 3, 1, 1, 4]
    print(SolutionDPGreedy().jump(nums))
    print(SolutionBFSGreedy1().jump(nums))
    print(SolutionBFSGreedy2().jump(nums))
    # Expected output: 2
    nums = [7, 0, 9, 6, 9, 6, 1, 7, 9, 0, 1, 2, 9, 0, 3]
    print(SolutionDPGreedy().jump(nums))
    print(SolutionBFSGreedy1().jump(nums))
    print(SolutionBFSGreedy2().jump(nums))


if __name__ == '__main__':
    main()
|
Python
| 0
|
@@ -836,14 +836,21 @@
#
-Create
+Apply DP with
tab
@@ -873,26 +873,16 @@
is min
-number of
jumps to
@@ -943,30 +943,33 @@
#
-For each index i, loop
+Iterate through from left
to
@@ -1757,20 +1757,22 @@
Update
-jump
+result
if prev
@@ -1922,32 +1922,46 @@
date reach with
+max jump from
current index an
@@ -1953,33 +1953,24 @@
urrent index
- and jump
.%0A
@@ -2557,25 +2557,40 @@
pdate re
-ach
+sult
with
+max jump from
current
@@ -2598,17 +2598,8 @@
ndex
- and jump
.%0A
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.