| commit (string, 40 chars) | subject (string, 1–3.25k) | old_file (string, 4–311) | new_file (string, 4–311) | old_contents (string, 0–26.3k) | lang (string, 3 classes) | proba (float64, 0–1) | diff (string, 0–7.82k) |
|---|---|---|---|---|---|---|---|
8ab94d992871ba010bbc215cadc9d468722d80ef
|
Refactor HealthTestView ‘get’ method to reduce number of function calls.
|
health_monitor/views.py
|
health_monitor/views.py
|
"""
Copyright 2017 Gracenote
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import distutils.util
import json
from django.http import HttpResponse
try:
from django.views import View
except ImportError:
from django.views.generic import View
from . import utils
from .models import HealthTest
class HealthView(View):
def get(self, request, uid=None, group=None, test=None):
"""Get health by uid, group, and/or test."""
status_code = 200
if not test and not group and not uid:
detail = distutils.util.strtobool(request.GET['detail']) if 'detail' in request.GET.keys() else False
healths = self.health_model.objects.all()
if detail:
response_data = [{'uid': x.uid, 'state': x.state, 'severity': x.severity} for x in healths]
else:
response_data = [x.uid for x in healths]
else:
try:
health = self.health_model.objects.get(uid=uid)
if test and group:
state = {k: {x: y for x, y in v.items() if x == test} for k, v in health.state.items() if k == group}
severity = {k: v for k, v in health.severity.items() if k == group}
elif group:
state = {k: v for k, v in health.state.items() if k == group}
severity = {k: v for k, v in health.severity.items() if k == group}
else:
state = health.state
severity = health.severity
response_data = {
'uid': health.uid,
'state': state,
'severity': severity,
}
except Exception as e:
response_data = {
'uid': uid,
'message': str(e)
}
status_code = 400
return HttpResponse(json.dumps(response_data), content_type="application/json", status=status_code)
def delete(self, request, uid=None, group=None, test=None):
"""Delete health by uid, group, and/or test."""
status_code = 200
try:
if not test and not group:
self.health_model.objects.get(uid=uid).delete()
response_data = {
'message': '{} health deleted'.format(uid)
}
elif not test:
self.health_model.objects.get(uid=uid).delete_group(group)
response_data = {
'message': '{} group deleted from {} health'.format(group, uid)
}
else:
self.health_model.objects.get(uid=uid).delete_group_test(group, test)
response_data = {
'message': '{} test deleted from {} group in {} health'.format(test, group, uid)
}
except Exception as e:
response_data = {
'message': str(e)
}
status_code = 400
return HttpResponse(json.dumps(response_data), content_type="application/json", status=status_code)
class HealthAlarmView(View):
def get(self, request, group=None, test=None):
status_code = 200
try:
if not group:
response_data = {'groups': HealthTest._get_groups()}
elif not test:
response_data = {'tests': HealthTest._get_tests(group)}
else:
kwargs = {}
for k in ['score', 'aggregate_percent', 'repetition', 'repetition_percent']:
if k in request.GET.keys():
kwargs[k] = int(request.GET[k])
response_data = self.health_alarm_model.calculate_alarms(group=group, test=test, **kwargs)
except Exception as e:
response_data = {
'message': str(e)
}
status_code = 400
return HttpResponse(json.dumps(response_data), content_type="application/json", status=status_code)
class HealthTestView(View):
def get(self, request, uid=None, test=None):
"""Get historical test results by test, uid."""
status_code = 200
try:
if not uid and not test:
response_data = {'tests': HealthTest._get_tests()}
else:
model = HealthTest._get_model(test)
kwargs = {}
if uid:
kwargs['uids'] = [uid]
elif 'uids' in request.GET:
kwargs['uids'] = request.GET['uids'].split(',')
if 'start_time' in request.GET:
kwargs['start_time'] = utils.iso_to_datetime(request.GET['start_time'])
if 'end_time' in request.GET:
kwargs['end_time'] = utils.iso_to_datetime(request.GET['end_time'])
response_data = []
results = model.get_history(**kwargs)
if 'latest' in request.GET and results:
latest = distutils.util.strtobool(request.GET['latest'])
results = [results.order_by('time').last()] if latest else model.get_history(**kwargs)
fields = [x.name for x in model._meta.fields if x.name != 'id']
for result in results:
entry = {}
for field in fields:
entry[field] = utils.datetime_to_iso(getattr(result, field))
entry['score'] = result.get_score()
response_data.append(entry)
except Exception as e:
response_data = {
'message': str(e)
}
status_code = 400
return HttpResponse(json.dumps(response_data), content_type="application/json", status=status_code)
def post(self, request, uid=None, test=None):
"""Post health test by test and uid."""
kwargs = {}
response_data = {}
if request.POST:
for key, value in request.POST.items():
kwargs[key] = value
# calculate health score: red, orange, yellow, green
try:
model = HealthTest._get_model(test)
kwargs = utils.clean_str_to_bool(model, **kwargs)
result = model.create(uid=uid, **kwargs)
except Exception as e:
response_data['message'] = str(e)
status_code = 400
return HttpResponse(json.dumps(response_data), content_type="application/json", status=status_code)
score = result.get_score()
response_data['score'] = score
response_data['message'] = '{} score changed to {} for uid {}'.format(test, score, uid)
return HttpResponse(json.dumps(response_data), content_type="application/json")
|
Python
| 0
|
@@ -5864,24 +5864,80 @@
entry = %7B%7D%0A
+ entry%5B'score'%5D = result.get_score()%0A
@@ -6053,67 +6053,54 @@
ld))
-%0A entry%5B'score'%5D = result.get_score(
+ if field == 'time' else getattr(result, field
)%0A
|
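Decoded from the URL-encoded delta above for readability (indentation approximated), the refactored loop in `HealthTestView.get` computes the score once per entry and applies `utils.datetime_to_iso` only to the `time` field rather than to every field:

```python
# Decoded sketch of the refactored loop
for result in results:
    entry = {}
    entry['score'] = result.get_score()
    for field in fields:
        entry[field] = utils.datetime_to_iso(getattr(result, field)) if field == 'time' else getattr(result, field)
    response_data.append(entry)
```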
83459664b12a44d16c55104e81c989fd1cfb3764
|
Set the root context as current context during startup
|
src/asphalt/core/runner.py
|
src/asphalt/core/runner.py
|
from __future__ import annotations
__all__ = ("run_application",)
import asyncio
import signal
import sys
from asyncio.events import AbstractEventLoop
from concurrent.futures import ThreadPoolExecutor
from logging import INFO, Logger, basicConfig, getLogger, shutdown
from logging.config import dictConfig
from typing import Any, Dict, Optional, Union, cast
from typeguard import check_argument_types
from asphalt.core.component import Component, component_types
from asphalt.core.context import Context
from asphalt.core.utils import PluginContainer, qualified_name
policies = PluginContainer("asphalt.core.event_loop_policies")
def sigterm_handler(logger: Logger, event_loop: AbstractEventLoop) -> None:
if event_loop.is_running():
logger.info("Received SIGTERM")
event_loop.stop()
def run_application(
component: Union[Component, Dict[str, Any]],
*,
event_loop_policy: str = None,
max_threads: int = None,
logging: Union[Dict[str, Any], int, None] = INFO,
start_timeout: Union[int, float, None] = 10,
):
"""
Configure logging and start the given root component in the default asyncio event loop.
Assuming the root component was started successfully, the event loop will continue running
until the process is terminated.
Initializes the logging system first based on the value of ``logging``:
* If the value is a dictionary, it is passed to :func:`logging.config.dictConfig` as
argument.
* If the value is an integer, it is passed to :func:`logging.basicConfig` as the logging
level.
* If the value is ``None``, logging setup is skipped entirely.
By default, the logging system is initialized using :func:`~logging.basicConfig` using the
``INFO`` logging level.
The default executor in the event loop is replaced with a new
:class:`~concurrent.futures.ThreadPoolExecutor` where the maximum number of threads is set to
the value of ``max_threads`` or, if omitted, the default value of
:class:`~concurrent.futures.ThreadPoolExecutor`.
:param component: the root component (either a component instance or a configuration dictionary
where the special ``type`` key is either a component class or a ``module:varname``
reference to one)
:param event_loop_policy: entry point name (from the ``asphalt.core.event_loop_policies``
namespace) of an alternate event loop policy (or a module:varname reference to one)
:param max_threads: the maximum number of worker threads in the default thread pool executor
(the default value depends on the event loop implementation)
:param logging: a logging configuration dictionary, :ref:`logging level <python:levels>` or
``None``
:param start_timeout: seconds to wait for the root component (and its subcomponents) to start
up before giving up (``None`` = wait forever)
"""
assert check_argument_types()
# Configure the logging system
if isinstance(logging, dict):
dictConfig(logging)
elif isinstance(logging, int):
basicConfig(level=logging)
# Inform the user whether -O or PYTHONOPTIMIZE was set when Python was launched
logger = getLogger(__name__)
logger.info("Running in %s mode", "development" if __debug__ else "production")
# Switch to an alternate event loop policy if one was provided
if event_loop_policy:
create_policy = policies.resolve(event_loop_policy)
policy = create_policy()
asyncio.set_event_loop_policy(policy)
logger.info("Switched event loop policy to %s", qualified_name(policy))
# Assign a new default executor with the given max worker thread limit if one was provided
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
try:
if max_threads is not None:
event_loop.set_default_executor(ThreadPoolExecutor(max_threads))
logger.info(
"Installed a new thread pool executor with max_workers=%d", max_threads
)
# Instantiate the root component if a dict was given
if isinstance(component, dict):
component = cast(Component, component_types.create_object(**component))
logger.info("Starting application")
context = Context()
exception: Optional[BaseException] = None
exit_code = 0
# Start the root component
try:
coro = asyncio.wait_for(component.start(context), start_timeout)
event_loop.run_until_complete(coro)
except asyncio.TimeoutError as e:
exception = e
logger.error("Timeout waiting for the root component to start")
exit_code = 1
except Exception as e:
exception = e
logger.exception("Error during application startup")
exit_code = 1
else:
logger.info("Application started")
# Add a signal handler to gracefully deal with SIGTERM
try:
event_loop.add_signal_handler(
signal.SIGTERM, sigterm_handler, logger, event_loop
)
except NotImplementedError:
pass # Windows does not support signals very well
# Finally, run the event loop until the process is terminated or Ctrl+C is pressed
try:
event_loop.run_forever()
except KeyboardInterrupt:
pass
except SystemExit as e:
exit_code = e.code
# Close the root context
logger.info("Stopping application")
event_loop.run_until_complete(context.close(exception))
# Shut down leftover async generators
event_loop.run_until_complete(event_loop.shutdown_asyncgens())
finally:
# Finally, close the event loop itself
event_loop.close()
asyncio.set_event_loop(None)
logger.info("Application stopped")
# Shut down the logging system
shutdown()
if exit_code:
sys.exit(exit_code)
|
Python
| 0.001053
|
@@ -500,16 +500,34 @@
Context
+, _current_context
%0Afrom as
@@ -4435,16 +4435,62 @@
mponent%0A
+ token = _current_context.set(context)%0A
@@ -5610,16 +5610,75 @@
= e.code
+%0A finally:%0A _current_context.reset(token)
%0A%0A
|
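Decoded, the delta imports the private `_current_context` variable alongside `Context`, sets the root context as current right after the root component is instantiated, and resets it in a `finally` clause once the event loop exits. Roughly (the surrounding `try` already exists in the file):

```python
from asphalt.core.context import Context, _current_context

# ...inside run_application(), after the root component is instantiated:
token = _current_context.set(context)
try:
    ...  # start the root component and run the event loop
finally:
    _current_context.reset(token)
```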
7f2b3d91550fd6af46ee10e6c68c8633408b12ed
|
Revert revert to properly fix #125 without cruft. Sigh.
|
director/scripts/create_dev_projects.py
|
director/scripts/create_dev_projects.py
|
"""
Create projects for the development database
"""
from django.conf import settings
from accounts.models import Account
from projects.models import Project
def run(*args):
# Ensure that this is only used in development
assert settings.DEBUG
# Assumes that there are at least 3 accounts
accounts = Account.objects.all()
Project.objects.create(
account=accounts[0],
public=True,
name='The project name',
description='''
The project description. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure
dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non
roident, sunt in culpa qui officia deserunt mollit anim id est laborum.
'''.strip()
)
Project.objects.create(
account=accounts[1],
public=True
)
Project.objects.create(
account=accounts[2],
public=False
)
|
Python
| 0
|
@@ -116,16 +116,33 @@
Account
+, AccountUserRole
%0Afrom pr
@@ -172,16 +172,140 @@
oject%0A%0A%0A
+def random_account_member(account):%0A return AccountUserRole.objects.filter(account=account).order_by('?').first().user%0A%0A%0A
def run(
@@ -534,16 +534,68 @@
nts%5B0%5D,%0A
+ creator=random_account_member(accounts%5B0%5D),%0A
@@ -1221,16 +1221,68 @@
nts%5B1%5D,%0A
+ creator=random_account_member(accounts%5B1%5D),%0A
@@ -1357,16 +1357,68 @@
nts%5B2%5D,%0A
+ creator=random_account_member(accounts%5B2%5D),%0A
|
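Decoded, the delta adds an `AccountUserRole` import, a helper that picks a random member of an account, and a `creator` argument on each `Project.objects.create` call:

```python
from accounts.models import Account, AccountUserRole


def random_account_member(account):
    # pick a random user that has a role on the account
    return AccountUserRole.objects.filter(account=account).order_by('?').first().user

# each Project.objects.create(...) call then gains:
#   creator=random_account_member(accounts[i]),
```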
0282c7eaecb32b736592c84cda1f7520c130c676
|
Update basic tests
|
test/basic.py
|
test/basic.py
|
import unittest
from anser import Anser
class BasicTest(unittest.TestCase):
def setUp(self):
pass
def test_creation(self):
server = Anser(__file__)
self.assertEquals(server.name, __file__)
def test_creation_explicit_no_debug(self):
server = Anser(__file__, debug=False)
self.assertFalse(server.debug)
def test_creation_implicit_no_debug(self):
server = Anser(__file__)
self.assertFalse(server.debug)
def test_creation_explicit_debug(self):
server = Anser(__file__, debug=True)
self.assertTrue(server.debug)
def test_add_action(self):
server = Anser(__file__)
@server.action('default')
def dummy_action(message, address):
pass
self.assertTrue(dummy_action in server.actions)
def test_receive(self):
pass
def test_send(self):
pass
if __name__ == '__main__':
unittest.main()
|
Python
| 0
|
@@ -32,16 +32,24 @@
rt Anser
+, Client
%0A%0A%0Aclass
@@ -54,16 +54,21 @@
ss Basic
+Anser
Test(uni
@@ -89,44 +89,8 @@
):%0A%0A
- def setUp(self):%0A pass%0A%0A%0A
@@ -197,17 +197,16 @@
ile__)%0A%0A
-%0A
def
@@ -322,33 +322,32 @@
(server.debug)%0A%0A
-%0A
def test_cre
@@ -442,33 +442,32 @@
(server.debug)%0A%0A
-%0A
def test_cre
@@ -578,17 +578,16 @@
debug)%0A%0A
-%0A
def
@@ -795,16 +795,59 @@
ions)%0A%0A%0A
+class BasicClientTest(unittest.TestCase):%0A%0A
def
@@ -855,75 +855,430 @@
est_
+c
re
-ceive(self):%0A pass%0A%0A%0A def test_send(self):%0A pass
+ation(self):%0A client = Client('10.0.0.1', 4000)%0A self.assertEquals(client.address, '10.0.0.1')%0A self.assertEquals(client.port, 4000)%0A%0A def test_creation_implicit_no_debug(self):%0A client = Client('10.0.0.1', 4000)%0A self.assertFalse(client.debug)%0A%0A def test_creation_explicit_debug(self):%0A client = Client('10.0.0.1', 4000, debug=True)%0A self.assertTrue(client.debug)
%0A%0A%0Ai
@@ -1322,9 +1322,8 @@
t.main()
-%0A
|
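Decoded, the delta renames `BasicTest` to `BasicAnserTest`, drops the empty `setUp`, and replaces the placeholder `test_receive`/`test_send` stubs with a new test case for the `Client` class (shown verbatim from the delta, including its use of the deprecated `assertEquals` alias):

```python
from anser import Anser, Client


class BasicClientTest(unittest.TestCase):

    def test_creation(self):
        client = Client('10.0.0.1', 4000)
        self.assertEquals(client.address, '10.0.0.1')
        self.assertEquals(client.port, 4000)

    def test_creation_implicit_no_debug(self):
        client = Client('10.0.0.1', 4000)
        self.assertFalse(client.debug)

    def test_creation_explicit_debug(self):
        client = Client('10.0.0.1', 4000, debug=True)
        self.assertTrue(client.debug)
```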
0fb7a7197d46d52591b4c5ae0767ef98aeb14d94
|
fix a command line to run a script
|
src/pymyinstall/installhelper/install_venv_helper.py
|
src/pymyinstall/installhelper/install_venv_helper.py
|
"""
@file
@brief Helpers for virtualenv
"""
from __future__ import print_function
import os
import sys
from .install_cmd_helper import run_cmd
class VirtualEnvError(Exception):
"""
exception raised by the function implemented in this file
"""
pass
def build_venv_cmd(params, posparams):
"""
builds the command line for virtual env
@param params dictionary of parameters
@param posparams positional arguments
@return string
"""
import venv
dir(venv)
exe = sys.executable
cmd = [exe, "-m", "venv"]
for k, v in params.items():
if v is None:
cmd.append("--" + k)
else:
cmd.append("--" + k + "=" + v)
cmd.extend(posparams)
return " ".join(cmd)
def create_virtual_env(where, symlinks=False, system_site_packages=False,
clear=True, packages=None, fLOG=print,
temp_folder=None):
"""
.. index:: virtual environment
create a virtual environment
@param where location of this virtual environment
@param symlinks attempt to symlink rather than copy
@param system_site_packages Give the virtual environment access to the system site-packages dir
@param clear Delete the environment directory if it already exists.
If not specified and the directory exists, an error is raised.
@param packages list of packages to install (it will install module
`pymyinstall <>`_).
@param fLOG logging function
@param temp_folder temporary folder (to download module if needed), by default ``<where>/download``
@return stand output
@example(Create a virtual environment)
The following example creates a virtual environment.
Packages can be added by specifying the parameter *package*.
@code
from pyquickhelper.pycode import create_virtual_env
fold = "my_env"
if not os.path.exists(fold):
os.mkdir(fold)
create_virtual_env(fold)
@endcode
@endexample
"""
fLOG("create virtual environment at:", where)
params = {}
if symlinks:
params["symlinks"] = None
if system_site_packages:
params["system-site-packages"] = None
if clear:
params["clear"] = None
cmd = build_venv_cmd(params, [where])
out, err = run_cmd(cmd, wait=True, fLOG=fLOG)
if len(err) > 0:
raise VirtualEnvError(
"unable to create virtual environement at {2}\nCMD:\n{3}\nOUT:\n{0}\nERR:\n{1}".format(out, err, where, cmd))
if sys.platform.startswith("win"):
scripts = os.path.join(where, "Scripts")
else:
scripts = os.path.join(where, "bin")
if not os.path.exists(scripts):
files = "\n ".join(os.listdir(where))
raise FileNotFoundError(
"unable to find {0}, content:\n {1}".format(scripts, files))
in_scripts = os.listdir(scripts)
pips = [_ for _ in in_scripts if _.startswith("pip")]
if len(pips) == 0:
out += venv_install(where, "pip", fLOG=fLOG,
temp_folder=temp_folder)
in_scripts = os.listdir(scripts)
pips = [_ for _ in in_scripts if _.startswith("pip")]
if len(pips) == 0:
raise FileNotFoundError(
"unable to find pip in {0}, content:\n {1}".format(scripts, in_scripts))
if packages is not None and len(packages) > 0:
fLOG("install packages in:", where)
packages = [_ for _ in packages if _ != "pymyinstall" and _ != "pip"]
if len(packages) > 0:
out += venv_install(where, packages, fLOG=fLOG,
temp_folder=temp_folder)
return out
def venv_install(venv, packages, fLOG=print, temp_folder=None):
"""
install a package or a list of packages in a virtual environment
@param venv location of the virtual environment
@param packages a package (str) or a list of packages(list[str])
@param fLOG logging function
@param temp_folder temporary folder (to download module if needed), by default ``<where>/download``
@return standard output
"""
if temp_folder is None:
temp_folder = os.path.join(venv, "download")
if isinstance(packages, str):
packages = [packages]
if packages == "pip" or packages == ["pip"]:
from .get_pip import __file__ as pip_loc
ppath = os.path.abspath(pip_loc.replace(".pyc", ".py"))
script = ["-m", ppath]
return run_venv_script(venv, script, fLOG=fLOG, is_cmd=True)
else:
p = os.path.normpath(os.path.join(
os.path.abspath(os.path.dirname(__file__)), "..", ".."))
l = ','.join("'{0}'".format(_) for _ in packages)
script = ["import sys",
"sys.path.append('{0}')".format(p.replace("\\", "\\\\")),
"import pymyinstall",
"ps=[{0}]".format(l),
"t='{0}'".format(temp_folder.replace("\\", "\\\\")),
"pymyinstall.packaged.install_all(temp_folder=t,list_module=ps)"]
return run_venv_script(venv, "\n".join(script), fLOG=fLOG)
def run_venv_script(venv, script, fLOG=print, file=False, is_cmd=False):
"""
run a script on a virtual environment (the script should be simple)
@param venv virtual environment
@param script script as a string (not a file)
@param fLOG logging function
@param file is script a file or a string to execute
@param is_cmd if True, script is a command line to run (as a list) for python executable
@return output
"""
if sys.platform.startswith("win"):
exe = os.path.join(venv, "Scripts", "python")
else:
exe = os.path.join(venv, "bin", "python")
if is_cmd:
cmd = " ".join(script)
out, err = run_cmd(cmd, wait=True, fLOG=fLOG)
if len(err) > 0:
raise VirtualEnvError(
"unable to run cmd at {2}\nCMD:\n{3}\nOUT:\n{0}\nERR:\n{1}".format(out, err, venv, cmd))
return out
else:
script = ";".join(script.split("\n"))
if file:
if not os.path.exists(script):
raise FileNotFoundError(script)
cmd = " ".join([exe, "-u", '"{0}"'.format(script)])
else:
cmd = " ".join([exe, "-u", "-c", '"{0}"'.format(script)])
out, err = run_cmd(cmd, wait=True, fLOG=fLOG)
if len(err) > 0:
raise VirtualEnvError(
"unable to run script at {2}\nCMD:\n{3}\nOUT:\n{0}\nERR:\n{1}".format(out, err, venv, cmd))
return out
|
Python
| 0.000113
|
@@ -4731,17 +4731,17 @@
pt = %5B%22-
-m
+u
%22, ppath
@@ -6110,24 +6110,32 @@
= %22 %22.join(
+%5Bexe%5D +
script)%0A
|
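Decoded, the fix is two small changes: the bundled `get_pip` script is passed as a plain file path with `-u` instead of `-m` (it is a path, not a module name), and the virtualenv's Python executable is prepended when the command line is joined:

```python
# before
script = ["-m", ppath]
# after: run the get_pip script by path, unbuffered
script = ["-u", ppath]

# before
cmd = " ".join(script)
# after: prepend the virtualenv's python executable
cmd = " ".join([exe] + script)
```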
db4d5263c38e95ad8c2e253512c563ea97b8772f
|
Fix adduser script first line
|
src/adduser.py
|
src/adduser.py
|
#!/usr/bin/env python
# Licensed under the Apache 2.0 License
'''
Add a user to the database
Usage: adduser username password
The environment variable LIGHTS_WEB_DATABASE must be set to the path of the database
Created on Nov 13, 2014
@author: Gary O'Neall
'''
import sys
import sqlite3
from hashlib import sha256
from os import path
DB_VAR = '$LIGHTS_WEB_DATABASE'
print DB_VAR
DB_PATH = path.expandvars(DB_VAR)
print DB_VAR + ':' + DB_PATH
def usage():
''' Prints the usage to the console
'''
print "Usage:"
print "adduser username password"
if __name__ == '__main__':
print 'hello'
if len(sys.argv) != 3:
usage()
sys.exit(1)
username = sys.argv[1].strip()
password = sys.argv[2].strip()
password_hash = sha256(password)
password_dig = password_hash.hexdigest()
if not path.isfile(DB_PATH):
print "Database is not initialized"
sys.exit(1)
con = sqlite3.connect(DB_PATH)
try:
cursor = con.execute('select id from users where username=?', [username])
row = cursor.fetchone()
if row:
print "User already exists"
sys.exit(1)
con.execute('insert into users (username, password) values (?, ?)', [username, password_dig])
print 'user added'
except Exception as ex:
print "Error updating database: "+str(ex)
finally:
con.commit()
con.close()
|
Python
| 0.000002
|
@@ -14,17 +14,16 @@
v python
-%0D
%0A# Licen
@@ -381,21 +381,8 @@
SE'%0A
-print DB_VAR%0A
DB_P
@@ -414,37 +414,8 @@
VAR)
-%0Aprint DB_VAR + ':' + DB_PATH
%0A%0D%0Ad
@@ -575,26 +575,8 @@
':%0D%0A
- print 'hello'%0A
|
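The `%0D` in the first hunk is a carriage return: the shebang line ended in `\r`, so on Unix the kernel would look for an interpreter literally named `python\r`. Decoded, the delta strips it and removes the leftover debug prints:

```python
#!/usr/bin/env python
# (previously '#!/usr/bin/env python\r'; the trailing CR breaks interpreter lookup on Unix)

DB_VAR = '$LIGHTS_WEB_DATABASE'
DB_PATH = path.expandvars(DB_VAR)
# also removed: print DB_VAR, print DB_VAR + ':' + DB_PATH, and print 'hello'
```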
a04d97bd9bb62d15201d8cadd1fd3b24980d3507
|
Fix installation path generation for configuration file
|
t2u-driver-installer.py
|
t2u-driver-installer.py
|
import os
PATH = os.getcwd()
HOME = os.getenv('HOME')
INSTALL_FILES = PATH+'/driver-files'
DEV_DIR = HOME+'/test-install'
PROD_DIR = '/etc'
BIN_DIR = '/usr/bin/'
print(('*'*25)+'\n')
print()
def take_input():
i = input("Please, disconnect all devices you're trying to install and press [I]: ")
return i
while(take_input()!="i"):
take_input()
else:
# Install files
os.system("cd {} && make clean && make && sudo make install".format(INSTALL_FILES))
# Comprobar si existe el directorio de destino de la configuracion y si no existe crearlo
print(os.path)
if os.path.isdir(DEV_DIR + '/Wireless/RT2870STA/'):
pass
else:
os.mkdir(DEV_DIR + '/Wireless/RT2870STA/')
# Copiar el archivo de configuracion
os.system("sudo cp {}/RT2870STA.dat {}/Wireless/RT2870STA/RT2870STA.dat".format(INSTALL_FILES, DEV_DIR))
# Instalar el modulo del driver
os.system("cd {}/os/linux/ && sudo insmod mt7650u_sta.ko".format(INSTALL_FILES))
# Generar el script que debería correrse al inicio del sistema para levantar la conexion
os.system("sudo cp {0}/t2u-driver {1}/t2u-driver && sudo chmod +x {1}/t2u-driver".format(PATH, BIN_DIR))
# Avisar de las opciones para deshabilitar o levantar el adaptador desde el programa LOCAL_BIN
print("*"*25+"\n\nYour computer should be restarted now.\nPlease close all running programs and restart manually when you're done.\nAn executable will be installed to enable or disable the adapter.\nJust run in your terminal '$ sudo t2u-driver'.")
|
Python
| 0
|
@@ -477,94 +477,43 @@
# C
-omprobar si existe el directorio de destino de la configuracion y si no existe crearlo
+heck for existing installation dirs
%0A
@@ -545,27 +545,28 @@
.path.isdir(
-DEV
+PROD
_DIR + '/Wir
@@ -570,27 +570,16 @@
Wireless
-/RT2870STA/
'):%0A
@@ -614,19 +614,20 @@
s.mkdir(
-DEV
+PROD
_DIR + '
@@ -653,35 +653,80 @@
/')%0A
-%0A # Copiar el archivo de
+ os.mkdir(PROD_DIR + '/Wireless/RT2870STA/')%0A %0A%0A # Copy
con
@@ -731,20 +731,25 @@
onfigura
-c
+t
ion
+ file
%0A os.
@@ -840,19 +840,20 @@
_FILES,
-DEV
+PROD
_DIR))%0A%0A
@@ -868,23 +868,8 @@
stal
-ar el modulo de
l dr
@@ -969,94 +969,67 @@
#
-Generar el script que deber%C3%ADa correrse al inicio del si
+Script generator for running t2u-driver as a sy
stem
-a
p
-ara levantar la conexion
+rogram
%0A
@@ -1145,100 +1145,23 @@
#
-Avisar de las opciones para deshabilitar o levantar el adaptador desde el programa LOCAL_BIN
+Restart warning
%0A
|
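Decoded approximately (the dump obscures some whitespace, so the exact ordering of the `mkdir` calls is inferred), the delta points the config-directory check and creation at `PROD_DIR` instead of `DEV_DIR`, adds the nested directory creation, and translates the Spanish comments:

```python
# Check for existing installation dirs
if os.path.isdir(PROD_DIR + '/Wireless'):
    pass
else:
    os.mkdir(PROD_DIR + '/Wireless')
    os.mkdir(PROD_DIR + '/Wireless/RT2870STA/')

# Copy configuration file
os.system("sudo cp {}/RT2870STA.dat {}/Wireless/RT2870STA/RT2870STA.dat".format(INSTALL_FILES, PROD_DIR))
```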
3095124fd2484605e49a30dd43778f94e292e0c3
|
fix 'logger instance not found' fix : missing return on send_suback
|
nyamuk/bee.py
|
nyamuk/bee.py
|
'''
@author : Iwan Budi Kusnanto <iwan.b.kusnanto@gmail.com>
'''
import sys
import nyamuk
from MV import MV
from mqtt_pkt import MqttPkt
class Bee(nyamuk.Nyamuk):
def __init__(self, sock, addr, conn_mgr, subs_mgr):
nyamuk.Nyamuk.__init__(self)
#from nyamuk
self.sock = sock
self.bridge = None
self.msgs = None
self.acl_list = None
self.listener = None
self.addr = addr
self.cm = conn_mgr
self.sm = subs_mgr #subscription manager attached to this bee
def packet_handle(self):
"""Packet Handling Dispatcher."""
cmd = self.in_packet.command & 0xF0
if cmd == MV.CMD_CONNECT:
return self.handle_connect()
elif cmd == MV.CMD_SUBSCRIBE:
return self.handle_subscribe()
else:
print "Unsupport CMD = ", cmd
return MV.ERR_NOT_SUPPORTED
def handle_connect(self):
"""Handle CONNECT command."""
print "Connecting client = ", self.addr
if self.state != MV.CS_NEW:
self.disconnect()
return MV.ERR_PROTOCOL
rc, ba = self.in_packet.read_string()
if rc != MV.ERR_SUCCESS:
self.disconnect()
return 1
protocol_name = ba.decode()
#Protocol Name
if protocol_name != MV.PROTOCOL_NAME:
print "INVALID Protocol in Connect from ", self.addr
self.disconnect()
return MV.ERR_PROTOCOL
#Protocol Version
rc, protocol_version = self.in_packet.read_byte()
if rc != MV.ERR_SUCCESS or protocol_version != MV.PROTOCOL_VERSION:
print "INVALID PROTOCOL VERSIOON"
self.disconnect()
return MV.ERR_PROTOCOL
#Connect Flags
rc, connect_flags = self.in_packet.read_byte()
if rc != MV.ERR_SUCCESS:
self.disconnect()
return 1
clean_session = connect_flags & 0x02
will = connect_flags & 0x04
will_qos = (connect_flags & 0x18) >> 3
will_retain = connect_flags & 0x20
password_flag = connect_flags & 0x40
username_flag = connect_flags & 0x80
rc, self.keepalive = self.in_packet.read_uint16()
if rc != MV.ERR_SUCCESS:
self.disconnect()
return 1
rc, client_id = self.in_packet.read_string()
if rc != MV.ERR_SUCCESS:
self.disconnect()
return 1
#client ID prefixes check
if will != 0:
print "WILL Unsupported "
sys.exit(-1)
if username_flag != 0:
print "username Unsupported"
sys.exit(-1)
self.id = client_id.decode()
self.clean_session = clean_session
if self.will is not None:
print "WILL Unsupported "
sys.exit(-1)
#ACL
self.cm.add(self)
print "New client connected from ", self.addr
return self.send_connack(0)
def handle_subscribe(self):
qos = 0
payload = bytearray(0)
payloadlen = 0
print "Handle subscribe from : ", self.id, " at ", self.addr
rc, mid = self.in_packet.read_uint16()
if rc != MV.ERR_SUCCESS:
return 1
while self.in_packet.pos < self.in_packet.remaining_length:
rc, ba_sub = self.in_packet.read_string()
if rc != MV.ERR_SUCCESS:
return 1
if len(ba_sub) == 0:
print "Empty Subscription from ", self.id, ". Disconnecting.."
sub = ba_sub.decode()
rc, qos = self.in_packet.read_byte()
if rc != MV.ERR_SUCCESS:
return 1
if qos > 2 or qos < 0:
#TODO
sys.exit(-1)
#fix subtopic TODO
rc = self.sm.add(self, sub, qos)
if rc == MV.ERR_SUCCESS:
sys.exit(-1)
payload.append(qos)
payloadlen += 1
rc = self.send_suback(mid, payloadlen, payload)
def disconnect(self):
print "[mqtt3_context_disconnect]Unimplemented Func"
self.socket_close()
sys.exit(-1)
def send_connack(self, result):
"""Send CONNACK command to client."""
pkt = MqttPkt()
pkt.command = MV.CMD_CONNACK
pkt.remaining_length = 2
pkt.alloc()
pkt.payload[pkt.pos + 0] = 0
pkt.payload[pkt.pos + 1] = result
return self.packet_queue(pkt)
def send_suback(self, mid, payloadlen, payload):
pkt = MqttPkt()
pkt.command = MV.CMD_SUBACK
pkt.remaining_length = 2 + payloadlen
rc = pkt.alloc()
if rc != MV.ERR_SUCCESS:
return rc
pkt.write_uint16(mid)
if payloadlen > 0:
pkt.write_bytes(payload, payloadlen)
return self.packet_queue(pkt)
|
Python
| 0.000001
|
@@ -211,16 +211,24 @@
subs_mgr
+, logger
):%0A
@@ -459,32 +459,61 @@
= addr%0A %0A
+ self.logger = logger%0A
self.cm
@@ -595,24 +595,100 @@
is bee%0A %0A
+ def loop(self, timeout = 1):%0A return self.packet_read()%0A %0A
def pack
@@ -3246,32 +3246,98 @@
lf.addr%0A
+self.logger.logger.info(%22New client connected from %25s%22, self.addr)
%0A return
@@ -4576,32 +4576,55 @@
yload)%0A %0A
+ return rc%0A %0A
def disconne
@@ -4645,60 +4645,70 @@
-print %22%5Bmqtt3_context_disconnect%5DUnimplemented Func%22
+self.logger.logger.info(%22Disconnect the Client : %25s%22, self.id)
%0A
|
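Decoded, the delta threads a `logger` through `Bee.__init__`, adds a small `loop` helper, logs connects and disconnects instead of printing, and adds the missing `return rc` at the end of `handle_subscribe` (the bug named in the subject):

```python
class Bee(nyamuk.Nyamuk):
    def __init__(self, sock, addr, conn_mgr, subs_mgr, logger):
        ...
        self.logger = logger

    def loop(self, timeout = 1):
        return self.packet_read()

# handle_connect additionally logs:
#   self.logger.logger.info("New client connected from %s", self.addr)
# handle_subscribe now ends with:
#   rc = self.send_suback(mid, payloadlen, payload)
#   return rc
# disconnect() logs instead of printing:
#   self.logger.logger.info("Disconnect the Client : %s", self.id)
```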
90b1aebe4b67ff9f221aee3b0c668f658d915537
|
Update bottlespin.py
|
bottlespin/bottlespin.py
|
bottlespin/bottlespin.py
|
import discord
from discord.ext import commands
from random import choice
class Bottlespin:
"""Spins a bottle and lands on a random user."""
def __init__(self, bot):
self.bot = bot
@commands.command(pass_context=True, no_pm=True, alias=["bottlespin"])
async def spin(self, ctx, role):
"""Spin the bottle"""
await self.bot.say(str(role))
roles = ctx.message.server.roles
if role in roles:
await self.bot.say(str(role))
await self.bot.say(str(roles))
author = ctx.message.author
server = ctx.message.server
if len(server.members) < 2:
await self.bot.say("`Not enough people are around to spin the bottle`")
return
if role in roles:
roleexist = True
else:
await self.bot.say("`{} is not a exising role`".format(role))
return
if roleexist:
target = [m for m in server.members if m != author and role in [
s.name for s in m.roles] and str(m.status) == "online" or str(m.status) == "idle"]
else:
target = [m for m in server.members if m != author and str(
m.status) == "online" or str(m.status) == "idle"]
if not target:
if role:
await self.bot.say("`Sorry I couldnt find anyone to point the bottle at with the role {}`".format(role))
else:
await self.bot.say("`Sorry I couldnt find anyone to point the bottle at`")
return
else:
target = choice(list(target))
await self.bot.say("`{0.display_name}#{0.discriminator} spinned the bottle and it landed on {1.display_name}#{1.discriminator}`".format(author, target))
def setup(bot):
n = Bottlespin(bot)
bot.add_cog(n)
|
Python
| 0
|
@@ -350,37 +350,40 @@
-await self.bot.say(str(
+roles = ctx.message.server.
role
-))
+s
%0A
@@ -383,36 +383,53 @@
les%0A role
-s
=
+discord.utils.get(
ctx.message.serv
@@ -428,32 +428,83 @@
age.server.roles
+, name = role%0A await self.bot.say(str(role))
%0A if role
|
fb6aa002e13a1d1205da28b20d419419067117f6
|
Implement basic genome crossover (#44, #36)
|
xentica/tools/genetics.py
|
xentica/tools/genetics.py
|
"""A collection of functions allowing genetics manipulations."""
def genome_crossover(*genomes):
"""
Crossover given genomes in stochastic way.
:param genomes: A list of genomes (integers) to crossover
:returns: Single integer, a resulting genome.
"""
raise NotImplementedError
|
Python
| 0.000011
|
@@ -63,245 +63,972 @@
%22%22%0A%0A
-%0Adef genome_crossover(*genomes):%0A %22%22%22%0A Crossover given genomes in stochastic way.%0A%0A :param genomes: A list of genomes (integers) to crossover%0A%0A :returns: Single integer, a resulting genome.%0A%0A %22%22%22%0A raise NotImplementedError
+from xentica import core%0Afrom xentica.tools import xmath%0A%0A%0Adef genome_crossover(state, num_genes, *genomes, rng_name=%22rng%22):%0A %22%22%22%0A Crossover given genomes in stochastic way.%0A%0A :param state:%0A A container holding model's properties.%0A :param num_genes:%0A Genome length, assuming all genomes has same number of genes.%0A :param genomes:%0A A list of genomes (integers) to crossover%0A :param rng_name:%0A Name of %60%60RandomProperty%60%60.%0A%0A :returns: Single integer, a resulting genome.%0A%0A %22%22%22%0A gene_choose = core.IntegerVariable()%0A new_genome = core.IntegerVariable()%0A for gene in range(num_genes):%0A gene_choose *= 0%0A for i, genome in enumerate(genomes):%0A gene_choose += ((genome %3E%3E gene) & 1) %3C%3C i%0A rand_val = getattr(state, rng_name).uniform%0A winner_gene = xmath.int(rand_val * (len(genomes) + 1))%0A new_genome += ((gene_choose %3E%3E winner_gene) & 1) %3C%3C gene%0A return new_genome
%0A
|
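The delta replaces the `NotImplementedError` stub wholesale; decoded from the URL encoding (indentation approximated), the new implementation is:

```python
from xentica import core
from xentica.tools import xmath


def genome_crossover(state, num_genes, *genomes, rng_name="rng"):
    """
    Crossover given genomes in stochastic way.

    :param state:
        A container holding model's properties.
    :param num_genes:
        Genome length, assuming all genomes has same number of genes.
    :param genomes:
        A list of genomes (integers) to crossover
    :param rng_name:
        Name of ``RandomProperty``.

    :returns: Single integer, a resulting genome.

    """
    gene_choose = core.IntegerVariable()
    new_genome = core.IntegerVariable()
    for gene in range(num_genes):
        gene_choose *= 0
        for i, genome in enumerate(genomes):
            gene_choose += ((genome >> gene) & 1) << i
        rand_val = getattr(state, rng_name).uniform
        winner_gene = xmath.int(rand_val * (len(genomes) + 1))
        new_genome += ((gene_choose >> winner_gene) & 1) << gene
    return new_genome
```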
9efba71852ae06aaf7e0775fd7719a2d8cb1a26b
|
remove repair geometry until Esri fixes the bug in 2.6
|
scripts/nightly/update_fgdb.py
|
scripts/nightly/update_fgdb.py
|
# copies data from SGID to the app and makes necessary optimizations
import logging
import re
from collections import namedtuple
from os import path
import arcpy
import settings
import spreadsheet
from build_json import parse_fields
from forklift.exceptions import ValidationException
from settings import fieldnames
from update_sgid import period_replacement
commonFields = [fieldnames.ID,
fieldnames.NAME,
fieldnames.ADDRESS,
fieldnames.CITY,
fieldnames.TYPE,
fieldnames.ENVIROAPPLABEL,
fieldnames.ENVIROAPPSYMBOL]
logger = logging.getLogger('forklift')
field_type_mappings = {'Integer': 'LONG',
'String': 'TEXT',
'SmallInteger': 'SHORT'}
def post_process_dataset(dataset):
config = get_spreadsheet_config_from_dataset(dataset)
if commonFields[0] in list(config.keys()):
#: make sure that it has the five main fields
upper_fields = [x.name.upper() for x in arcpy.ListFields(dataset)]
for fld in commonFields:
if fld not in upper_fields:
logger.info('{} not found. Adding to {}'.format(fld, dataset))
# get mapped field properties
if not config[fld] == 'n/a':
try:
mappedFld = arcpy.ListFields(dataset, config[fld])[0]
except IndexError:
raise Exception('Could not find {} in {}'.format(config[fld], path.basename(dataset)))
else:
mappedFld = namedtuple('literal', 'precision scale length type')(**{'precision': 0,
'scale': 0,
'length': 50,
'type': 'String'})
arcpy.AddField_management(dataset, fld, 'TEXT', field_length=255)
if fld == fieldnames.ID:
unique = 'UNIQUE'
else:
unique = 'NON_UNIQUE'
arcpy.AddIndex_management(dataset, fld, fld + '_index', unique)
# calc field
expression = config[fld]
uses_layer = False
if not expression == 'n/a':
try:
mappedFld = arcpy.ListFields(dataset, config[fld])[0]
except IndexError:
raise Exception('Could not find {} in {}'.format(config[fld], path.basename(dataset)))
if mappedFld.type != 'String':
expression = 'str(int(!{}!))'.format(expression)
else:
expression = '!{}!'.format(expression)
calc_layer = arcpy.management.MakeFeatureLayer(dataset,
'calc-layer',
'{} IS NOT NULL'.format(config[fld]))
uses_layer = True
else:
calc_layer = dataset
expression = '"{}"'.format(expression)
arcpy.CalculateField_management(calc_layer, fld, expression, 'PYTHON')
if uses_layer:
arcpy.management.Delete(calc_layer)
apply_coded_values(dataset, config[fieldnames.codedValues])
# scrub out any empty geometries or empty ID's
#: note: arcpy.DeleteFeature_management(lyr) was leaving a weird schema lock even after deleting the layer
arcpy.RepairGeometry_management(dataset)
with arcpy.da.Editor(path.dirname(dataset)):
with arcpy.da.UpdateCursor(dataset, 'OID@', '{} IS NULL'.format(fieldnames.ID)) as ucur:
for _ in ucur:
ucur.deleteRow()
def create_relationship_classes(staging, test_layer):
for config in spreadsheet.get_relationship_classes():
# create relationship class if missing
rcName = config[fieldnames.relationshipName]
rcPath = path.join(staging, settings.fgd, rcName)
if test_layer is not None and config[fieldnames.parentDatasetName] != test_layer.split('.')[-1]:
continue
if not arcpy.Exists(rcPath):
logger.info('Creating %s', rcPath)
origin = path.join(staging, settings.fgd, config[fieldnames.parentDatasetName])
destination = path.join(staging, settings.fgd, config[fieldnames.relatedTableName])
arcpy.CreateRelationshipClass_management(origin,
destination,
rcPath,
'SIMPLE',
config[fieldnames.relatedTableName],
config[fieldnames.parentDatasetName],
'BOTH',
'ONE_TO_MANY',
'NONE',
config[fieldnames.primaryKey],
config[fieldnames.foreignKey])
def get_spreadsheet_config_from_dataset(dataset):
name = path.basename(dataset)
for config in spreadsheet.get_query_layers() + spreadsheet.get_related_tables():
if config[fieldnames.sgidName].split('.')[-1] == name:
return config
raise Exception('{} not found in spreadsheet!'.format(name))
def validate_crate(crate):
dataFields = [f.name for f in arcpy.ListFields(crate.source)]
config = get_spreadsheet_config_from_dataset(crate.destination)
msg = '{}: Could not find matches in the source data for the following fields from the query layers spreadsheet: {}'
dataFields = set(dataFields)
try:
additionalFields = [config[f] for f in commonFields]
except Exception:
#: related tables don't have the additional fields
additionalFields = []
spreadsheetFields = set([f[0] for f in parse_fields(config[fieldnames.fields])] + additionalFields) - set(['n/a'])
invalidFields = spreadsheetFields - dataFields - set(commonFields)
if len(invalidFields) > 0:
raise ValidationException(msg.format(crate.destination_name, ', '.join(invalidFields)))
return True
def apply_coded_values(fc, codedValuesTxt):
if len(codedValuesTxt.strip()) == 0:
return
for valuesForField in codedValuesTxt.split(';'):
field_name = re.search(r'(^\S*)\:', valuesForField).group(1)
codes = re.findall(r'(\S*) \(.*?\),?', valuesForField)
descriptions = re.findall(r'\S* \((.*?)\),?', valuesForField)
logger.info('applying coded values for {} field'.format(field_name))
layer = arcpy.MakeFeatureLayer_management(fc)
for code, desc in zip(codes, descriptions):
where = '{} = \'{}\''.format(field_name, code)
arcpy.SelectLayerByAttribute_management(layer, where_clause=where)
arcpy.CalculateField_management(fc, field_name, '"{}"'.format(desc), 'PYTHON')
arcpy.Delete_management(layer)
|
Python
| 0
|
@@ -3655,23 +3655,125 @@
e layer%0A
+%0A
+ #: a bug in Pro 2.5.1 prevents this line from running, but will supposedly be fixed in 2.6%0A #
arcpy.R
|
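Decoded, the delta simply comments out the `RepairGeometry_management` call and records why:

```python
    # scrub out any empty geometries or empty ID's
    #: a bug in Pro 2.5.1 prevents this line from running, but will supposedly be fixed in 2.6
    # arcpy.RepairGeometry_management(dataset)
```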
c0d79ba0420f6e0176e98266a02c60b1f53f4a93
|
apply same simplification to setfield
|
obj_update.py
|
obj_update.py
|
from __future__ import unicode_literals
import logging
import sys
# for python 2/3 compatibility
text_type = unicode if sys.version_info[0] < 3 else str
logger = logging.getLogger('obj_update')
def setfield(obj, fieldname, value):
"""Fancy setattr with debugging."""
old = getattr(obj, fieldname)
if old is None and value is None:
changed = False
elif old is None and value is not None:
changed = True
else:
changed = text_type(old) != text_type(value)
if changed:
setattr(obj, fieldname, value)
if not hasattr(obj, '_is_dirty'):
obj._is_dirty = []
obj._dirty_fields = []
# obj._is_dirty.append(u'[%s %s->%s]' % (fieldname, old, value))
obj._is_dirty.append(fieldname)
obj._dirty_fields.append(fieldname)
def set_foreign_field(obj, fieldname, value):
"""Fancy setattr with debugging for foreign fields."""
old = getattr(obj, fieldname)
old_repr = old if old is None else old.pk
new_repr = value if value is None else value.pk
if old_repr != new_repr:
setattr(obj, fieldname, value)
if not hasattr(obj, '_is_dirty'):
obj._is_dirty = []
obj._dirty_fields = []
obj._is_dirty.append('[%s %s->%s]' % (fieldname, old_repr, new_repr))
obj._dirty_fields.append(fieldname)
def update(obj, data):
"""
Fancy way to update `obj` with `data` dict.
Returns True if data changed and was saved.
"""
for field_name, value in data.items():
# is_relation is Django 1.8 only
if obj._meta.get_field(field_name).is_relation:
set_foreign_field(obj, field_name, value)
else:
setfield(obj, field_name, value)
if getattr(obj, '_is_dirty', None):
logger.debug(u''.join(obj._is_dirty))
obj.save(update_fields=obj._dirty_fields)
del obj._is_dirty
del obj._dirty_fields
return True
|
Python
| 0.000002
|
@@ -312,90 +312,81 @@
-if old is None and value is None:%0A changed = False%0A elif old is None and
+old_repr = old if old is None else text_type(old)%0A new_repr = value if
val
@@ -395,86 +395,17 @@
is
-not
None
-:%0A changed = True%0A else:%0A changed = text_type(old) !=
+ else
tex
@@ -425,23 +425,36 @@
%0A if
-changed
+old_repr != new_repr
:%0A
@@ -601,18 +601,16 @@
%0A
- #
obj._is
@@ -659,56 +659,24 @@
old
-, value))%0A obj._is_dirty.append(fieldname
+_repr, new_repr)
)%0A
|
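Decoded, `setfield` adopts the same `old_repr`/`new_repr` comparison already used by `set_foreign_field`, stringifying only non-None values:

```python
def setfield(obj, fieldname, value):
    """Fancy setattr with debugging."""
    old = getattr(obj, fieldname)
    old_repr = old if old is None else text_type(old)
    new_repr = value if value is None else text_type(value)
    if old_repr != new_repr:
        setattr(obj, fieldname, value)
        if not hasattr(obj, '_is_dirty'):
            obj._is_dirty = []
            obj._dirty_fields = []
        obj._is_dirty.append(u'[%s %s->%s]' % (fieldname, old_repr, new_repr))
        obj._dirty_fields.append(fieldname)
```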
e08e50bcaa04f11d6cb3e015f19e704203939cff
|
fix issue#28
|
zaifbot/moving_average.py
|
zaifbot/moving_average.py
|
# moving_average.py
import time
from zaifapi import ZaifPublicApi
from db import Tradelogs, MovingAverage
import numpy as np
PERIOD_SECS = {'1d': 86400, '12h': 43200, '8h': 28800, '4h': 14400,
'1h': 3600, '1m': 60, '5m': 300, '15m': 900, '30m': 1800}
def _check_tradelogs(currency_pair, period, length, start_time, end_time, count):
tradelogs = Tradelogs(currency_pair, period)
# create tradelogs table if not exsit
tradelogs.create_table()
# get tradelogs count
tradelogs_count = tradelogs.get_tradelogs_count(end_time, start_time)
# update tradelogs from API if some tradelogs are missing
if tradelogs_count < (count + length - 1):
public_api = ZaifPublicApi()
tradelogs_api_result = public_api.everything('ohlc_data', currency_pair, {
'period': period, 'count': count + length - 1, 'to_epoch_time': end_time})
tradelogs.update_tradelog(tradelogs_api_result)
def _check_moving_average(currency_pair, period, length, start_time, end_time, count, sma_ema):
moving_average = MovingAverage(currency_pair, period, length, sma_ema)
# create moving_average table if it does not exist
moving_average.create_table()
# get moving_average from table
mv_avrg_result = moving_average.get_moving_average(end_time, start_time)
sma = []
ema = []
insert_params = []
for i in range(0, len(mv_avrg_result)):
nums = []
params = []
if i > (length - 2) and mv_avrg_result[i][3] is None:
if sma_ema == 'sma':
# prepare numbers to calculate sma
for j in range(0, length):
nums.append(mv_avrg_result[i - j][1])
# calculate sma
value = np.sum(nums) / length
sma.append(
{'time_stamp': mv_avrg_result[i][0], 'value': value})
elif sma_ema == 'ema':
# for the first time ema calculation
if len(ema) == 0:
# prepare numbers for first calculation of last value
for j in range(1, length + 1):
nums.append(mv_avrg_result[i - j][1])
last_val = np.sum(nums) / length
else:
last_val = ema[i - 1]['value']
# calculate ema
value = _calculate_ema(
mv_avrg_result[i][1], last_val, length)
ema.append(
{'time_stamp': mv_avrg_result[i][0], 'value': value})
if(mv_avrg_result[i][2] == 1):
insert_params.append((mv_avrg_result[i][0], value))
elif i > (length - 2):
if sma_ema == 'sma':
sma.append({'time_stamp': mv_avrg_result[i][
0], 'value': mv_avrg_result[i][2]})
elif sma_ema == 'ema':
ema.append({'time_stamp': mv_avrg_result[i][
0], 'value': mv_avrg_result[i][2]})
moving_average.update_moving_average(insert_params)
def get_moving_average(currency_pair, count=1000, to_epoch_time=int(time.time()), period='1d', length=5, sma_ema='sma'):
LIMIT_COUNT = 1000
start_time = to_epoch_time - ((count + length) * PERIOD_SECS[period])
count = min(count, LIMIT_COUNT)
_check_tradelogs(currency_pair, period, length,
start_time, to_epoch_time, count)
_check_moving_average(currency_pair, period, length,
start_time, to_epoch_time, count, sma_ema)
def _calculate_ema(current_val, last_val, length):
k = 2 / (length + 1)
ema = current_val * k + last_val * (1 - k)
return ema
|
Python
| 0
|
@@ -261,16 +261,35 @@
: 1800%7D%0A
+LIMIT_COUNT = 1000%0A
%0A%0Adef _c
@@ -3169,20 +3169,27 @@
, count=
-1000
+LIMIT_COUNT
, to_epo
@@ -3257,31 +3257,8 @@
'):%0A
- LIMIT_COUNT = 1000%0A
|
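Decoded, the fix hoists `LIMIT_COUNT` out of `get_moving_average` to module level so the default value of `count` can reference it:

```python
LIMIT_COUNT = 1000  # hoisted to module level, next to PERIOD_SECS


def get_moving_average(currency_pair, count=LIMIT_COUNT, to_epoch_time=int(time.time()),
                       period='1d', length=5, sma_ema='sma'):
    start_time = to_epoch_time - ((count + length) * PERIOD_SECS[period])
    count = min(count, LIMIT_COUNT)  # the local LIMIT_COUNT = 1000 line is removed
```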
db4ed3026aae73f9ba8926f2ee11ccfe2d6b9a35
|
Tweak log level in TaskCompletionQueue.__aexit__
|
py/g1/asyncs/kernels/g1/asyncs/kernels/utils.py
|
py/g1/asyncs/kernels/g1/asyncs/kernels/utils.py
|
"""Utilities for external users."""
__all__ = [
# Task completion queue.
'Closed',
'TaskCompletionQueue',
# In-memory stream.
'BytesStream',
'StringStream',
]
import collections
import io
import logging
from g1.bases.assertions import ASSERT
from . import errors
from . import locks
LOG = logging.getLogger(__name__)
class Closed(Exception):
pass
class TaskCompletionQueue:
"""Provide queue-like interface on waiting for task completion.
NOTE: It does not support future objects; this simplifies its
implementation, and thus may be more efficient.
"""
def __init__(self):
self._event = locks.Event()
self._completed = collections.deque()
self._uncompleted = set()
self._closed = False
def __repr__(self):
return '<%s at %#x: %s, completed=%d, uncompleted=%d>' % (
self.__class__.__qualname__,
id(self),
'closed' if self._closed else 'open',
len(self._completed),
len(self._uncompleted),
)
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, *_):
"""Reasonable default policy on joining tasks.
* First, it will close the queue.
* On normal exit, it will join all remaining tasks.
* On error, it will cancel tasks before joining them.
This is not guaranteed to fit any use case though. On those
cases, you will have to roll your own context manager.
"""
# Do not call close with ``graceful=False`` to get the remaining
# tasks because the queue might have been closed already.
self.close()
tasks = self._move_tasks()
if exc_type:
for task in tasks:
task.cancel()
for task in tasks:
exc = await task.get_exception()
if not exc:
pass
elif isinstance(exc, errors.Cancelled):
LOG.warning('task is cancelled: %r', task, exc_info=exc)
else:
LOG.error('task error: %r', task, exc_info=exc)
def is_closed(self):
return self._closed
def __bool__(self):
return bool(self._completed) or bool(self._uncompleted)
def __len__(self):
return len(self._completed) + len(self._uncompleted)
def close(self, graceful=True):
if self._closed:
return []
if graceful:
tasks = []
else:
tasks = self._move_tasks()
self._closed = True
self._event.set() # Notify all waiters on close.
return tasks
def _move_tasks(self):
tasks = list(self._completed)
tasks.extend(self._uncompleted)
self._completed.clear()
self._uncompleted.clear()
return tasks
async def get(self):
while True:
if self._completed:
return self._completed.popleft()
elif self._uncompleted or not self._closed:
self._event.clear()
await self._event.wait()
else:
raise Closed
async def as_completed(self):
while True:
try:
yield await self.get()
except Closed:
break
def put(self, task):
if self._closed:
raise Closed
self._uncompleted.add(task)
task.add_callback(self._on_completion)
def _on_completion(self, task):
if self._uncompleted:
self._uncompleted.remove(task)
self._completed.append(task)
self._event.set()
class StreamBase:
"""In-memory stream base class.
The semantics that this class implements is similar to a pipe, not a
regular file (and ``close`` only closes the write-end of stream).
Compared to a pipe, this class employs an unbounded buffer, and thus
a writer is never blocked.
This class provides both blocking and non-blocking interface.
"""
def __init__(self, buffer_type, data_type, newline):
self._buffer_type = buffer_type
self._data_type = data_type
self._newline = newline
self._buffer = self._buffer_type()
self._closed = False
self._event = locks.Event()
def _make_buffer(self, data):
if data:
buffer = self._buffer_type(data)
buffer.seek(len(data))
else:
buffer = self._buffer_type()
return buffer
def __repr__(self):
return '<%s at %#x: %s>' % (
self.__class__.__qualname__,
id(self),
'closed' if self._closed else 'open',
)
async def close(self):
return self.close_nonblocking()
def __aiter__(self):
return self
async def __anext__(self):
line = await self.readline()
if not line:
raise StopAsyncIteration
return line
async def read(self, size=-1):
while True:
data = self.read_nonblocking(size)
if data is None:
self._event.clear()
await self._event.wait()
else:
return data
async def readline(self, size=-1):
while True:
line = self.readline_nonblocking(size)
if line is None:
self._event.clear()
await self._event.wait()
else:
return line
async def readlines(self, hint=None):
if hint is None or hint <= 0:
hint = float('+inf')
lines = []
num_read = 0
async for line in self:
lines.append(line)
num_read += len(line)
if num_read >= hint:
break
return lines
async def write(self, data):
return self.write_nonblocking(data)
#
# Non-blocking counterparts.
#
# There is no implementation for ``__iter__`` and ``readlines``
# because their interface is not (easily?) compatible with
# non-blocking semantics.
#
NonblockingMethods = collections.namedtuple(
'NonblockingMethods',
(
'close',
'read',
'readline',
'write',
),
)
@property
def nonblocking(self):
"""Expose non-blocking interface via a file-like interface."""
return self.NonblockingMethods(
close=self.close_nonblocking,
read=self.read_nonblocking,
readline=self.readline_nonblocking,
write=self.write_nonblocking,
)
def close_nonblocking(self):
self._closed = True
self._event.set()
def read_nonblocking(self, size=-1):
data = self._buffer.getvalue()
if not data:
if self._closed:
return data
else:
return None
if size < 0:
size = len(data)
if size == 0:
data = self._data_type()
elif size >= len(data):
self._buffer = self._buffer_type()
else:
self._buffer = self._make_buffer(data[size:])
data = data[:size]
return data
def readline_nonblocking(self, size=-1):
data = self._buffer.getvalue()
if not data:
if self._closed:
return data
else:
return None
pos = data.find(self._newline)
if pos < 0 and size < 0:
if self._closed:
size = len(data)
else:
return None
elif size < 0 <= pos:
size = pos + len(self._newline)
elif pos < 0 <= size:
pass # Nothing here.
else:
# pos >= 0 and size >= 0.
size = min(size, pos + len(self._newline))
if size == 0:
data = self._data_type()
elif size >= len(data):
self._buffer = self._buffer_type()
else:
self._buffer = self._make_buffer(data[size:])
data = data[:size]
return data
def write_nonblocking(self, data):
ASSERT.false(self._closed)
self._event.set()
return self._buffer.write(data)
class BytesStream(StreamBase):
def __init__(self):
super().__init__(io.BytesIO, bytes, b'\n')
class StringStream(StreamBase):
def __init__(self):
# TODO: Handle all corner cases of newline characters (for now
# it is fixed to '\n').
super().__init__(io.StringIO, str, '\n')
|
Python
| 0
|
@@ -1988,14 +1988,12 @@
LOG.
-warnin
+debu
g('t
|
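The delta is a one-word change; decoded, cancelled tasks are now logged at debug rather than warning level in `__aexit__`:

```python
elif isinstance(exc, errors.Cancelled):
    LOG.debug('task is cancelled: %r', task, exc_info=exc)
```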
29fa15c46aa18930219fd87c66c3fefcdfc5fa90
|
use gradient clipping, print running time
|
model/archaeological_features.py
|
model/archaeological_features.py
|
import os
from datetime import datetime
import numpy as np
import sys
from keras import Input
from keras.callbacks import TensorBoard, EarlyStopping
from keras.engine import Model
from keras.layers import LSTM, TimeDistributed, Dense, Flatten
from keras.optimizers import Adam
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from topoml_util import geom_scaler
from topoml_util.slack_send import notify
SCRIPT_VERSION = '0.1.6'
SCRIPT_NAME = os.path.basename(__file__)
TIMESTAMP = str(datetime.now()).replace(':', '.')
SIGNATURE = SCRIPT_NAME + ' ' + TIMESTAMP
TRAINING_DATA_FILE = '../files/archaeology/archaeo_features_train.npz'
# Hyperparameters
BATCH_SIZE = int(os.getenv('BATCH_SIZE', 1024))
TRAIN_VALIDATE_SPLIT = float(os.getenv('TRAIN_VALIDATE_SPLIT', 0.1))
REPEAT_DEEP_ARCH = int(os.getenv('REPEAT_DEEP_ARCH', 0))
LSTM_SIZE = int(os.getenv('LSTM_SIZE', 256))
DENSE_SIZE = int(os.getenv('DENSE_SIZE', 64))
EPOCHS = int(os.getenv('EPOCHS', 200))
LEARNING_RATE = float(os.getenv('LEARNING_RATE', 1e-4))
PATIENCE = int(os.getenv('PATIENCE', 16))
RECURRENT_DROPOUT = float(os.getenv('RECURRENT_DROPOUT', 0.05))
GEOM_SCALE = float(os.getenv('GEOM_SCALE', 0)) # If no default or 0: overridden when data is known
OPTIMIZER = Adam(lr=LEARNING_RATE)
train_loaded = np.load(TRAINING_DATA_FILE)
train_geoms = train_loaded['geoms']
train_labels = train_loaded['feature_type']
# Determine final test mode or standard
if len(sys.argv) > 1 and sys.argv[1] in ['-t', '--test']:
print('Training in final test mode')
TEST_DATA_FILE = '../files/archaeology/archaeo_features_test.npz'
test_loaded = np.load(TEST_DATA_FILE)
test_geoms = test_loaded['geoms']
test_labels = test_loaded['feature_type']
else:
print('Training in standard validation mode')
# Split the training data in random seen/unseen sets
train_geoms, test_geoms, train_labels, test_labels = train_test_split(train_geoms, train_labels, test_size=0.1)
# Normalize
geom_scale = GEOM_SCALE or geom_scaler.scale(train_geoms)
train_geoms = geom_scaler.transform(train_geoms, geom_scale)
test_geoms = geom_scaler.transform(test_geoms, geom_scale) # re-use variance from training
# Map types to one-hot vectors
train_targets = np.zeros((len(train_labels), train_labels.max() + 1))
for index, feature_type in enumerate(train_labels):
train_targets[index, feature_type] = 1
message = '''
running {} with
version: {} batch size: {}
train/validate split: {} repeat deep: {}
lstm size: {} dense size: {}
epochs: {} learning rate: {}
geometry scale: {:f} recurrent dropout: {}
patience {}
'''.format(
SIGNATURE,
SCRIPT_VERSION, BATCH_SIZE,
TRAIN_VALIDATE_SPLIT, REPEAT_DEEP_ARCH,
LSTM_SIZE, DENSE_SIZE,
EPOCHS, LEARNING_RATE,
geom_scale, RECURRENT_DROPOUT,
PATIENCE,
)
print(message)
# Shape determination
geom_max_points, geom_vector_len = train_geoms.shape[1:]
output_seq_length = train_targets.shape[-1]
# Build model
inputs = Input(shape=(geom_max_points, geom_vector_len))
model = LSTM(LSTM_SIZE, return_sequences=True, recurrent_dropout=RECURRENT_DROPOUT)(inputs)
model = TimeDistributed(Dense(DENSE_SIZE, activation='relu'))(model)
for layer in range(REPEAT_DEEP_ARCH):
model = LSTM(LSTM_SIZE, return_sequences=True, recurrent_dropout=RECURRENT_DROPOUT)(model)
model = Dense(DENSE_SIZE, activation='relu')(model)
model = Flatten()(model)
model = Dense(output_seq_length, activation='softmax')(model)
model = Model(inputs=inputs, outputs=model)
model.compile(
loss='categorical_crossentropy',
metrics=['accuracy'],
optimizer=OPTIMIZER)
model.summary()
# Callbacks
callbacks = [
TensorBoard(log_dir='./tensorboard_log/' + SIGNATURE, write_graph=False),
EarlyStopping(patience=PATIENCE, min_delta=0.001),
]
history = model.fit(
x=train_geoms,
y=train_targets,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
validation_split=TRAIN_VALIDATE_SPLIT,
callbacks=callbacks).history
# Run on unseen test data
test_pred = [np.argmax(classes) for classes in model.predict(test_geoms)]
accuracy = accuracy_score(test_labels, test_pred)
message = '''
test accuracy of {:f} with
version: {} batch size {}
train/validate split {} repeat deep arch {}
lstm size {} dense size {}
epochs {} learning rate {}
geometry scale {:f} recurrent dropout {}
patience {}
'''.format(
accuracy,
SCRIPT_VERSION, BATCH_SIZE,
TRAIN_VALIDATE_SPLIT, REPEAT_DEEP_ARCH,
LSTM_SIZE, DENSE_SIZE,
len(history['val_loss']), LEARNING_RATE,
geom_scale, RECURRENT_DROPOUT,
PATIENCE,
)
notify(SIGNATURE, message)
print(SCRIPT_NAME, 'finished successfully')
|
Python
| 0
|
@@ -3,16 +3,38 @@
port os%0A
+from time import time%0A
from dat
@@ -54,16 +54,27 @@
datetime
+, timedelta
%0A%0Aimport
@@ -502,17 +502,17 @@
= '0.1.
-6
+7
'%0ASCRIPT
@@ -708,16 +708,38 @@
ain.npz'
+%0ASCRIPT_START = time()
%0A%0A# Hype
@@ -1349,16 +1349,29 @@
ING_RATE
+, clipnorm=1.
)%0A%0Atrain
@@ -1862,26 +1862,24 @@
tandard
-validation
+training
mode')%0A
@@ -3868,16 +3868,18 @@
se),%0A
+ #
EarlySt
@@ -4260,16 +4260,48 @@
_pred)%0A%0A
+runtime = time() - SCRIPT_START%0A
message
@@ -4327,16 +4327,31 @@
of %7B:f%7D
+ in %7B%7D
with %0Av
@@ -4568,25 +4568,24 @@
-
recurrent dr
@@ -4630,16 +4630,44 @@
ccuracy,
+ timedelta(seconds=runtime),
%0A SCR
@@ -4927,11 +4927,42 @@
essfully
-'
+ in', timedelta(seconds=runtime)
)%0A
|
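Decoded, the delta bumps the script to 0.1.7, records a start timestamp, enables gradient clipping on the optimizer, comments out the `EarlyStopping` callback, and reports the elapsed time. The key lines:

```python
from time import time
from datetime import datetime, timedelta

SCRIPT_VERSION = '0.1.7'
SCRIPT_START = time()

OPTIMIZER = Adam(lr=LEARNING_RATE, clipnorm=1.)  # gradient clipping

# in callbacks, the EarlyStopping entry is commented out

runtime = time() - SCRIPT_START
# the report becomes 'test accuracy of {:f} in {} with ...'
#   .format(accuracy, timedelta(seconds=runtime), ...)
print(SCRIPT_NAME, 'finished successfully in', timedelta(seconds=runtime))
```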
60874bb237bfe8fc2c95499cf7fd37a868ff315c
|
Cache the image for four days (original is seven)
|
zhihudaily/views/utils.py
|
zhihudaily/views/utils.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from StringIO import StringIO
from flask import send_file, g, Blueprint
from zhihudaily.configs import Config
from zhihudaily.crawler import Crawler
utils = Blueprint('utils', __name__)
@utils.before_app_request
def before_request():
g.db = Config.database
g.db.connect()
@utils.after_app_request
def after_request(response):
g.db.close()
return response
@utils.route('/img/<server>/<path:hash_string>')
def image(server, hash_string):
"""Handle image, use redis to cache image."""
image_url = 'http://{0}.zhimg.com/{1}'.format(server, hash_string)
cached = Config.redis_server.get(image_url)
if cached:
buffer_image = StringIO(cached)
buffer_image.seek(0)
else:
r = Crawler().send_request(image_url)
buffer_image = StringIO(r.content)
buffer_image.seek(0)
Config.redis_server.setex(image_url, (60*60*24*7),
buffer_image.getvalue())
return send_file(buffer_image, mimetype='image/jpeg')
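# A sketch of the cache round-trip (server and hash values are made up):
#
#   GET /img/pic1/v2-abcdef.jpg
#     miss -> fetch http://pic1.zhimg.com/v2-abcdef.jpg, SETEX with the TTL above
#     hit  -> serve the cached bytes straight from redis
#
# Redis SETEX takes the TTL in seconds, so 60*60*24*7 above is seven days.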
|
Python
| 0.999999
|
@@ -1000,9 +1000,9 @@
*24*
-7
+4
),%0A
|
5fc17533d66fdaac2ea649578dac0b2feaee5464
|
fix build script
|
build_python.py
|
build_python.py
|
#!/usr/local/bin/python2.7
# load basic modules only
import os, sys, platform, shutil, subprocess
def go():
# check what OS we are on.
if sys.platform.startswith('win'):
if os.path.exists('C:\\Program Files (x86)\\'):
programfiles_path = 'C:\\Program Files (x86)\\'
else:
programfiles_path = 'C:\\Program Files\\'
# get script path.
script_path = ''
if hasattr(sys,"frozen"):
script_path = os.path.dirname(os.path.realpath(sys.executable))
else:
script_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(script_path)
# make build directory to compile python
    if os.path.exists(os.path.join(script_path, 'build')):
shutil.rmtree(os.path.join(script_path, 'build'))
    if os.path.exists(os.path.join(script_path, 'dist')):
shutil.rmtree(os.path.join(script_path, 'dist'))
# Setup pyinstaller to Compile.
head_projpath, tail = os.path.split(script_path)
specf = 'kill_proc.spec'
# try and find pyinstaller location
if sys.platform.startswith('win'):
# C:\
try:
root_path = os.environ['SystemDrive']
root_path += os.sep
except KeyError:
root_path = 'not found'
try:
# C:\Users\jay.stevens
user_path = os.environ['HOMEDRIVE']
user_path += os.environ['HOMEPATH']
except KeyError:
user_path = 'not found'
else: # mac/linux
# /
root_path = os.sep
# /home/jay.stevens
user_path = os.environ['HOME']
# root_path pyinstaller
if os.path.exists(os.path.join(root_path, 'pyinstaller', 'pyinstaller.py')):
pyinstaller_path = os.path.join(root_path, 'pyinstaller', 'pyinstaller.py')
# user_path pyinstaller
elif os.path.exists(os.path.join(user_path, 'pyinstaller', 'pyinstaller.py')):
pyinstaller_path = os.path.join(user_path, 'pyinstaller', 'pyinstaller.py')
# head_projpath _pyinstaller
elif os.path.exists(os.path.join(head_projpath, '_pyinstaller', 'pyinstaller.py')):
pyinstaller_path = os.path.join(head_projpath, '_pyinstaller', 'pyinstaller.py')
else:
print('unable to find pyinstaller.py\nsearch paths:\n%s\n%s\n%s' % (os.path.join(root_path, 'pyinstaller'), os.path.join(user_path, 'pyinstaller'), os.path.join(head_projpath, '_pyinstaller')))
sys.exit(1)
# get the version of python we are running
pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
# compile pyinstaller package using the copy of python that was used to launch this build script
if sys.platform.startswith('darwin'):
os.system('/bin/bash -c \'%s%sbin%spython%s %s --noconfirm %s%s%s\'' % (sys.prefix, os.sep, os.sep, pyver, pyinstaller_path, script_path, os.sep, specf))
elif sys.platform.startswith('lin'):
os.system('%s%sbin%spython%s %s --noconfirm %s%s%s' % (sys.prefix, os.sep, os.sep, pyver, pyinstaller_path, script_path, os.sep, specf))
else:
os.system('%s%spython.exe "%s" --noconfirm "%s%s%s"' % (sys.prefix, os.sep, pyinstaller_path, script_path, os.sep, specf))
# cleanup build dir
if os.path.exists('build'):
shutil.rmtree('build')
print('end')
if sys.platform.startswith('win'): os.system('pause')
if __name__ == '__main__':
go()
|
Python
| 0.000001
|
@@ -1014,17 +1014,21 @@
= '
-kill_proc
+vc2008cleaner
.spe
|
a1e5ffe0a6964d10f46e81f088e400ce4b192f94
|
fix param name returned
|
jmbo_analytics/utils.py
|
jmbo_analytics/utils.py
|
import time
import uuid
import random
import urllib
from hashlib import md5
from django.conf import settings
from jmbo_analytics import CAMPAIGN_TRACKING_PARAMS
VERSION = '4.4sh'
COOKIE_NAME = '__utmmobile'
COOKIE_PATH = '/'
COOKIE_USER_PERSISTENCE = 63072000
CAMPAIGN_PARAMS_KEY = 'ga_campaign_params'
def get_visitor_id(guid, account, user_agent, cookie):
"""Generate a visitor id for this hit.
If there is a visitor id in the cookie, use that, otherwise
use the guid if we have one, otherwise use a random number.
"""
if cookie:
return cookie
message = ""
if guid:
# create the visitor id using the guid.
message = guid + account
else:
# otherwise this is a new user, create a new random id.
message = user_agent + str(uuid.uuid4())
md5String = md5(message).hexdigest()
return "0x" + md5String[:16]
def gen_utma(domain_name):
domain_hash = 0
g = 0
i = len(domain_name) - 1
while i >= 0:
c = ord(domain_name[i])
domain_hash = ((domain_hash << 6) & 0xfffffff) + c + (c << 14)
g = domain_hash & 0xfe00000
if g != 0:
domain_hash = domain_hash ^ (g >> 21)
i = i - 1
rnd_num = str(random.randint(1147483647, 2147483647))
time_num = str(time.time()).split('.')[0]
_utma = '%s.%s.%s.%s.%s.%s' % (domain_hash, rnd_num, time_num,
time_num, time_num, 1)
return _utma
def build_ga_params(request, path=None, event=None, referer=None):
meta = request.META
# get the account id
try:
account = settings.JMBO_ANALYTICS['google_analytics_id']
except:
raise Exception("No Google Analytics ID configured")
    # determine the domain
domain = meta.get('HTTP_HOST', '')
# determine the referrer
referer = referer or request.GET.get('r', '')
# get the path from the referer header
path = path or request.GET.get('p', '/')
# try and get visitor cookie from the request
user_agent = meta.get('HTTP_USER_AGENT', 'Unknown')
cookie = request.COOKIES.get(COOKIE_NAME)
visitor_id = get_visitor_id(meta.get('HTTP_X_DCMGUID', ''),
account, user_agent, cookie)
# build the parameter collection
params = {
'utmwv': VERSION,
'utmn': str(random.randint(0, 0x7fffffff)),
'utmhn': domain,
'utmsr': '',
'utme': '',
'utmr': referer,
'utmp': path,
'utmac': account,
'utmcc': '__utma=%s;' % gen_utma(domain),
'utmvid': visitor_id,
'utmip': meta.get('REMOTE_ADDR', ''),
}
# add event parameters if supplied
if event:
params.update({
'utmt': 'event',
'utme': '5(%s)' % '*'.join(event),
})
# retrieve campaign tracking parameters from session
campaign_params = request.session.get(CAMPAIGN_PARAMS_KEY, {})
# update campaign params from request
for param in CAMPAIGN_TRACKING_PARAMS:
if param in request.GET:
campaign_params[param] = request.GET[param]
# store campaign tracking parameters in session
request.session[CAMPAIGN_PARAMS_KEY] = campaign_params
# add campaign tracking parameters if provided
params.update(campaign_params)
# construct the gif hit url
utm_gif_location = "http://www.google-analytics.com/__utm.gif"
utm_url = utm_gif_location + "?" + urllib.urlencode(params)
# add event parameters if supplied
if event:
utm_url += '&utmt=event' + \
'&utme=5(%s)' % '*'.join(event)
return {'url': utm_url,
'user_agent': user_agent,
'language': meta.get('HTTP_ACCEPT_LANGUAGE', ''),
'visitor_id': visitor_id,
'COOKIE_USER_PERSISTENCE': COOKIE_USER_PERSISTENCE,
'COOKIE_NAME': COOKIE_NAME,
'COOKIE_PATH': COOKIE_PATH,
}
|
Python
| 0.000003
|
@@ -3615,16 +3615,20 @@
eturn %7B'
+utm_
url': ut
|
35eadd561a47204c32627e26c5e6e64849e38f2e
|
improve enrico_config
|
enrico/config.py
|
enrico/config.py
|
"""Central place for config file handling"""
import sys
from os.path import join
from configobj import ConfigObj, flatten_errors
from extern.configobj.validate import Validator
from environ import CONFIG_DIR
def get_config(infile, configspec=join(CONFIG_DIR, 'default.conf')):
"""Parse config file, and in addition:
- include default options
- exit with an error if a required option is missing"""
config = ConfigObj(infile, configspec=configspec,
file_error=True)
validator = Validator()
# @todo: I'm not sure we always want to copy all default options here
results = config.validate(validator, copy=True)
    if results is not True:
for (section_list, key, _) in flatten_errors(config, results):
if key is not None:
print('The "%s" key in the section "%s" failed validation' %
(key, ', '.join(section_list)))
else:
print('The following section was missing:%s ' %
', '.join(section_list))
print(' Please check your config file for missing '
'and wrong options!')
print('FATAL: Config file is not valid.')
sys.exit(1)
return config
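# Minimal usage sketch ('analysis.conf' is an assumed file name):
#
#   >>> config = get_config('analysis.conf')
#   >>> config['target']['name']
#
# Options that have defaults in default.conf are filled in by
# validate(copy=True); required options that are absent abort via sys.exit(1).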
# @todo: This doesn't work because missing values are invalid!!!
# Maybe fill those values by hand?
def get_default_config(configspec=join(CONFIG_DIR, 'default.conf')):
return ConfigObj(None, configspec=configspec)
def query_config():
"""Make a new config object, asking the user for required options"""
config = ConfigObj(indent_type='\t')
print('Please provide the following required options:')
config['out'] = raw_input('Output directory: ')
config['target'] = {}
config['target']['name'] = raw_input('Target Name : ')
config['target']['ra'] = raw_input('Right Ascension: ')
config['target']['dec'] = raw_input('Declination: ')
message = ('Options are : PowerLaw, PowerLaw2, LogParabola, '
'PLExpCutoff\nSpectral Model : ')
config['target']['spectrum'] = raw_input(message)
config['space'] = {}
config['space']['xref'] = config['target']['ra']
config['space']['yref'] = config['target']['dec']
config['space']['rad'] = raw_input('ROI Size: ')
return get_config(config)
|
Python
| 0.000002
|
@@ -1469,16 +1469,30 @@
nfig():%0A
+ import os%0A
%22%22%22M
@@ -1653,16 +1653,27 @@
options
+ %5Bdefault%5D
:')%0A
@@ -1683,24 +1683,46 @@
fig%5B'out'%5D =
+ os.getcwd()%0A out =
raw_input('
@@ -1737,20 +1737,125 @@
irectory
-: ')
+ %5B'+config%5B'out'%5D+'%5D : ')%0A if not(out=='') :%0A config%5B'out'%5D = out%0A%0A# Informations about the source
%0A con
@@ -2158,16 +2158,27 @@
al Model
+ %5BPowerLaw%5D
: ')%0A
@@ -2214,26 +2214,149 @@
%5D =
-raw_input(message)
+'PowerLaw'%0A model = raw_input(message)%0A if not(model=='') :%0A config%5B'target'%5D%5B'spectrum'%5D = model%0A%0A# informations about the ROI
%0A
@@ -2508,24 +2508,39 @@
e'%5D%5B'rad'%5D =
+ '15'%0A roi =
raw_input('
@@ -2547,20 +2547,826 @@
ROI Size
-: ')
+ %5B15%5D : ')%0A if not(roi=='') :%0A config%5B'space'%5D%5B'rad'%5D = roi%0A%0A# informations about the input files%0A config%5B'file'%5D = %7B%7D%0A config%5B'file'%5D%5B'spacecraft'%5D = config%5B'out'%5D+'/spacecraft.fits'%0A ft2 = raw_input('FT2 file %5B'+config%5B'file'%5D%5B'spacecraft'%5D+'%5D : ')%0A if not(ft2=='') :%0A config%5B'file'%5D%5B'spacecraft'%5D = ft2%0A config%5B'file'%5D%5B'event'%5D = config%5B'out'%5D+'/events.lis'%0A ft1list = raw_input('FT1 list of files %5B'+config%5B'file'%5D%5B'event'%5D+'%5D : ')%0A if not(ft1list=='') :%0A config%5B'file'%5D%5B'event'%5D = ft1list%0A config%5B'file'%5D%5B'xml'%5D = config%5B'out'%5D+'/'+config%5B'target'%5D%5B'name'%5D+'_'+config%5B'target'%5D%5B'spectrum'%5D+'_model.xml'%0A tag = raw_input('tag %5BLAT_Analysis%5D : ')%0A if not(tag=='') :%0A config%5B'file'%5D%5B'tag'%5D = tag%0A else :%0A config%5B'file'%5D%5B'tag'%5D = 'LAT_Analysis'%0A
%0A ret
|
d8d8138c7e6ef7ca3a91cfb3affd342b0d6072ff
|
Add chronometer
|
report_py3o_fusion_server/models/py3o_report.py
|
report_py3o_fusion_server/models/py3o_report.py
|
# -*- coding: utf-8 -*-
# © 2013 XCG Consulting <http://odoo.consulting>
# © 2016 ACSONE SA/NV
# © 2017 Therp BV <http://therp.nl>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import json
import logging
import os
import requests
import tempfile
from contextlib import closing
from openerp import _, api, models
from openerp.exceptions import UserError
from StringIO import StringIO
logger = logging.getLogger(__name__)
try:
from py3o.template import Template
from py3o.template.helpers import Py3oConvertor
except ImportError:
logger.debug('Cannot import py3o.template')
class Py3oReport(models.TransientModel):
_inherit = 'py3o.report'
@api.multi
def _create_single_report(self, model_instance, data, save_in_attachment):
""" This function to generate our py3o report
"""
self.ensure_one()
report_xml = self.ir_actions_report_xml_id
filetype = report_xml.py3o_filetype
if not report_xml.py3o_server_id:
return super(Py3oReport, self)._create_single_report(
model_instance, data, save_in_attachment,
)
elif report_xml.py3o_is_local_fusion:
result_path = super(
Py3oReport, self.with_context(
report_py3o_skip_conversion=True,
)
)._create_single_report(
model_instance, data, save_in_attachment,
)
with closing(open(result_path, 'r')) as out_stream:
tmpl_data = out_stream.read()
datadict = {}
else:
result_fd, result_path = tempfile.mkstemp(
suffix='.' + filetype, prefix='p3o.report.tmp.')
tmpl_data = self.get_template(model_instance)
in_stream = StringIO(tmpl_data)
with closing(os.fdopen(result_fd, 'w+')) as out_stream:
template = Template(in_stream, out_stream, escape_false=True)
localcontext = self._get_parser_context(model_instance, data)
expressions = template.get_all_user_python_expression()
py_expression = template.convert_py3o_to_python_ast(
expressions)
convertor = Py3oConvertor()
data_struct = convertor(py_expression)
datadict = data_struct.render(localcontext)
# Call py3o.server to render the template in the desired format
files = {
'tmpl_file': tmpl_data,
}
fields = {
"targetformat": filetype,
"datadict": json.dumps(datadict),
"image_mapping": "{}",
"escape_false": "on",
}
if report_xml.py3o_is_local_fusion:
fields['skipfusion'] = '1'
if filetype == 'pdf':
options = report_xml.pdf_options_id or\
report_xml.py3o_server_id.pdf_options_id
if options:
pdf_options_dict = options.odoo2libreoffice_options()
fields['pdf_options'] = json.dumps(pdf_options_dict)
r = requests.post(
report_xml.py3o_server_id.url, data=fields, files=files)
if r.status_code != 200:
# server says we have an issue... let's tell that to enduser
raise UserError(
_('Fusion server error %s') % r.text,
)
chunk_size = 1024
with open(result_path, 'w+') as fd:
for chunk in r.iter_content(chunk_size):
fd.write(chunk)
if len(model_instance) == 1:
self._postprocess_report(
result_path, model_instance.id, save_in_attachment)
return result_path
|
Python
| 0.000002
|
@@ -262,16 +262,46 @@
empfile%0A
+from datetime import datetime%0A
from con
@@ -2805,16 +2805,191 @@
%5D = '1'%0A
+ url = report_xml.py3o_server_id.url%0A logger.info(%0A 'Connecting to %25s to convert report %25s to %25s',%0A url, report_xml.report_name, filetype)%0A
@@ -3010,16 +3010,16 @@
'pdf':%0A
-
@@ -3294,65 +3294,137 @@
-r = requests.post(%0A report_xml.py3o_server_id.
+ logger.debug('PDF Export options: %25s', pdf_options_dict)%0A start_chrono = datetime.now()%0A r = requests.post(
url,
@@ -3556,16 +3556,81 @@
enduser%0A
+ logger.error('Py3o fusion server error: %25s', r.text)%0A
@@ -3842,16 +3842,16 @@
_size):%0A
-
@@ -3874,16 +3874,261 @@
(chunk)%0A
+ end_chrono = datetime.now()%0A convert_seconds = (end_chrono - start_chrono).total_seconds()%0A logger.info(%0A 'Report %25s converted to %25s in %25s seconds',%0A report_xml.report_name, filetype, convert_seconds)%0A
|
b0970703d4df2a3072f267855fa348428d01fc93
|
allow configuration of repl. factor
|
pcassandra/dj18/tests/dj18test_app/settings.py
|
pcassandra/dj18/tests/dj18test_app/settings.py
|
"""
Django settings for dj18tests project.
Generated by 'django-admin startproject' using Django 1.8.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
import os
#
# Settings for pcassandra
#
CASSANDRA_CONNECTION = {
'KEYSPACE': 'dj18test_app' + os.environ.get('TEST_KEYSPACE_SUFFIX', ''),
'HOSTS': ['127.0.0.1'],
'KEYSPACE_REPLICATION': """
{ 'class' : 'SimpleStrategy', 'replication_factor' : 1 }
""",
'CLUSTER_KWARGS': {
'protocol_version': 3
}
}
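# For reference, a keyspace created with the replication string above would
# correspond to CQL along the lines of (illustrative, issued by cqlengine):
#
#   CREATE KEYSPACE dj18test_app
#       WITH replication = { 'class': 'SimpleStrategy', 'replication_factor': 1 };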
AUTHENTICATION_BACKENDS = (
# 'django.contrib.auth.backends.ModelBackend',
'pcassandra.dj18.auth_backend.ModelBackend',
)
AUTH_USER_MODEL = 'pcassandra.DjangoUserProxy'
PCASSANDRA_AUTH_USER_MODEL = 'pcassandra.dj18.models.CassandraUser'
WSGI_APPLICATION = 'dj18test_app.wsgi.application_development'
#
# Django settings
#
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'f)%dy9k(e7hz!%hkskp60lu2804_ojpat*ztg6dpn0zx6fdo8p'
DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# pcassandra + test app
'pcassandra',
'dj18test_app',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'dj18test_app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
# Just to suppress the warnings when running tests.
os.environ['CQLENG_ALLOW_SCHEMA_MANAGEMENT'] = 'yeah'
|
Python
| 0.000023
|
@@ -516,16 +516,26 @@
%7B
+%7B%0A
'class'
@@ -554,16 +554,25 @@
rategy',
+%0A
'replic
@@ -591,19 +591,137 @@
' :
-1 %7D%0A %22%22%22
+%7Breplication_factor%7D%0A %7D%7D%0A %22%22%22.format(%0A replication_factor=os.environ.get('TEST_KEYSPACE_REPL_FACTOR', '1')%0A )
,%0A
|
89c3960298f315b448c2aee9dc04d073b6467c12
|
add notification to tasks
|
bzoing/tasks.py
|
bzoing/tasks.py
|
import datetime
import pickle
import os
from functools import total_ordering
from bzoing.playme import Playme
import time
import threading
@total_ordering
class Task():
"""Defines tasks, their representation and ordering."""
def __init__(self, id, description, alarm, sound, function):
self.id = id
self.description = description
self.alarm = alarm
self.function = function
self.sound = sound
def __repr__(self):
return '{}: {} {} {}'.format(self.__class__.__name__,
self.id,
self.description,
self.alarm)
def __lt__(self, other):
if hasattr(other, 'alarm'):
return self.alarm.__lt__(other.alarm)
def __eq__(self, other):
if hasattr(other, 'alarm'):
return self.alarm.__eq__(other.alarm)
class Bzoinq():
"""Creates a running Bzoinq."""
def __init__(self):
self.task_id = 0
self.task_list = []
# load the saved tasks
try:
with open('outfile.p', 'rb') as fp:
self.task_list = pickle.load(fp)
print("tasks loaded from file")
# remove the pickle file
os.remove('outfile.p')
except IOError:
print("could't load task list file")
# make task_id equal to the greatest of all task_id's
bigger = 0
for task in self.task_list:
if task.id > bigger:
bigger = task.id
self.task_id = bigger
print("new task id = {}".format(self.task_id))
def __repr__(self):
return '{}'.format(self.task_list)
def create_task(self, description="Sample task",
alarm=datetime.datetime.now(), sound=True, function=None):
"""Creates a new task"""
assert type(alarm) is datetime.datetime
self.task_id += 1
# create the task
new_task = Task(self.task_id, description, alarm, sound, function)
# add task to task list
self.task_list.append(new_task)
# sort the task list
self.task_list = sorted(self.task_list)
print("new task created")
def remove_task(self, id_to_remove):
"""Removes task with given id"""
for task in self.task_list[:]:
if task.id == id_to_remove:
try:
self.task_list.remove(task)
except:
print("couldn't remove task")
def remove_all_tasks(self):
"""Clears all the tasks"""
self.task_list = []
self.task_id = 0
print("All tasks have been cleaned")
def get_task_list(self):
"""Returns the list of tasks"""
return self.task_list
def save_tasks(self):
"""Saves current tasks to file"""
with open('outfile.p', 'wb') as fp:
pickle.dump(self.task_list, fp)
print("Tasks have been saved")
def change_alarm(self, id_to_change, new_time):
"""
Changes the alarm time of a task.
new_time must be a datetime object
"""
assert type(new_time) is datetime.datetime
# time on a task can only be changed if the task still exists
        for task in self.task_list[:]:
if task.id == id_to_change:
task.alarm = new_time
print("alarm with id {} changed".format(id_to_change))
class Monitorthread(threading.Thread):
def __init__(self, name=None, target=None):
super().__init__(name=name, target=target)
# def run(self):
# pass
class Monitor():
"""Defines a monitor that keeps checking a task list for changes"""
def __init__(self, bzoinq_obj):
self.stopit = False
self.bzoinq_obj = bzoinq_obj
def stop(self):
"""stops the monitor thread"""
self.stopit = True
def start(self):
"""Starts the monitor thread"""
t = Monitorthread(target=self.keep_checking)
t.start()
print("Monitor thread has started")
def keep_checking(self):
"""Keeps checking time and sorts the task_list"""
while True:
time.sleep(1)
if self.stopit:
break
# get current task list
task_list = self.bzoinq_obj.get_task_list()
if len(task_list) > 0:
# make sure task_list is sorted
task_list = sorted(task_list)
# check the time
current_time = datetime.datetime.now()
if current_time >= task_list[0].alarm:
# get task id
current_id = task_list[0].id
current_desc = task_list[0].description
print("executing alarm: {}".format(task_list[0].alarm))
# if there is a function, execute it
if task_list[0].function is not None:
task_list[0].function()
# play the sound if sound is True
if task_list[0].sound:
# play sound
my_sound = Playme()
my_sound.play()
# remove current alarm from the original task_list
self.bzoinq_obj.remove_task(current_id)
print("alarm is done {}".format(current_desc))
# help function
def to_datetime(sometime):
"""converts Y-M-D 00:00:00 (string) time input to datetime"""
try:
my_datetime = datetime.datetime.strptime(sometime, '%Y-%m-%d %H:%M:%S')
except ValueError:
print("Incorrect time. Please use Y-M-D 00:00:00 format.")
raise ValueError
return my_datetime
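# Example (illustrative timestamp):
#
#   >>> to_datetime('2017-01-01 10:30:00')
#   datetime.datetime(2017, 1, 1, 10, 30)
#
# Any other layout prints the format hint and re-raises ValueError.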
|
Python
| 0.000001
|
@@ -132,16 +132,34 @@
reading%0A
+import subprocess%0A
%0A%0A@total
@@ -304,16 +304,24 @@
function
+, notify
):%0A
@@ -431,24 +431,24 @@
= function%0A
-
self
@@ -461,16 +461,45 @@
= sound
+%0A self.notify = notify
%0A%0A de
@@ -1867,33 +1867,106 @@
w(),
- sound=True, function=Non
+%0A sound=True,%0A function=None,%0A notify=Tru
e):%0A
@@ -2171,16 +2171,24 @@
function
+, notify
)%0A
@@ -5376,16 +5376,132 @@
d.play()
+%0A if task_list%5B0%5D.notify:%0A subprocess.Popen(%5B'notify-send', current_desc%5D)
%0A%0A
|
c3587c23f6a5f34cf7bdc0a88b4057381f7752ac
|
add cascade delete table
|
flaskutils/test.py
|
flaskutils/test.py
|
from flaskutils import app
from .models import FlaskModel
from pgsqlutils.base import syncdb, Session
class ModelTestCase(object):
def setup(self):
"""
Use this test case when no interaction in a view is required
"""
syncdb()
def teardown(self):
Session.rollback()
Session.close()
class TransactionalTestCase(object):
"""
    These tests should be used when testing views
"""
def setup(self):
self.client = app.test_client()
self.json_request_headers = {
'Accept': 'application/json',
'Content-Type': 'application/json'
}
syncdb()
def teardown(self):
for t in FlaskModel.metadata.sorted_tables:
sql = 'delete from {};'.format(t.name)
Session.execute(sql)
Session.commit()
Session.close()
class ApiTestCase(object):
"""
    Instantiates an HTTP client ready to make JSON requests and get
    JSON responses; it doesn't instantiate a database connection
"""
def setup(self):
self.client = app.test_client()
self.json_request_headers = {
'Accept': 'application/json',
'Content-Type': 'application/json'
}
|
Python
| 0.000001
|
@@ -762,16 +762,24 @@
from %7B%7D
+ cascade
;'.forma
|
750195a169edf643f66e072e0cbde154a787b62c
|
Fix a bunch of unicode problems in logging.
|
floo/common/msg.py
|
floo/common/msg.py
|
import os
import time
try:
from . import shared as G
assert G
unicode = str
python2 = False
except ImportError:
python2 = True
import shared as G
LOG_LEVELS = {
'DEBUG': 1,
'MSG': 2,
'WARN': 3,
'ERROR': 4,
}
LOG_LEVEL = LOG_LEVELS['MSG']
LOG_FILE = os.path.join(G.BASE_DIR, 'msgs.floobits.log')
try:
fd = open(LOG_FILE, 'w')
fd.close()
except Exception as e:
pass
# Overridden by each editor
def editor_log(msg):
print(msg)
class MSG(object):
def __init__(self, msg, timestamp=None, username=None, level=LOG_LEVELS['MSG']):
self.msg = msg
self.timestamp = timestamp or time.time()
self.username = username
self.level = level
def display(self):
if self.level < LOG_LEVEL:
return
msg = unicode(self)
if G.LOG_TO_CONSOLE or G.CHAT_VIEW is None:
# TODO: ridiculously inefficient
try:
fd = open(LOG_FILE, 'a+')
fd.write(msg)
fd.write('\n')
fd.close()
except Exception as e:
print(unicode(e))
print(msg)
else:
editor_log(msg)
def __str__(self):
if python2:
return self.__unicode__().encode('utf-8')
return self.__unicode__()
def __unicode__(self):
if self.username:
msg = '[{time}] <{user}> {msg}'
else:
msg = '[{time}] {msg}'
return unicode(msg).format(user=self.username, time=time.ctime(self.timestamp), msg=self.msg)
def msg_format(message, *args, **kwargs):
message += ' '.join([unicode(x) for x in args])
if kwargs:
message = unicode(message).format(**kwargs)
return message
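# Example (illustrative arguments):
#
#   >>> msg_format('saved {count} files: ', 'foo.txt', count=3)
#   'saved 3 files: foo.txt'
#
# Positional args are stringified and appended first; kwargs then drive
# str.format over the combined message.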
def _log(message, level, *args, **kwargs):
if level >= LOG_LEVEL:
# TODO: kill MSG class and just format and print the thing right away
MSG(msg_format(message, *args, **kwargs), level=level).display()
def debug(message, *args, **kwargs):
_log(message, LOG_LEVELS['DEBUG'], *args, **kwargs)
def log(message, *args, **kwargs):
_log(message, LOG_LEVELS['MSG'], *args, **kwargs)
def warn(message, *args, **kwargs):
_log(message, LOG_LEVELS['WARN'], *args, **kwargs)
def error(message, *args, **kwargs):
_log(message, LOG_LEVELS['ERROR'], *args, **kwargs)
|
Python
| 0.999987
|
@@ -334,16 +334,17 @@
log')%0A%0A%0A
+%0A
try:%0A
@@ -418,16 +418,546 @@
pass%0A%0A%0A
+def safe_print(msg):%0A # Some environments can have trouble printing unicode:%0A # %22When print() is not outputting to the terminal (being redirected to%0A # a file, for instance), print() decides that it does not know what%0A # locale to use for that file and so it tries to convert to ASCII instead.%22%0A # See: https://pythonhosted.org/kitchen/unicode-frustrations.html#frustration-3-inconsistent-treatment-of-output%0A try:%0A print(msg)%0A except UnicodeEncodeError:%0A print(msg.encode('utf-8'))%0A%0A%0A%0A
# Overri
@@ -997,24 +997,29 @@
g(msg):%0A
+safe_
print(msg)%0A%0A
@@ -1654,24 +1654,29 @@
+safe_
print(unicod
@@ -1685,32 +1685,37 @@
e))%0A
+safe_
print(msg)%0A
@@ -2022,32 +2022,49 @@
%5B%7Btime%7D%5D %7Bmsg%7D'%0A
+ try:%0A
return u
@@ -2153,103 +2153,357 @@
sg)%0A
-%0A%0Adef msg_format(message, *args, **kwargs):%0A message += ' '.join(%5Bunicode(x) for x in
+ except UnicodeEncodeError:%0A return unicode(msg).format(user=self.username, time=time.ctime(self.timestamp), msg=self.msg.encode(%0A 'utf-8'))%0A%0A%0Adef msg_format(message, *args, **kwargs):%0A for arg in args:%0A try:%0A message += unicode(arg)%0A except UnicodeEncodeError:%0A message +=
arg
-s%5D)
%0A
@@ -2536,24 +2536,15 @@
e =
-unicode(
message
-)
.for
|
e8f941aca9a111eb81c41e0be3a0c6591386083c
|
change way to get if celery should be used
|
flowjs/settings.py
|
flowjs/settings.py
|
from django.conf import settings
# Media path where the files are saved
FLOWJS_PATH = getattr(settings, "FLOWJS_PATH", 'flowjs/')
# Remove the upload files when the model is deleted
FLOWJS_REMOVE_FILES_ON_DELETE = getattr(settings, "FLOWJS_REMOVE_FILES_ON_DELETE", True)
# Remove temporary chunks after the file has been uploaded and created
FLOWJS_AUTO_DELETE_CHUNKS = getattr(settings, "FLOWJS_AUTO_DELETE_CHUNKS", True)
# Time in days to remove non completed uploads
FLOWJS_EXPIRATION_DAYS = getattr(settings, "FLOWJS_EXPIRATION_DAYS", 1)
# When flowjs should join files in background. Options: 'none', 'media' (audio and video), 'all' (all files).
FLOWJS_JOIN_CHUNKS_IN_BACKGROUND = getattr(settings, "FLOWJS_JOIN_CHUNKS_IN_BACKGROUND", 'none')
# Check if FLOWJS should use Celery
FLOWJS_WITH_CELERY = 'celery' in settings.INSTALLED_APPS
|
Python
| 0.000001
|
@@ -804,39 +804,50 @@
Y =
-'celery' in settings.INSTALLED_APPS
+getattr(settings, %22FLOWJS_USE_CELERY%22, False)%0A
|
c2798702a1f2b1dc40c10b481b9989f9a86c71b2
|
Fix indentation error in some helpers
|
helpers/fix_fathatan.py
|
helpers/fix_fathatan.py
|
# -*- coding: utf-8 -*-
import os
import re
import argparse
def fix_fathatan(file_path):
with open(file_path, 'r') as file:
lines = file.readlines()
new_lines = []
for line in lines:
new_lines.append(re.sub(r'اً', 'ًا', line))
file_path = file_path.split(os.sep)
file_path[-1] = 'fixed_' + file_path[-1]
file_path = os.sep.join(file_path)
with open(file_path, 'w') as file:
file.write(''.join(new_lines))
print(file_path)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Changes after-Alif fathatan to before-Alif fathatan')
parser.add_argument('-in', '--file-path', help='File path to fix it', required=True)
args = parser.parse_args()
fix_fathatan(args.file_path)
|
Python
| 0.000017
|
@@ -481,9 +481,10 @@
_':%0A
-%09
+
pars
|
080bccbf8b61edf9f3c9da4163497cc82e287db0
|
Update roll.py
|
roll.py
|
roll.py
|
import random, re
#Update schema
__url__ = 'https://raw.githubusercontent.com/KittyHawkIrc/modules/production/' + __name__ + '.py'
__version__ = 1.0
# global variable for maximum length
max_len = 378
# declare trigger
def declare():
return {'roll': 'privmsg'}
# response to trigger
def callback(self):
if "joint" in self.message.lower():
return self.msg(self.channel, "See ^4/20")
try:
# find search pattern in self.message
match = match_roll(self, self.message)
# return roll result
value = roll(self, match[0], match[1])
return self.msg(self.channel, unicode(value))
except Exception as e:
# return error
return self.msg(self.channel, e)
# roll(rolls, sides) takes integers rolls and sides, and returns a random number
# from rolls to (rolls * sides), except when result is greater than global var
# max_len
# roll: Int Int => Int
def roll(self, rolls, sides):
roll_sum = 0
    # randint is inclusive on both ends, so each die contributes 1..sides;
    # zero-sided dice contribute nothing (keeps the 5d0 case at 0)
    for _ in range(rolls):
        if sides > 0:
            roll_sum += random.randint(1, sides)
# raise error if length of roll_sum is greater than maximum allowed length
if len(str(roll_sum)) > max_len:
raise ValueError('Overflow!')
return roll_sum
# match_roll(input_string) takes string input_string, attempts to match regex
# '\d+d\d+' (#d#, where # are digits of len 1 or greater), and returns a tuple
# containing the numbers before and after the 'd'.
# match_roll: Str => (Int, Int)
def match_roll(self, input_string):
# compile regex to case insensitively match digits, 'd', and more digits
input_format = re.compile('\d+d\d+', re.IGNORECASE)
# match input_string to imput_format
match = input_format.search(input_string)
# make sure match is found
if match:
# return result of match
roll_input = re.findall(r'\d+', match.group())
# return match results as tuple
return (int(roll_input[0]), int(roll_input[1]))
else:
return self.msg(self.channel, "Invalid input!")
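# Worked example (input string is made up; self is the plugin instance):
#
#   match_roll(self, '^roll 5d20')  ->  (5, 20)
#
# '5d20' matches '\d+d\d+', findall pulls out ['5', '20'], and the pair is
# returned as ints; input without a '#d#' token falls through to the error msg.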
# test class
class api:
def msg(self, channel, text):
return text
# run tests if main program
if __name__ == "__main__":
api = api()
setattr(api, 'isop', True)
setattr(api, 'type', 'privmsg')
setattr(api, 'command', 'roll')
setattr(api, 'user', 'nick!ident@host')
setattr(api, 'channel', '#test')
# check normal die roll
setattr(api, 'message', '^roll 5d20')
print(callback(api))
if int(callback(api)) < 5 or int(callback(api)) > 5*20:
print ('5d20 failed')
exit(1)
# check when num_rolls is 0
setattr(api, 'message', '^roll 0d20')
print(callback(api))
if int(callback(api)) != 0:
print ('0d20 failed')
exit(2)
# check when num_sides is 0
setattr(api, 'message', '^roll 5d0')
print(callback(api))
if int(callback(api)) != 0:
print ('5d0 failed')
exit(3)
# check when num_rolls and num_sides are 0
setattr(api, 'message', '^roll 0d0')
print(callback(api))
if int(callback(api)) != 0:
print ('0d0 failed')
exit(4)
# check when num_rolls and num_sides are 0
setattr(api, 'message', '^roll joint')
print(callback(api))
if "4/20" not in callback(api):
print ('joint failed')
exit(5)
print ('All tests passed!')
|
Python
| 0.000001
|
@@ -415,22 +415,10 @@
%22)%0D%0A
- try:%0D%0A
+%0D%0A
@@ -460,20 +460,16 @@
ge%0D%0A
-
-
match =
@@ -500,28 +500,24 @@
ssage)%0D%0A
-
-
# return rol
@@ -526,20 +526,16 @@
result%0D%0A
-
valu
@@ -570,16 +570,135 @@
ch%5B1%5D)%0D%0A
+ # raise error if length of roll_sum is greater than maximum allowed length%0D%0A if len(str(roll_sum)) %3C= max_len:%0D%0A
@@ -753,27 +753,10 @@
e
-xcept Exception as
+ls
e:%0D%0A
@@ -817,17 +817,27 @@
hannel,
-e
+'Overflow!'
)%0D%0A%0D%0A# r
@@ -952,68 +952,8 @@
des)
-, except when result is greater than global var%0D%0A# max_len
%0D%0A#
@@ -1102,167 +1102,8 @@
%0D%0A%0D%0A
- # raise error if length of roll_sum is greater than maximum allowed length%0D%0A if len(str(roll_sum)) %3E max_len:%0D%0A raise ValueError('Overflow!')%0D%0A%0D%0A
|
5398f356cab1e98673c253849a1de2bb76fc537a
|
move lapse archival to staging
|
scripts/util/autolapses2box.py
|
scripts/util/autolapses2box.py
|
"""Send autolapse tar files to box for archival.
Run from RUN_MIDNIGHT.sh for the previous date"""
import datetime
import os
import stat
import glob
from pyiem.box_utils import sendfiles2box
def main():
"""Run for the previous date, please"""
valid = datetime.date.today() - datetime.timedelta(days=1)
now = datetime.datetime.now()
os.chdir("/mesonet/share/lapses/auto")
localfns = []
for tarfilename in glob.glob("*frames.tar"):
# Make sure this file was generated yesterday and not old.
mtime = os.stat(tarfilename)[stat.ST_MTIME]
age = float(now.strftime("%s")) - mtime
if age > 86400.0:
continue
localfns.append(tarfilename)
if not localfns:
print("autolapses2box found no files within the past day?")
return
remotepath = valid.strftime("/iemwebcams/auto/%Y/%m/%d")
res = sendfiles2box(remotepath, localfns)
for sid, fn in zip(res, localfns):
if sid is None:
print("failed to upload %s" % (fn,))
if __name__ == "__main__":
main()
|
Python
| 0
|
@@ -24,19 +24,23 @@
iles to
-box
+staging
for arc
@@ -113,16 +113,34 @@
atetime%0A
+import subprocess%0A
import o
@@ -181,25 +181,20 @@
iem.
-box_
util
-s
import
send
@@ -189,29 +189,38 @@
import
-sendfiles2box
+logger%0A%0ALOG = logger()
%0A%0A%0Adef m
@@ -761,31 +761,19 @@
-print(%22autolapses2box f
+LOG.info(%22F
ound
@@ -855,16 +855,22 @@
ftime(%22/
+stage/
iemwebca
@@ -896,160 +896,228 @@
-res = sendfiles2box(remotepath, localfns)%0A for sid, fn in zip(res, localfns):%0A if sid is None:%0A print(%22failed to upload %25s%22 %25 (fn,)
+cmd = (%0A 'rsync -a --rsync-path %22mkdir -p %25s && rsync%22 %25s '%0A %22mesonet@metl60.agron.iastate.edu:%25s%22%0A ) %25 (remotepath, %22 %22.join(localfns), remotepath)%0A LOG.debug(cmd)%0A subprocess.call(cmd, shell=True
)%0A%0A%0A
|
f77d3728f5bcf34c396d5855054c679efa908f16
|
Update field names for school query (#731)
|
salesforce/management/commands/update_schools.py
|
salesforce/management/commands/update_schools.py
|
from django.core.management.base import BaseCommand
from salesforce.models import School
from salesforce.salesforce import Salesforce
class Command(BaseCommand):
help = "update schools from salesforce.com"
def handle(self, *args, **options):
with Salesforce() as sf:
query = "SELECT Name, Phone, " \
"Website, " \
"Type, " \
"K_I_P__c, " \
"Achieving_the_Dream_School__c, " \
"HBCU__c, " \
"Texas_Higher_Ed__c, " \
"Approximate_Enrollment__c, " \
"Pell_Grant_Recipients__c, " \
"Students_Pell_Grant__c, " \
"Current_Students__c, " \
"All_Time_Students2__c, " \
"Current_Savings__c, " \
"All_Time_Savings2__c, " \
"BillingStreet, " \
"BillingCity, " \
"BillingState, " \
"BillingPostalCode, " \
"BillingCountry, " \
"Address_Latitude__c, " \
"Address_Longitude__c," \
"Testimonial__c," \
"Testimonial_Name__c, " \
"Testimonial_Position__c, " \
"Number_of_Adoptions__c FROM Account"
response = sf.query_all(query)
sf_schools = response['records']
if sf_schools:
School.objects.all().delete()
updated_schools = 0
for sf_school in sf_schools:
if sf_school["Number_of_Adoptions__c"] > 0:
school, created = School.objects.update_or_create(name=sf_school['Name'],
phone=sf_school['Phone'],
website=sf_school['Website'],
type=sf_school['Type'],
key_institutional_partner=sf_school['K_I_P__c'],
achieving_the_dream_school=sf_school['Achieving_the_Dream_School__c'],
hbcu=sf_school['HBCU__c'],
texas_higher_ed=sf_school['Texas_Higher_Ed__c'],
undergraduate_enrollment=sf_school['Approximate_Enrollment__c'],
pell_grant_recipients=sf_school['Pell_Grant_Recipients__c'],
percent_students_pell_grant=sf_school['Students_Pell_Grant__c'],
current_year_students=sf_school['Current_Students__c'],
all_time_students=sf_school['All_Time_Students2__c'],
current_year_savings=sf_school['Current_Savings__c'],
all_time_savings=sf_school['All_Time_Savings2__c'],
physical_country=sf_school['BillingCountry'],
physical_street=sf_school['BillingStreet'],
physical_city=sf_school['BillingCity'],
physical_state_province=sf_school['BillingState'],
physical_zip_postal_code=sf_school['BillingPostalCode'],
long=sf_school['Address_Latitude__c'],
lat=sf_school['Address_Longitude__c'],
testimonial=sf_school['Testimonial__c'],
testimonial_name=sf_school['Testimonial_Name__c'],
testimonial_position=sf_school['Testimonial_Position__c'])
school.save()
updated_schools = updated_schools + 1
response = self.style.SUCCESS("Successfully updated {} schools".format(updated_schools))
self.stdout.write(response)
|
Python
| 0
|
@@ -752,32 +752,24 @@
%22
-Current_
Students
__c, %22 %5C
@@ -756,24 +756,37 @@
%22Students
+_Current_Year
__c, %22 %5C%0A
@@ -855,24 +855,32 @@
%22
+Savings_
Current_
Savings_
@@ -871,23 +871,20 @@
Current_
-Savings
+Year
__c, %22 %5C
@@ -3107,24 +3107,16 @@
ol%5B'
-Current_
Students
__c'
@@ -3107,24 +3107,37 @@
ol%5B'Students
+_Current_Year
__c'%5D,%0A
@@ -3357,24 +3357,32 @@
school%5B'
+Savings_
Current_
Savings_
@@ -3373,23 +3373,20 @@
Current_
-Savings
+Year
__c'%5D,%0A
|
5bd8ac0fd94458836d3abbbac693b3970136d028
|
use LifoQueue for block replay
|
mediachain/transactor/blockchain_follower.py
|
mediachain/transactor/blockchain_follower.py
|
import threading
import time
from base58 import b58encode
from Queue import Queue, Empty as QueueEmpty
from mediachain.transactor.block_cache import get_block_cache
from mediachain.proto import Transactor_pb2 # pylint: disable=no-name-in-module
from mediachain.datastore.utils import ref_base58
from mediachain.rpc.utils import with_retry
from grpc.beta.interfaces import StatusCode
from grpc.framework.interfaces.face.face import AbortionError
class BlockchainFollower(object):
def __init__(self,
stream_func,
catchup = True,
last_known_block_ref=None,
block_cache = None,
event_map_fn = None,
max_retry=20):
if block_cache is None:
block_cache = get_block_cache()
self.max_retry = max_retry
self.cache = block_cache
self.stream_func = stream_func
self.block_ref_queue = Queue()
self.incoming_event_queue = Queue()
self.block_replay_queue = Queue()
self.catchup_begin = threading.Event()
self.catchup_complete = threading.Event()
self.cancel_flag = threading.Event()
self.should_catchup = catchup
self.last_known_block_ref = ref_base58(last_known_block_ref)
if event_map_fn is None:
self.event_map_fn = lambda x: x
else:
self.event_map_fn = event_map_fn
self.catchup_thread = threading.Thread(
name='blockchain-catchup',
target=self._perform_catchup)
self.incoming_event_thread = threading.Thread(
name='journal-stream-listener',
target=self._receive_incoming_events)
self._event_iterator = self._event_stream()
def __iter__(self):
return self
def next(self):
return next(self._event_iterator)
def __enter__(self):
return self
def __exit__(self, *args):
self.cancel()
return False
def start(self):
self.catchup_thread.start()
self.incoming_event_thread.start()
def cancel(self):
# print('BlockchainFollower cancel')
self.cancel_flag.set()
def _clear_queues(self):
if self.cancel_flag.is_set():
return
with self.block_ref_queue.mutex:
self.block_ref_queue.queue.clear()
with self.block_replay_queue.mutex:
self.block_replay_queue.queue.clear()
with self.incoming_event_queue.mutex:
self.incoming_event_queue.queue.clear()
def _perform_catchup(self):
if not self.should_catchup:
return
while True:
if self.cancel_flag.is_set():
return
# block until the event consumer thread tells us to start the
# blockchain catchup process
self.catchup_begin.wait()
ref = self.block_ref_queue.get()
if self.cancel_flag.is_set():
return
if ref_base58(ref) == self.last_known_block_ref:
print('hit last known block: {}'.format(
self.last_known_block_ref
))
self.catchup_complete.set()
continue
block = self.cache.get(ref)
if block is None:
# FIXME: we need to handle this better
# should throw or otherwise signal that something went wrong
print('Could not get block with ref {}'.format(ref))
return
self.block_replay_queue.put(ref)
chain = chain_ref(block)
if chain is None:
# print('Reached genesis block {}'.format(ref))
self.catchup_complete.set()
continue
self.block_ref_queue.put(chain)
def _receive_incoming_events(self):
def event_receive_worker():
# first clear out all the queues, and event state,
# in case we're being called from the retry helper after a stream
# interruption
self.catchup_complete.clear()
self.catchup_begin.clear()
self._clear_queues()
first_event_received = False
try:
stream = self.stream_func()
for event in stream:
if self.cancel_flag.is_set():
stream.cancel()
return
if not first_event_received:
first_event_received = True
block_ref = block_event_ref(event)
if block_ref is None:
self.catchup_complete.set()
else:
self.block_ref_queue.put(block_ref)
self.catchup_begin.set()
self.incoming_event_queue.put(event)
except AbortionError as e:
if self.cancel_flag.is_set() and e.code == StatusCode.CANCELLED:
return
else:
raise
with_retry(event_receive_worker, max_retry_attempts=self.max_retry)
def _event_stream(self):
while True:
if self.cancel_flag.is_set():
return
# wait until catchup process signals that it's complete
# this will be immediate if catchup is not in progress
self.catchup_complete.wait()
# get all values from the catchup queue and yield their entries
while not self.block_replay_queue.empty():
if self.cancel_flag.is_set():
return
block_ref = self.block_replay_queue.get()
print('Replaying block: {}'.format(block_ref))
block = self.cache.get(block_ref)
entries = block.get('entries', [])
for e in entries:
e = self.event_map_fn(block_event_to_rpc_event(e))
if e is not None:
yield e
self.last_known_block_ref = block_ref
block_event = Transactor_pb2.JournalEvent()
block_event.journalBlockEvent.reference = ref_base58(block_ref)
block_event = self.event_map_fn(block_event)
if block_event is not None:
yield block_event
if self.cancel_flag.is_set():
return
# Try to pull an event off of the incoming event queue
# If there's no event received within a second, loop back.
try:
                e = self.incoming_event_queue.get(block=True, timeout=1)
block_ref = block_event_ref(e)
if block_ref is not None:
self.last_known_block_ref = block_ref
e = self.event_map_fn(e)
if e is not None:
yield e
except QueueEmpty:
pass
def chain_ref(block):
try:
ref_bytes = bytes(block['chain']['@link'])
return b58encode(ref_bytes)
except (KeyError, ValueError):
return None
def block_event_ref(rpc_event):
if rpc_event.WhichOneof('event') != 'journalBlockEvent':
return None
return rpc_event.journalBlockEvent.reference
def block_event_to_rpc_event(event):
rpc_event = Transactor_pb2.JournalEvent()
if event['type'] == 'insert':
rpc_event.insertCanonicalEvent.reference = ref_base58(event['ref'])
elif event['type'] == 'update':
rpc_event.updateChainEvent.canonical.reference = ref_base58(
event['ref'])
rpc_event.updateChainEvent.chain.reference = ref_base58(event['chain'])
if 'chainPrevious' in event:
rpc_event.updateChainEvent.chainPrevious.reference = ref_base58(
event['chainPrevious'])
else:
raise ValueError('unknown journal event type: {}'.format(
event['type']
))
return rpc_event
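# Sketch of the mapping performed above (field names from the proto,
# event values illustrative):
#
#   {'type': 'insert', 'ref': R}              -> insertCanonicalEvent.reference = R
#   {'type': 'update', 'ref': R, 'chain': C}  -> updateChainEvent.canonical / .chain
#   ...plus 'chainPrevious' when present      -> updateChainEvent.chainPrevious
#
# Any other 'type' raises ValueError.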
|
Python
| 0
|
@@ -75,16 +75,27 @@
t Queue,
+ LifoQueue,
Empty a
@@ -1016,32 +1016,36 @@
_replay_queue =
+Lifo
Queue()%0A
|
97a490db75f0a4976199365c3f654ba8cdb9a781
|
Test zip, and print format
|
01_Built-in_Types/tuple.py
|
01_Built-in_Types/tuple.py
|
#!/usr/bin/env python
import sys
import pickle
# Check argument
if len(sys.argv) != 2:
print("%s filename" % sys.argv[0])
raise SystemExit(1)
# Write tuples
file = open(sys.argv[1], "wb");
line = []
while True:
print("Enter name, age, score (ex: zzz, 16, 90) or quit");
line = sys.stdin.readline()
if line == "quit\n":
break
raws = line.split(",")
name = raws[0]
age = int(raws[1])
score = int(raws[2])
record = (name, age, score)
pickle.dump(record, file)
file.close()
# Read back
file = open(sys.argv[1], "rb");
while True:
try:
record = pickle.load(file)
        print(record)
        name, age, score = record
        print("name = %s" % name)
        print("age = %d" % age)
        print("score = %d" % score)
except (EOFError):
break
file.close()
|
Python
| 0
|
@@ -41,16 +41,202 @@
pickle%0A%0A
+# Test zip, and format in print%0Anames = %5B%22xxx%22, %22yyy%22, %22zzz%22%5D%0Aages = %5B18, 19, 20%5D%0A%0Apersons = zip(names, ages)%0A%0Afor name, age in persons:%0A print %22%7B0%7D's age is %7B1%7D%22.format(name, age)%0A%0A
# Check
|
ba6ef8c9f0881e7236063d5372f64656df1b4bf0
|
rename package from 'motion_control' to 'kinesis'
|
msl/equipment/resources/thorlabs/__init__.py
|
msl/equipment/resources/thorlabs/__init__.py
|
"""
Wrappers around APIs from Thorlabs.
"""
from .motion_control.motion_control import MotionControl
from .motion_control.callbacks import MotionControlCallback
|
Python
| 0.000181
|
@@ -43,30 +43,23 @@
%22%0Afrom .
-motion_control
+kinesis
.motion_
@@ -93,30 +93,23 @@
l%0Afrom .
-motion_control
+kinesis
.callbac
|
ce56148d04725d3c9c5fd12fb42c44d05d41f774
|
append more option flags in goober's output
|
goober.py
|
goober.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from nose.plugins.base import Plugin
from nose.case import Test
import logging
import unittest
import os
log = logging.getLogger(__name__)
class Goober(Plugin):
"""
After a multiprocess test run, print out a one-line command that will rerun all the failed / error'd tests
"""
name = "goober"
score = 2
enableOpt = "goober"
activate = "--goober"
def __init__(self):
super(Goober, self).__init__()
def options(self, parser, env):
parser.add_option('--goober',
action='store_true',
help="print failed test paths: %s" %
(self.help()))
parser.add_option('--goober-prefix',
action='store',
type='string',
dest='prefix',
help="Environment variables to prepend to goober's output. For example, --goober-prefix='LOCALE' will attach 'LOCALE=<os.environ.get('LOCALE')> to 'nosetests -v --goober'")
super(Goober, self).options(parser, env)
def configure(self, options, conf):
super(Goober, self).configure(options, conf)
self.prefix = ''
if not options.prefix:
return
self.env_vars = options.prefix.split(',')
if self.env_vars:
self.assemble_prefix()
def assemble_prefix(self):
self.prefix += ' '.join(["%s=%s" % (var, os.environ.get(var)) for var in self.env_vars if os.environ.get(var) is not None]) + ' '
def get_output(self, test):
try:
info = test.test
except AttributeError:
info = test._id
bits = str(info).split('.')
testname = bits.pop()
path = "/".join(bits)
path += ".py:%s" % testname
return path
def determine_test_path(self, problem):
"""
When we receive a multiprocess failure/error object, there is a stacktrace and an id. The id is a string representation of the path to the test that failed. But, no determination is made about whether the test that failed was part of a test class, or a standalone test.
We will break that id into pieces, splitting on '.' - we will re-assemble that id as a path, from the beginning, and search for it in the stacktrace. The first time we don't find it marks the end of the file path, and the beginning of the name of the test class/specific test. We keep the dots separating that part.
"""
test, trace = problem
try:
test_id = test.id()
except AttributeError:
test_id = test._id
test_bits = str(test_id).split('.')
path = ''
while path in trace:
bit = test_bits.pop(0)
path += bit + "/"
path = path.rstrip('/') + '.py'
path = path + ':' + '.'.join(test_bits)
return path
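# Worked example (test id and stacktrace are made up):
#
#   id:    'tests.unit.test_foo.TestBar.test_baz'
#   trace: '... File "tests/unit/test_foo.py", line 12 ...'
#
# '', 'tests/' and 'tests/unit/' are all found in the trace, so 'tests',
# 'unit' and 'test_foo' are consumed as path pieces; 'tests/unit/test_foo/'
# is not found, the loop stops, and the result is
# 'tests/unit/test_foo.py:TestBar.test_baz'.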
def finalize(self, result):
if not result.errors and not result.failures:
print "ALL GOOD!"
return
problems = []
for error in result.errors:
problems.append(self.determine_test_path(error))
for failure in result.failures:
problems.append(self.determine_test_path(failure))
print "YOU SHOULD RE-RUN:"
msg = "nosetests -v --goober "
if self.prefix:
msg = self.prefix + msg + '--goober-prefix=' + ','.join(self.env_vars) + ' '
print msg + ' '.join(problems)
|
Python
| 0.000002
|
@@ -1082,16 +1082,296 @@
ober'%22)%0A
+ parser.add_option('--goober-extra',%0A action='store',%0A help=%22append additional nose options to goober's output (give comma separated). For example: --goober-extra=--with-xunit,--with-xcover,--xcoverage-file=coverage.xml%22)%0A%0A
@@ -1690,16 +1690,125 @@
prefix()
+%0A if options.goober_extra:%0A self.extra_options = str(options.goober_extra).replace(%22,%22,%22 %22)
%0A%0A de
@@ -3876,16 +3876,105 @@
) + ' '%0A
+ if self.extra_options:%0A msg += str(self.extra_options) + ' '%0A %0A
|
89fbd8ba88edfcbc4877f06434098f628cbc3f9d
|
Fix client
|
src/client.py
|
src/client.py
|
#!/usr/bin/env python3
import time
from scapy.all import ls
from tuntap import TunThread
import dns
import query
from packet import Packet
addr = '192.168.33.10'
hostname = 'vpn.bgpat.net'
class VPNClient(TunThread):
daemon = True
name = 'tun_client'
addr = '192.168.200.2'
gateway = '192.168.200.1'
def receive(self, data):
pkt = Packet(data, hostname=hostname)
# print(pkt)
ini = query.TxInitialize(
hostname=hostname,
id=pkt.id,
count=pkt.count
)
# print(ini)
client = dns.Client(addr=addr, type='A', data={'value': bytes(ini)})
if query.Error(client.response.an.rdata).decode() is not None:
return
res = query.Ok(client.response.an.rdata).decode()
if res['count'] != res['sequence']:
return
# while len(pkt):
seq = list(pkt.keys())[0]
send = query.TxSend(data=pkt[seq], sequence=seq, id=pkt.id,
hostname=hostname)
# print(send)
data = {
'value': bytes(send)
}
c = dns.Client(addr=addr, type='A', data=data)
# print(c.response.an.rdata)
tun = VPNClient()
tun.start()
pool = {}
while True:
poll = query.Polling(hostname=hostname, padding=253-len(hostname))
cl = dns.Client(addr=addr, type='A', data={'value': bytes(poll)})
rdata = cl.response.an.rdata
if isinstance(rdata, bytes):
rdata = rdata.decode('utf8')
if query.Error(cl.response.an.rdata).decode() is not None:
print('noop')
time.sleep(1)
continue
while True:
ls(cl.response)
params = query.RxInitialize(rdata).decode()
if params is not None and params['id'] not in pool:
pkt = Packet(params['count'])
id = params['id']
pool[id] = pkt
seq = 0
else:
id = params['id']
pkt = pool[id]
seq = params['sequence']
pkt[seq] = params['data']
if len(pkt) == pkt.count:
tun.send(pkt.unpack())
del pool[id]
break
recv = query.Receive(hostname=hostname, sequence=seq, id=id, padding=10)
cl = dns.Client(addr=addr, type='A', data={'value': bytes(recv)})
|
Python
| 0.000001
|
@@ -50,16 +50,20 @@
l import
+ IP,
ls%0Afrom
@@ -551,20 +551,46 @@
- #
print(
+'
ini
+tialize', ini, ini.__dict__
)%0A
@@ -676,42 +676,182 @@
-if query.Error(client.response.an.
+while len(pkt):%0A rdata = client.response.an.rdata%0A if isinstance(rdata, bytes):%0A rdata = rdata.decode('utf8')%0A if query.Error(
rdat
@@ -879,39 +879,47 @@
ne:%0A
+
return%0A
+
res = qu
@@ -925,35 +925,16 @@
uery.Ok(
-client.response.an.
rdata).d
@@ -933,32 +933,36 @@
rdata).decode()%0A
+
if res%5B'
@@ -1005,30 +1005,55 @@
-return%0A # while
+ del pkt%5Bres%5B'sequence'%5D%5D%0A if
len
@@ -1049,34 +1049,70 @@
if len(pkt)
+ == 0
:%0A
+ return%0A
seq = li
@@ -1129,16 +1129,20 @@
s())%5B0%5D%0A
+
@@ -1221,32 +1221,36 @@
+
+
hostname=hostnam
@@ -1260,26 +1260,8 @@
- # print(send)%0A
@@ -1285,16 +1285,20 @@
+
+
'value':
@@ -1318,16 +1318,20 @@
+
%7D%0A
@@ -1332,17 +1332,73 @@
-c
+ print('send', send, send.__dict__)%0A client
= dns.C
@@ -1439,45 +1439,8 @@
ta)%0A
- # print(c.response.an.rdata)%0A
%0A%0Atu
@@ -1797,32 +1797,34 @@
ot None:%0A
+ #
print('noop')%0A
@@ -1841,16 +1841,18 @@
e.sleep(
+0.
1)%0A
@@ -1883,32 +1883,8 @@
ue:%0A
- ls(cl.response)%0A
@@ -1923,32 +1923,67 @@
rdata).decode()%0A
+ print('recv', rdata, pool)%0A
if param
@@ -2159,16 +2159,110 @@
else:%0A
+ params = query.RxSend(rdata).decode()%0A print('rx send', params, rdata)%0A
@@ -2393,33 +2393,123 @@
-if len(pkt) == pkt.count:
+print('recv pkt', pkt.__dict__, len(pkt), pkt)%0A if len(pkt) == pkt.count:%0A # ls(IP(pkt.unpack()))
%0A
@@ -2734,12 +2734,127 @@
tes(recv)%7D)%0A
+ rdata = cl.response.an.rdata%0A if isinstance(rdata, bytes):%0A rdata = rdata.decode('utf8')%0A
|
a68f9e0e7f9d99e0052c6c01395dbb131c052797
|
remove k4
|
esproxy/views.py
|
esproxy/views.py
|
from django.http import HttpResponse, HttpResponseRedirect
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.decorators import login_required
from django.template import RequestContext
from django.shortcuts import render_to_response
from settings import ELASTICSEARCH_PROXY, ELASTICSEARCH_REAL
def login_or_404(func):
    def inner(*args, **kwargs):
request = args[0]
if request.user.is_authenticated():
            return func(*args, **kwargs)
else:
return HttpResponseRedirect("/es/")
return inner
#@login_or_404
@csrf_exempt
def elasticsearch(request):
fullpath = request.get_full_path()
fullpath = fullpath[len(ELASTICSEARCH_PROXY):]
response = HttpResponse()
response['X-Accel-Redirect'] = ELASTICSEARCH_REAL + '/' + fullpath
return response
@login_required
def home(request):
html = open('templates/index.html').read()
return HttpResponse(html)
|
Python
| 0.999691
|
@@ -1,20 +1,30 @@
+import os%0A
from django.http imp
@@ -324,16 +324,27 @@
RCH_REAL
+,KIBANA_DIR
%0A%0A%0Adef l
@@ -911,19 +911,33 @@
pen(
-'templates/
+os.path.join(KIBANA_DIR,%22
inde
@@ -942,17 +942,18 @@
dex.html
-'
+%22)
).read()
@@ -983,8 +983,9 @@
e(html)%0A
+%0A
|
8cd2332871bd246352f23f286ae459c2cf399a35
|
allow classifier parameter to be configurable
|
sklearn/sklearn-template/template/trainer/model.py
|
sklearn/sklearn-template/template/trainer/model.py
|
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from sklearn import compose
from sklearn import ensemble
from sklearn import impute
from sklearn import pipeline
from sklearn import preprocessing
import numpy as np
from trainer import metadata
def get_estimator(flags):
classifier = ensemble.RandomForestClassifier()
# TODO(cezequiel): Make use of flags for hparams
_ = flags
numeric_transformer = pipeline.Pipeline([
('imputer', impute.SimpleImputer(strategy='median')),
('scaler', preprocessing.StandardScaler()),
])
numeric_log_transformer = pipeline.Pipeline([
('imputer', impute.SimpleImputer(strategy='median')),
('log', preprocessing.FunctionTransformer(
func=np.log1p, inverse_func=np.expm1, validate=True)),
('scaler', preprocessing.StandardScaler()),
])
numeric_bin_transformer = pipeline.Pipeline([
('imputer', impute.SimpleImputer(strategy='median')),
('bin', preprocessing.KBinsDiscretizer(n_bins=5, encode='onehot-dense')),
])
categorical_transformer = pipeline.Pipeline([
('imputer', impute.SimpleImputer(
strategy='constant', fill_value='missing')),
('onehot', preprocessing.OneHotEncoder(handle_unknown='ignore')),
])
preprocessor = compose.ColumnTransformer([
('numeric', numeric_transformer, metadata.NUMERIC_FEATURES),
      # transformer names must be unique within a ColumnTransformer
      ('numeric_log', numeric_log_transformer, metadata.NUMERIC_FEATURES),
      ('numeric_bin', numeric_bin_transformer, metadata.NUMERIC_FEATURES),
('categorical', categorical_transformer, metadata.CATEGORICAL_FEATURES),
])
estimator = pipeline.Pipeline([
('preprocessor', preprocessor),
('classifier', classifier),
])
return estimator
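# Minimal usage sketch (flags, X_train/y_train/X_test are placeholders;
# feature columns must line up with metadata.*_FEATURES):
#
#   >>> estimator = get_estimator(flags)
#   >>> estimator.fit(X_train, y_train)
#   >>> estimator.predict(X_test)
#
# The pipeline imputes/scales/bins/one-hot-encodes per column group before
# the random forest sees the data.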
|
Python
| 0.000001
|
@@ -904,118 +904,125 @@
:%0A
-classifier = ensemble.RandomForestClassifier()%0A%0A # TODO(cezequiel): Make use of flags for hparams%0A _ = flags
+# TODO: Allow pre-processing to be configurable through flags%0A classifier = ensemble.RandomForestClassifier(**flags)
%0A%0A
@@ -1836,16 +1836,30 @@
'ignore'
+, sparse=False
)),%0A %5D)
|
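The change forwards the whole `flags` mapping straight into the RandomForestClassifier constructor. A short sketch of the updated behaviour, assuming `flags` is a plain dict whose keys are valid classifier hyperparameters:

from sklearn import ensemble

# Hypothetical hyperparameters; the keys must match RandomForestClassifier
# constructor arguments once the dict is unpacked with **flags.
flags = {'n_estimators': 200, 'max_depth': 8}

classifier = ensemble.RandomForestClassifier(**flags)
print(classifier.n_estimators)  # 200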
ad8ff0e8d280a8a0b3876382b63a1be4ad0784e5
|
increment version
|
version.py
|
version.py
|
# -*- coding: utf-8 -*-
import platform
name = "Fourth Evaz"
version = (0, 1, 9)
source = "https://github.com/shacknetisp/fourthevaz"
def gitstr():
try:
return "%s" % (open('.git/refs/heads/master').read().strip()[0:10])
except FileNotFoundError:
return ""
except IndexError:
return ""
def versionstr():
return "%d.%d.%d%s" % (version[0], version[1], version[2],
'-' + gitstr() if gitstr() else '')
def pythonversionstr():
return '{t[0]}.{t[1]}.{t[2]}'.format(t=platform.python_version_tuple())
def systemversionstr():
return platform.platform()
|
Python
| 0.000004
|
@@ -75,9 +75,10 @@
1,
-9
+10
)%0Aso
|
185f174b6c1d50ad51987765f42e078a6081e5d3
|
Remove semi-colon
|
06-pipeline/tf-06.py
|
06-pipeline/tf-06.py
|
#!/usr/bin/env python
import sys, re, operator, string
#
# The functions
#
def read_file(path_to_file):
"""
Takes a path to a file and returns the entire
contents of the file as a string
"""
with open(path_to_file) as f:
data = f.read()
return data
def filter_chars_and_normalize(str_data):
"""
Takes a string and returns a copy with all nonalphanumeric
chars replaced by white space
"""
    pattern = re.compile(r'[\W_]+')
return pattern.sub(' ', str_data).lower()
def scan(str_data):
"""
Takes a string and scans for words, returning
a list of words.
"""
return str_data.split()
def remove_stop_words(word_list):
"""
Takes a list of words and returns a copy with all stop
words removed
"""
with open('../stop_words.txt') as f:
stop_words = f.read().split(',')
# add single-letter words
stop_words.extend(list(string.ascii_lowercase))
return [w for w in word_list if not w in stop_words]
def frequencies(word_list):
"""
Takes a list of words and returns a dictionary associating
words with frequencies of occurrence
"""
word_freqs = {}
for w in word_list:
if w in word_freqs:
word_freqs[w] += 1
else:
word_freqs[w] = 1
return word_freqs
def sort(word_freq):
"""
Takes a dictionary of words and their frequencies
and returns a list of pairs where the entries are
sorted by frequency
"""
return sorted(word_freq.items(), key=operator.itemgetter(1), reverse=True)
def print_all(word_freqs):
"""
Takes a list of pairs where the entries are sorted by frequency and print them recursively.
"""
if(len(word_freqs) > 0):
print(word_freqs[0][0], '-', word_freqs[0][1])
print_all(word_freqs[1:]);
#
# The main function
#
print_all(sort(frequencies(remove_stop_words(scan(filter_chars_and_normalize(read_file(sys.argv[1]))))))[0:25])
|
Python
| 0.999991
|
@@ -1830,9 +1830,8 @@
1:%5D)
-;
%0A%0A#%0A
|
e5963987e678926ad8cdde93e2551d0516a7686b
|
Increase timeout for bench_pictures on Android
|
slave/skia_slave_scripts/android_bench_pictures.py
|
slave/skia_slave_scripts/android_bench_pictures.py
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Run the Skia bench_pictures executable. """
from android_render_pictures import AndroidRenderPictures
from android_run_bench import DoBench
from bench_pictures import BenchPictures
from build_step import BuildStep
import sys
class AndroidBenchPictures(BenchPictures, AndroidRenderPictures):
def _DoBenchPictures(self, config, threads):
data_file = self._BuildDataFile(self._device_dirs.SKPPerfDir(), config,
threads)
args = self._PictureArgs(self._device_dirs.SKPDir(), config, threads)
DoBench(serial=self._serial,
executable='bench_pictures',
perf_data_dir=self._perf_data_dir,
device_perf_dir=self._device_dirs.SKPPerfDir(),
data_file=data_file,
extra_args=args)
def _Run(self):
self._PushSKPSources(self._serial)
super(AndroidBenchPictures, self)._Run()
if '__main__' == __name__:
sys.exit(BuildStep.RunBuildStep(AndroidBenchPictures))
|
Python
| 0.000011
|
@@ -479,16 +479,207 @@
tures):%0A
+ def __init__(self, args, attempts=1, timeout=4800):%0A super(AndroidBenchPictures, self).__init__(args, attempts=attempts,%0A timeout=timeout)%0A%0A
def _D
|
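The diff raises the per-step timeout by giving the class its own constructor that forwards a larger default to the base class. The same pattern in isolation, with a stand-in base class since the real BuildStep hierarchy is not reproduced here:

class BuildStep(object):
    """Stand-in for the real build-step base class."""
    def __init__(self, args, attempts=1, timeout=600):
        self.args = args
        self.attempts = attempts
        self.timeout = timeout


class AndroidBenchPictures(BuildStep):
    def __init__(self, args, attempts=1, timeout=4800):
        # Only the default changes; callers can still pass their own timeout.
        super(AndroidBenchPictures, self).__init__(args, attempts=attempts,
                                                   timeout=timeout)


step = AndroidBenchPictures([])
print(step.timeout)  # 4800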
d4e890a16fcb155c6df78d378b3ba9429590c74b
|
fix test
|
src/unittest/python/aws/kms_tests.py
|
src/unittest/python/aws/kms_tests.py
|
import unittest2
from boto.kms.exceptions import InvalidCiphertextException
from cfn_sphere.aws.kms import KMS
from mock import patch
from cfn_sphere.exceptions import InvalidEncryptedValueException
class KMSTests(unittest2.TestCase):
@patch('cfn_sphere.aws.kms.kms.connect_to_region')
def test_decrypt_value(self, kms_mock):
kms_mock.return_value.decrypt.return_value = {'Plaintext': 'decryptedValue'}
self.assertEqual('decryptedValue', KMS().decrypt("ZW5jcnlwdGVkVmFsdWU="))
kms_mock.return_value.decrypt.assert_called_once_with("encryptedValue")
@patch('cfn_sphere.aws.kms.kms.connect_to_region')
def test_invalid_base64(self, kms_mock):
with self.assertRaises(InvalidEncryptedValueException):
KMS().decrypt("asdqwda")
@patch('cfn_sphere.aws.kms.kms.connect_to_region')
def test_invalid_kms_key(self, kms_mock):
kms_mock.return_value.decrypt.side_effect = InvalidCiphertextException("400", "Bad Request")
with self.assertRaises(InvalidEncryptedValueException):
KMS().decrypt("ZW5jcnlwdGVkVmFsdWU=")
|
Python
| 0.000002
|
@@ -1,12 +1,27 @@
+import base64%0A%0A
import unitt
@@ -578,24 +578,48 @@
ith(
-%22encryptedValue%22
+base64.b64decode(%22ZW5jcnlwdGVkVmFsdWU=%22)
)%0A%0A
|
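The corrected assertion expects KMS.decrypt to hand boto the decoded ciphertext rather than the base64 text. The relationship the test now checks, runnable on its own:

import base64

token = 'ZW5jcnlwdGVkVmFsdWU='
# b64decode reverses the encoding applied by the caller, so the mocked
# kms client should see the raw bytes, not the base64 wrapper.
assert base64.b64decode(token) == b'encryptedValue'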
4ca3fa3a6692facf72bcf73033c9f92053e1fb30
|
Make missing MSR descriptions a documentation-build-breaker
|
service/docs/source/_ext/geopm_rst_extensions.py
|
service/docs/source/_ext/geopm_rst_extensions.py
|
# Copyright (c) 2015 - 2022, Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
"""Add a Sphinx directive to import a json definition of MSR descriptions.
Examples:
* Render all MSRs in a json file
.. geopm-msr-json:: ../../src/msr_data_arch.json
* Render only signals
.. geopm-msr-json:: ../../src/msr_data_arch.json
:no-controls:
* Render only controls
.. geopm-msr-json:: ../../src/msr_data_arch.json
:no-signals:
"""
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx.util.docutils import SphinxDirective
from sphinx.util import logging
from sphinx import addnodes
import json
import re
logger = logging.getLogger(__name__)
MSR_REGEX = re.compile(r'(MSR::\w+:\w+)', flags=re.ASCII)
def create_msr_description_paragraph(text):
"""Create an MSR description in a docutils paragraph node. Automatically
wrap any named MSR signals/controls in hyperlinks to their descriptions.
"""
msr_definition_paragraph = nodes.paragraph()
for text_part in re.split(MSR_REGEX, text):
match = re.match(MSR_REGEX, text_part)
if match:
# This part of the description names an MSR, so wrap the text in a
# link to the expected docutils reference ID. The ID should exist
# if the mentioned MSR has a description in our documentation.
# Use Sphinx's pending_xref instead of docutils' reference so that
# Sphinx will automatically check for broken references.
# This does not emit a very helpful error message (it just says
# undefined label at the line where we are loading the json file).
# We can implement our own context-aware xref checking and error
# messaging if we create a set of sphinx roles/directives/domains,
# e.g., to define and xref arbitrary signals/controls. Maybe
# something nice to add later on.
ref_node = addnodes.pending_xref(
'', refdomain='std', refexplicit='False',
reftarget=nodes.make_id(match.group(0)), reftype='ref',
refwarn='False')
ref_node += nodes.Text(match.group(0))
msr_definition_paragraph += ref_node
else:
# This is not an MSR cross-reference, so just insert the text.
msr_definition_paragraph += nodes.Text(text_part)
return msr_definition_paragraph
class GeopmMsrJson(SphinxDirective):
has_content = False
required_arguments = 1
option_spec = {
'no-signals': directives.flag,
'no-controls': directives.flag
}
def run(self):
        # Use relfn2path(...)[1] to get its absolute path.
json_path = self.env.relfn2path(self.arguments[0])[1]
no_controls = 'no-controls' in self.options
no_signals = 'no-signals' in self.options
if no_controls and no_signals:
logger.error('Requested MSR information, but asked for neither signals nor controls.',
location=self.get_location())
# Notify Sphinx that changes to the json file should cause a rebuild
# of any docs that point this directive to that file.
self.env.note_dependency(json_path)
msr_documentation = dict()
try:
with open(json_path) as json_file:
msr_documentation = json.load(json_file)
except FileNotFoundError:
logger.error('Unable to read %s', json_path, location=self.get_location())
return []
except json.decoder.JSONDecodeError as e:
logger.error('Unable to parse %s. Error: %s', json_path, e, location=self.get_location())
return []
msr_list = nodes.definition_list()
for msr_base_name, msr_data in msr_documentation['msrs'].items():
for msr_field_name, msr_field_data in msr_data['fields'].items():
is_control = msr_field_data['writeable']
is_signal = not is_control
if (is_control and no_controls) or (is_signal and no_signals):
continue
geopm_msr_name = f'MSR::{msr_base_name}:{msr_field_name}'
msr_definition_body = nodes.definition()
# Create a global docutils ID so we can :ref:`msr-some-field` to it
docutils_id = nodes.make_id(geopm_msr_name)
self.env.app.env.domaindata["std"]["labels"][docutils_id] = (
self.env.docname, docutils_id, geopm_msr_name)
self.env.app.env.domaindata["std"]["anonlabels"][docutils_id] = (
self.env.docname, docutils_id)
# Signal/control names are formatted as ``literal`` terms in
# our documentation
msr_definition_name = nodes.term()
msr_definition_name += nodes.literal(text=geopm_msr_name, ids=[docutils_id], names=[docutils_id])
# Signal/control descriptions start with a paragraph containing
# the high-level description
try:
description_text = msr_field_data['description']
except KeyError:
description_text = f'TODO: Add a description to {self.arguments[0]}'
# TODO: Promote from 'info' to 'error' after we ratchet down the count
logger.info('Missing a description for %s in %s',
geopm_msr_name,
json_path, color='yellow')
msr_definition_body += create_msr_description_paragraph(description_text)
# The MSR description is followed by a list of properties.
msr_property_list = nodes.bullet_list()
description_items = [
('Aggregation', msr_field_data.get('aggregation', 'select_first')),
('Domain', msr_data['domain']),
('Format', 'integer'),
('Unit', msr_field_data['units'])
]
for item_name, item_value in description_items:
bullet_node = nodes.list_item()
item_body = nodes.paragraph()
item_body += nodes.strong(text=item_name)
item_body += nodes.Text(f': {item_value}')
bullet_node += item_body
msr_property_list += bullet_node
msr_definition_body += msr_property_list
msr_definition = nodes.definition_list_item()
msr_definition += msr_definition_name
msr_definition += msr_definition_body
msr_list += msr_definition
return [msr_list]
def setup(app):
app.add_directive('geopm-msr-json', GeopmMsrJson)
return {
'version': '0.1',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
|
Python
| 0.000001
|
@@ -5316,76 +5316,116 @@
#
-TODO: Promote from 'info' to 'error' after we ratchet down the count
+Change from 'error' to 'info' if you ever need to make%0A # this a non-build-blocking check
%0A
@@ -5448,20 +5448,21 @@
logger.
-info
+error
('Missin
@@ -5516,32 +5516,33 @@
+
geopm_msr_name,%0A
@@ -5541,16 +5541,17 @@
r_name,%0A
+
@@ -5587,24 +5587,8 @@
path
-, color='yellow'
)%0A
|
8e65b0bf3a2c703b452385e5919bdb7282275ff6
|
fix popen command
|
src/tasks/save.py
|
src/tasks/save.py
|
import couchdb
import pyes
from pymongo import Connection
from celery.task import task
from celery.log import get_default_logger
import redis
import requests
from neo4jrestclient.client import GraphDatabase
from pycassa.pool import ConnectionPool
from pycassa.columnfamily import ColumnFamily
from BeautifulSoup import BeautifulSoup
from urlparse import urlparse
import hashlib
from lxml import etree
from StringIO import StringIO
import subprocess
import time
import os
log = get_default_logger()
dc_namespaces = {
"nsdl_dc": "http://ns.nsdl.org/nsdl_dc_v1.02/",
"dc": "http://purl.org/dc/elements/1.1/",
"dct": "http://purl.org/dc/terms/"
}
def save_image(url, couchdb_id, dbUrl):
db = couchdb.Database(dbUrl)
p = subprocess.Popen(["firefox", "-saveimage", url])
p.wait()
time.sleep(15)
h = hashlib.md5()
h.update(url)
filename = h.hexdigest()
with open("/home/wegrata/images/" + filename + ".png", "rb") as f:
db.put_attachment(db[couchdb_id], f, "screenshot.jpeg", "image/jpeg")
@task
def insertDocumentMongo(envelope, config):
try:
conf = config['mongodb']
con = Connection(conf['host'], conf['port'])
db = con[conf['database']]
collection = db[conf['collection']]
del envelope['_rev']
del envelope['_id']
collection.insert(envelope)
except (Exception) as exc:
log.error(exc)
log.error("Error writing to mongo")
@task
def insertDocumentCouchdb(envelope, config):
try:
conf = config['couchdb']
db = couchdb.Database(conf['dbUrl'])
del envelope['_rev']
del envelope['_id']
db.save(envelope)
except (Exception), exc:
log.error(exc)
log.error("Error writing to mongo")
@task
def insertDocumentElasticSearch(envelope, config):
r = config['redis']
r = redis.StrictRedis(host=r['host'], port=r['port'], db=r['db'])
count = r.incr('esid')
conf = config['elasticsearch']
es = pyes.ES("{0}:{1}".format(conf['host'], conf['port']))
index = {
"resource_locator": envelope['resource_locator'],
'resource_data': envelope['resource_data'],
'doc_ID': envelope['doc_ID']
}
es.index(index, conf['index'], conf['index-type'], count)
@task
def insertDocumentSolr(envelope, config):
pass
@task
def insertLRInterface(envelope, config):
if 'keys' in envelope:
for k in envelope['keys']:
saveToNeo.delay(k, config)
title = envelope['resource_locator']
try:
headers = requests.head(title)
if headers.headers['content-type'] == 'text/html':
fullPage = requests.get(title)
soup = BeautifulSoup(fullPage.content)
title = soup.html.head.title.string
except Exception:
pass # expected for invalid URLs
cassandra_data = dict(resource_url=envelope['resource_locator'],
doc_id=envelope['doc_ID'],
submitter=envelope['identity']['submitter'],
keyword=k)
cassandra_data['title'] = title
saveToCassandra.delay(cassandra_data, config)
else:
print(envelope)
@task
def saveToCassandra(data, config):
r = redis.StrictRedis(host=config['redis']['host'],
port=config['redis']['port'],
db=config['redis']['db'])
pool = ConnectionPool('lr', server_list=['localhost', '10.10.1.47'])
cf = ColumnFamily(pool, 'contentobjects')
cassandra_id = r.incr('cassandraid')
cf.insert(cassandra_id, data)
@task
def saveToNeo(keyword, config):
r = redis.StrictRedis(host=config['redis']['host'],
port=config['redis']['port'],
db=config['redis']['db'])
gdb = GraphDatabase("http://localhost:7474/db/data/")
if not r.sismember('topics', keyword):
r.sadd('topics', keyword)
gdb.nodes.create(**{"email": keyword, "topic": True})
@task
def createRedisIndex(data, config):
r = redis.StrictRedis(host=config['redis']['host'],
port=config['redis']['port'],
db=config['redis']['db'])
parts = urlparse(data['resource_locator'])
process_keywords(r, data)
save_display_data(parts, data, config)
save_image(data, config)
def process_keywords(r, data):
m = hashlib.md5()
m.update(data['resource_locator'])
url_hash = m.hexdigest()
def save_to_index(k, value):
keywords = k.split(' ')
keywords.append(k)
for keyword_part in keywords:
if not r.zadd(keyword_part, 1.0, value):
r.zincrby(keyword_part, value, 1.0)
for k in (key.lower() for key in data['keys']):
save_to_index(k, url_hash)
if 'nsdl_dc' in data['payload_schema']:
try:
s = StringIO(data['resource_data'])
tree = etree.parse(s)
result = tree.xpath('/nsdl_dc:nsdl_dc/dc:subject',
namespaces=dc_namespaces)
for subject in result:
save_to_index(subject.text.lower(), url_hash)
except etree.XMLSyntaxError:
print(data['resource_data'])
def save_display_data(parts, data, config):
title = data['resource_locator']
description = ""
m = hashlib.md5()
m.update(data['resource_locator'])
couchdb_id = m.hexdigest()
conf = config['couchdb']
db = couchdb.Database(conf['dbUrl'])
try:
headers = requests.head(data['resource_locator'])
if 'nsdl_dc' in data['payload_schema']:
try:
s = StringIO(data['resource_data'])
tree = etree.parse(s)
result = tree.xpath('/nsdl_dc:nsdl_dc/dc:title',
namespaces=dc_namespaces)
title = result[0].text
result = tree.xpath('/nsdl_dc:nsdl_dc/dc:description',
namespaces=dc_namespaces)
description = result[0].text
except etree.XMLSyntaxError:
print(data['resource_data'])
elif headers.headers['content-type'].startswith('text/html'):
fullPage = requests.get(data['resource_locator'])
soup = BeautifulSoup(fullPage.content)
title = soup.html.head.title.string
else:
title = "{0}/...{1}".format(parts.netloc,
parts.path[parts.path.rfind('/'):])
except Exception as e:
print(e)
try:
db[couchdb_id] = {
"title": title,
"description": description,
"url": data['resource_locator']
}
except couchdb.ResourceConflict:
pass
# save_image(data['resource_locator'], couchdb_id, conf['dbUrl'])
@task
def save_image(envelope, config):
m = hashlib.md5()
couchdb_id = m.hexdigest()
p = subprocess.Popen("xvfb-run python {0} {1}".format(envelope['resource_locator'], couchdb_id), shell=True, cwd=os.getcwd(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print(p.communicate())
p.wait()
filename = p.communicate()
db = couchdb.Database(conf['dbUrl'])
with open(filename, "rb") as f:
db.put_attachment(db[couchdb_id], f, "screenshot.jpeg", "image/jpeg")
|
Python
| 0.00008
|
@@ -7310,16 +7310,30 @@
python
+screenshot.py
%7B0%7D %7B1%7D%22
|
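The fix inserts the missing script name into the shell command; before it, `python` was asked to execute the URL itself. A sketch of the corrected command string, with screenshot.py as the capture script named by the diff and a hypothetical id for illustration:

url = 'http://example.com/page'
couchdb_id = 'abc123'  # hypothetical id for illustration

# Before: 'xvfb-run python {url} {id}' -- no script to run.
# After: the script comes first, then its arguments.
cmd = 'xvfb-run python screenshot.py {0} {1}'.format(url, couchdb_id)
print(cmd)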
1e7fba11a40c5f477e8a1482638a9bcaaf981348
|
Make hoomd.data.typeparam.TypeParameter picklable
|
hoomd/data/typeparam.py
|
hoomd/data/typeparam.py
|
from hoomd.data.parameterdicts import AttachedTypeParameterDict
from copy import deepcopy
class TypeParameter:
def __init__(self, name, type_kind, param_dict):
self.name = name
self.type_kind = type_kind
self.param_dict = param_dict
def __getattr__(self, attr):
try:
return getattr(self.param_dict, attr)
except AttributeError:
raise AttributeError("'{}' object has no attribute "
"'{}'".format(type(self), attr))
def __getitem__(self, key):
return self.param_dict[key]
def __setitem__(self, key, value):
self.param_dict[key] = value
def __eq__(self, other):
return self.name == other.name and \
self.type_kind == other.type_kind and \
self.param_dict == other.param_dict
@property
def default(self):
return self.param_dict.default
@default.setter
def default(self, value):
self.param_dict.default = value
def _attach(self, cpp_obj, sim):
self.param_dict = AttachedTypeParameterDict(cpp_obj,
self.name,
self.type_kind,
self.param_dict,
sim)
return self
def _detach(self):
self.param_dict = self.param_dict.to_dettached()
return self
def to_dict(self):
return self.param_dict.to_dict()
def keys(self):
yield from self.param_dict.keys()
@property
def state(self):
state = self.to_dict()
if self.param_dict._len_keys > 1:
state = {str(key): value for key, value in state.items()}
state['__default__'] = self.default
return state
def __deepcopy__(self, memo):
return TypeParameter(self.name, self.type_kind,
deepcopy(self.param_dict))
|
Python
| 0
|
@@ -61,56 +61,83 @@
ict%0A
-from copy import deepcopy%0A%0A%0Aclass TypeParameter:
+%0A%0Aclass TypeParameter:%0A __slots__ = ('name', 'type_kind', 'param_dict')%0A
%0A
@@ -313,24 +313,104 @@
elf, attr):%0A
+ if attr in self.__slots__:%0A return super().__getattr__(attr)%0A
try:
@@ -1725,31 +1725,24 @@
-@property%0A def
+def __get
state
+__
(sel
@@ -1765,143 +1765,218 @@
e =
-self.to_dict()%0A if self.param_dict._len_keys %3E 1:%0A state = %7Bstr(key): value for key, value in state.items()%7D%0A
+%7B'name': self.name,%0A 'type_kind': self.type_kind,%0A 'param_dict': self.param_dict%0A %7D%0A if isinstance(self.param_dict, AttachedTypeParameterDict):%0A
stat
@@ -1975,43 +1975,61 @@
+
state%5B'
-__default__'%5D = self.default
+param_dict'%5D = self.param_dict.to_dettached()
%0A
@@ -2057,24 +2057,24 @@
def __
-deepcopy
+setstate
__(self,
@@ -2078,12 +2078,13 @@
lf,
-memo
+state
):%0A
@@ -2094,108 +2094,77 @@
-return TypeParameter(self.name, self.type_kind,%0A deepcopy(self.param_dict)
+for attr, value in state.items():%0A setattr(self, attr, value
)%0A
|
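The diff makes the object picklable by declaring __slots__ and spelling out its state in __getstate__/__setstate__. The same pattern reduced to a self-contained example, independent of the hoomd classes:

import pickle

class Param(object):
    # __slots__ removes __dict__, so pickle needs explicit state hooks.
    __slots__ = ('name', 'value')

    def __init__(self, name, value):
        self.name = name
        self.value = value

    def __getstate__(self):
        return {'name': self.name, 'value': self.value}

    def __setstate__(self, state):
        for attr, value in state.items():
            setattr(self, attr, value)

p = pickle.loads(pickle.dumps(Param('epsilon', 1.0)))
print(p.name, p.value)  # epsilon 1.0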
5af140148f1395e27b392f4236a552be2e81fc10
|
remove gen_frame()
|
vizgame.py
|
vizgame.py
|
import tables as tb
import matplotlib.pyplot as plt
from scipy import misc
import matplotlib.cm as cm
from matplotlib.widgets import Slider
class Experience(tb.IsDescription):
action = tb.IntCol(pos=1)
reward = tb.IntCol(pos=2)
def record(action, reward, frame, newfile=False):
if not newfile:
with tb.open_file("history.h5", "a") as f:
exp = f.root.exp.history
exp.append([(action, reward)])
frames = f.root.frames
frame_number = frames._v_nchildren
f.create_array(frames, "frame" + str(frame_number), frame,
title="frame " + str(frame_number))
else:
with tb.open_file("history.h5", "a") as f:
exp = f.create_group("/", "exp", "Experience history")
h = f.create_table(exp, "history", Experience, "(action,rewards)")
h.append([(action, reward)])
frames = f.create_group("/", "frames", "Frame history")
f.create_array(frames, "frame 0", frame, title="frame0")
def get_frames():
with tb.open_file("history.h5", "a") as f:
frames = f.root.frames._f_list_nodes()
return [frames[fidx].read() for fidx in range(len(frames))]
def gen_image(frame, frame_number):
misc.imsave("frame" + str(frame_number) + ".png", frame)
class DiscreteSlider(Slider):
""" A matplotlib slider widget with discrete steps. """
def __init__(self, *args, **kwargs):
"""Identical to Slider.__init__, except for the "increment" kwarg.
"increment" specifies the step size that the slider will be discritized
to."""
self.inc = kwargs.pop('increment', 0.5)
Slider.__init__(self, *args, **kwargs)
def set_val(self, val):
discrete_val = int(val / self.inc) * self.inc
# We can't just call Slider.set_val(self, discrete_val), because this
# will prevent the slider from updating properly (it will get stuck at
# the first step and not "slide"). Instead, we'll keep track of the
# the continuous value as self.val and pass in the discrete value to
# everything else.
xy = self.poly.xy
xy[2] = discrete_val, 1
xy[3] = discrete_val, 0
self.poly.xy = xy
self.valtext.set_text(self.valfmt % discrete_val)
if self.drawon:
self.ax.figure.canvas.draw()
self.val = val
if not self.eventson:
return
        for cid, func in self.observers.items():
func(discrete_val)
def view_images(frames):
""" |frames| is a list of numpy arrays. """
fig, ax = plt.subplots()
plt.subplots_adjust(left=0.25, bottom=0.25)
l = plt.imshow(frames[0], cmap=cm.Greys_r, interpolation="none")
axcolor = 'lightgoldenrodyellow'
axframe = plt.axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)
sframe = DiscreteSlider(axframe, "Frame no.", 0, len(frames) - 1,
increment=1, valinit=0)
def update(val):
l.set_data(frames[int(sframe.val)])
plt.draw()
    sframe.on_changed(update)
    plt.show()
|
Python
| 0.000001
|
@@ -1225,107 +1225,8 @@
%5D%0A%0A%0A
-def gen_image(frame, frame_number):%0A misc.imsave(%22frame%22 + str(frame_number) + %22.png%22, frame)%0A%0A%0A
clas
|
8425a06fb270e18b7aa7b137cb99b43ce39a4b53
|
Fix bitrotted function call
|
haas.wsgi
|
haas.wsgi
|
#!/usr/bin/env python
import haas.api
from haas import config, model, server
config.load('/etc/haas.cfg')
config.configure_logging()
config.load_extensions()
server.api_server_init()
from haas.rest import wsgi_handler as application
|
Python
| 0
|
@@ -162,19 +162,8 @@
ver.
-api_server_
init
|
a2f389fa321ce2da4e12c10ac17c8b7004977efb
|
fix tests and conditions
|
rupypy/objects/stringobject.py
|
rupypy/objects/stringobject.py
|
from pypy.rlib.objectmodel import newlist_hint, compute_hash
from pypy.rlib.rarithmetic import intmask
from pypy.rlib.rerased import new_static_erasing_pair
from rupypy.module import ClassDef
from rupypy.modules.comparable import Comparable
from rupypy.objects.objectobject import W_Object
from rupypy.objects.exceptionobject import W_ArgumentError
class StringStrategy(object):
def __init__(self, space):
pass
class ConstantStringStrategy(StringStrategy):
erase, unerase = new_static_erasing_pair("constant")
def str_w(self, storage):
return self.unerase(storage)
def liststr_w(self, storage):
strvalue = self.unerase(storage)
return [c for c in strvalue]
def length(self, storage):
return len(self.unerase(storage))
def hash(self, storage):
return compute_hash(self.unerase(storage))
def copy(self, space, storage):
return W_StringObject(space, storage, self)
def to_mutable(self, space, s):
s.strategy = strategy = space.fromcache(MutableStringStrategy)
s.str_storage = strategy.erase(self.liststr_w(s.str_storage))
def extend_into(self, src_storage, dst_storage):
dst_storage += self.unerase(src_storage)
class MutableStringStrategy(StringStrategy):
erase, unerase = new_static_erasing_pair("mutable")
def str_w(self, storage):
return "".join(self.unerase(storage))
def liststr_w(self, storage):
return self.unerase(storage)
def length(self, storage):
return len(self.unerase(storage))
def hash(self, storage):
storage = self.unerase(storage)
length = len(storage)
if length == 0:
return -1
x = ord(storage[0]) << 7
i = 0
while i < length:
x = intmask((1000003 * x) ^ ord(storage[i]))
i += 1
x ^= length
return intmask(x)
def to_mutable(self, space, s):
pass
def extend_into(self, src_storage, dst_storage):
dst_storage += self.unerase(src_storage)
def clear(self, s):
storage = self.unerase(s.str_storage)
del storage[:]
class W_StringObject(W_Object):
classdef = ClassDef("String", W_Object.classdef)
classdef.include_module(Comparable)
def __init__(self, space, storage, strategy):
W_Object.__init__(self, space)
self.str_storage = storage
self.strategy = strategy
@staticmethod
def newstr_fromstr(space, strvalue):
strategy = space.fromcache(ConstantStringStrategy)
storage = strategy.erase(strvalue)
return W_StringObject(space, storage, strategy)
@staticmethod
def newstr_fromchars(space, chars):
strategy = space.fromcache(MutableStringStrategy)
storage = strategy.erase(chars)
return W_StringObject(space, storage, strategy)
def str_w(self, space):
return self.strategy.str_w(self.str_storage)
def liststr_w(self, space):
return self.strategy.liststr_w(self.str_storage)
def length(self):
return self.strategy.length(self.str_storage)
def copy(self, space):
return self.strategy.copy(space, self.str_storage)
def extend(self, space, w_other):
self.strategy.to_mutable(space, self)
strategy = self.strategy
assert isinstance(strategy, MutableStringStrategy)
storage = strategy.unerase(self.str_storage)
w_other.strategy.extend_into(w_other.str_storage, storage)
@classdef.method("to_str")
@classdef.method("to_s")
def method_to_s(self, space):
return self
@classdef.method("+")
def method_plus(self, space, w_other):
assert isinstance(w_other, W_StringObject)
total_size = self.length() + w_other.length()
s = space.newstr_fromchars(newlist_hint(total_size))
s.extend(space, self)
s.extend(space, w_other)
return s
@classdef.method("<<")
def method_lshift(self, space, w_other):
assert isinstance(w_other, W_StringObject)
self.extend(space, w_other)
return self
@classdef.method("size")
@classdef.method("length")
def method_length(self, space):
return space.newint(self.length())
@classdef.method("hash")
def method_hash(self, space):
return space.newint(self.strategy.hash(self.str_storage))
@classdef.method("<=>")
def method_comparator(self, space, w_other):
if isinstance(w_other, W_StringObject):
s1 = space.str_w(self)
s2 = space.str_w(w_other)
if s1 < s2:
return space.newint(-1)
elif s1 == s2:
return space.newint(0)
elif s1 > s2:
return space.newint(1)
else:
if space.respond_to(w_other, space.newsymbol("to_str")) and space.respond_to(w_other, space.newsymbol("<=>")):
tmp = space.send(w_other, space.newsymbol("<=>"), [self])
if tmp is not space.w_nil:
return space.newint(-space.int_w(tmp))
return space.w_nil
@classdef.method("freeze")
def method_freeze(self, space):
pass
@classdef.method("dup")
def method_dup(self, space):
return self.copy(space)
@classdef.method("to_sym")
@classdef.method("intern")
def method_to_sym(self, space):
return space.newsymbol(space.str_w(self))
@classdef.method("clear")
def method_clear(self, space):
self.strategy.to_mutable(space, self)
self.strategy.clear(self)
return self
@classdef.method("ljust", integer="int", padstr="str")
def method_ljust(self, space, integer, padstr=" "):
if integer <= self.length():
return self
if len(padstr) is 0:
space.raise_(space.getclassfor(W_ArgumentError), "zero width padding")
elif len(padstr) is 1:
res = space.str_w(self).ljust(integer, padstr)
else:
required_padding = 1 + ((integer - self.length() - 1) / len(padstr))
res = space.str_w(self) + (padstr * required_padding)[:integer - 1]
return space.newstr_fromstr(res)
@classdef.method("split", limit="int")
def method_split(self, space, w_sep=None, limit=-1):
if w_sep is None:
sep = None
elif isinstance(w_sep, W_StringObject):
sep = space.str_w(w_sep)
else:
raise NotImplementedError("Regexp separators for String#split")
results = space.str_w(self).split(sep, limit - 1)
return space.newarray([space.newstr_fromstr(s) for s in results])
@classdef.method("to_i", radix="int")
def method_to_i(self, space, radix=10):
return space.newint(int(space.str_w(self), radix))
|
Python
| 0
|
@@ -5794,24 +5794,18 @@
if
-len(
+not
padstr
-) is 0
:%0A
@@ -5818,20 +5818,25 @@
-space.raise_
+raise space.error
(spa
@@ -5915,18 +5915,18 @@
padstr)
-is
+==
1:%0A
|
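The fix replaces identity tests (`is 0`, `is 1`) with truthiness and equality. `is` compares object identity and only appears to work for small integers that CPython happens to intern; a quick illustration:

a = int('1000000')  # constructed at runtime, not an interned constant
b = 1000000
print(a == b)  # True: same value
print(a is b)  # False in CPython: distinct objects

padstr = ''
print(len(padstr) == 0, not padstr)  # True True: 'not padstr' is the idiom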
e64c9ef0212c9e28781d7bc9e667df4f3c46880a
|
Add timeing output to fxa import command
|
news/management/commands/process_fxa_data.py
|
news/management/commands/process_fxa_data.py
|
from __future__ import print_function, unicode_literals
from email.utils import formatdate
from django.conf import settings
from django.core.cache import cache
from django.core.management import BaseCommand, CommandError
import babis
import boto3
from apscheduler.schedulers.blocking import BlockingScheduler
from django_statsd.clients import statsd
from pathlib2 import Path
from pytz import utc
from raven.contrib.django.raven_compat.models import client as sentry_client
from news.backends.sfmc import sfmc, NewsletterException
TMP = Path('/tmp')
BUCKET_DIR = 'fxa-last-active-timestamp/data'
DATA_PATH = TMP.joinpath(BUCKET_DIR)
FXA_IDS = {}
FILE_DONE_KEY = 'fxa_activity:completed:%s'
FILES_IN_PROCESS = []
TWO_WEEKS = 60 * 60 * 24 * 14
schedule = BlockingScheduler(timezone=utc)
def _fxa_id_key(fxa_id):
return 'fxa_activity:%s' % fxa_id
def get_fxa_time(fxa_id):
fxatime = FXA_IDS.get(fxa_id)
if fxatime is None:
fxatime = cache.get(_fxa_id_key(fxa_id))
if fxatime:
FXA_IDS[fxa_id] = fxatime
return fxatime or 0
def set_fxa_time(fxa_id, fxa_time):
try:
sfmc.upsert_row(settings.FXA_SFMC_DE, {
'FXA_ID': fxa_id,
'Timestamp': formatdate(timeval=fxa_time, usegmt=True),
})
except NewsletterException:
sentry_client.captureException()
# try again later
return
FXA_IDS[fxa_id] = fxa_time
cache.set(_fxa_id_key(fxa_id), fxa_time, timeout=TWO_WEEKS)
def file_is_done(pathobj):
return bool(cache.get(FILE_DONE_KEY % pathobj.name))
def set_file_done(pathobj):
# cache done state for 2 weeks. files stay in s3 bucket for 1 week
cache.set(FILE_DONE_KEY % pathobj.name, 1, timeout=TWO_WEEKS)
def set_in_process_files_done():
for i in range(len(FILES_IN_PROCESS)):
set_file_done(FILES_IN_PROCESS.pop())
def update_fxa_data(current_timestamps):
"""Store the updated timestamps in a local dict, the cache, and SFMC."""
update_count = 0
total_count = len(current_timestamps)
print('attempting to update %s fxa timestamps' % total_count)
for fxaid, timestamp in current_timestamps.iteritems():
curr_ts = get_fxa_time(fxaid)
if timestamp > curr_ts:
update_count += 1
set_fxa_time(fxaid, timestamp)
# print progress every 1,000,000
if update_count % 1000000 == 0:
print('fxa_data: updated %s of %s records' % (update_count, total_count))
print('updated %s fxa timestamps' % update_count)
set_in_process_files_done()
statsd.gauge('process_fxa_data.updates', update_count)
def download_fxa_files():
s3 = boto3.resource('s3',
aws_access_key_id=settings.FXA_ACCESS_KEY_ID,
aws_secret_access_key=settings.FXA_SECRET_ACCESS_KEY)
bucket = s3.Bucket(settings.FXA_S3_BUCKET)
for obj in bucket.objects.filter(Prefix=BUCKET_DIR):
print('found %s in s3 bucket' % obj.key)
tmp_path = TMP.joinpath(obj.key)
if not tmp_path.name.endswith('.csv'):
continue
if file_is_done(tmp_path):
continue
if not tmp_path.exists():
print('getting ' + obj.key)
print('size is %s' % obj.size)
tmp_path.parent.mkdir(parents=True, exist_ok=True)
try:
bucket.download_file(obj.key, str(tmp_path))
print('downloaded %s' % tmp_path)
except Exception:
# something went wrong, delete file
print('bad things happened. deleting %s' % tmp_path)
tmp_path.unlink()
def get_fxa_data():
all_fxa_times = {}
data_files = DATA_PATH.glob('*.csv')
for tmp_path in sorted(data_files):
if file_is_done(tmp_path):
continue
print('loading data from %s' % tmp_path)
# collect all of the latest timestamps from all files in a dict first
# to ensure that we have the minimum data set to compare against SFMC
with tmp_path.open() as fxafile:
file_count = 0
for line in fxafile:
file_count += 1
fxaid, timestamp = line.strip().split(',')
curr_ts = all_fxa_times.get(fxaid, 0)
timestamp = int(timestamp)
if timestamp > curr_ts:
all_fxa_times[fxaid] = timestamp
if file_count < 1000000:
# if there were fewer than 1M rows we probably got a truncated file
# try again later (typically they contain 20M)
print('possibly truncated file: %s' % tmp_path)
else:
FILES_IN_PROCESS.append(tmp_path)
# done with file either way
tmp_path.unlink()
return all_fxa_times
@schedule.scheduled_job('interval', id='process_fxa_data', days=1, max_instances=1)
@babis.decorator(ping_before=settings.FXA_SNITCH_URL, fail_silenty=True)
def main():
download_fxa_files()
update_fxa_data(get_fxa_data())
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('--cron', action='store_true', default=False,
help='Run the cron schedule instead of just once')
def handle(self, *args, **options):
if not all(getattr(settings, name) for name in ['FXA_ACCESS_KEY_ID',
'FXA_SECRET_ACCESS_KEY',
'FXA_S3_BUCKET']):
raise CommandError('FXA S3 Bucket access not configured')
main()
if options['cron']:
print('cron schedule starting')
schedule.start()
|
Python
| 0.000002
|
@@ -84,16 +84,38 @@
rmatdate
+%0Afrom time import time
%0A%0Afrom d
@@ -5016,24 +5016,48 @@
def main():%0A
+ start_time = time()%0A
download
@@ -5105,16 +5105,129 @@
data())%0A
+ total_time = time() - start_time%0A print('fxa_data: finished import in %25s minutes' %25 int(total_time / 60))%0A
%0A%0Aclass
|
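The patch brackets main() with time() calls and reports the elapsed minutes. The timing idiom on its own, with a placeholder comment where the real work would go:

from time import time

def main():
    start_time = time()
    # ... download_fxa_files() and update_fxa_data() would run here ...
    total_time = time() - start_time
    print('fxa_data: finished import in %s minutes' % int(total_time / 60))

main()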
4c0788003c2e579d42200c6ce1da3cedd4dff152
|
Implement reading from and writing to mfrc522 registers
|
mfrc522/mfrc522.py
|
mfrc522/mfrc522.py
|
class MFRC522:
MAX_LEN = 16
class Commands(Enum):
PCD_IDLE = 0x00
PCD_AUTHENT = 0x0E
PCD_RECEIVE = 0x08
PCD_TRANSMIT = 0x04
PCD_TRANSCEIVE = 0x0C
PCD_RESETPHASE = 0x0F
PCD_CALCCRC = 0x03
class Registers(Enum):
Reserved00 = 0x00
CommandReg = 0x01
CommIEnReg = 0x02
DivlEnReg = 0x03
CommIrqReg = 0x04
DivIrqReg = 0x05
ErrorReg = 0x06
Status1Reg = 0x07
Status2Reg = 0x08
FIFODataReg = 0x09
FIFOLevelReg = 0x0A
WaterLevelReg = 0x0B
ControlReg = 0x0C
BitFramingReg = 0x0D
CollReg = 0x0E
Reserved01 = 0x0F
Reserved10 = 0x10
ModeReg = 0x11
TxModeReg = 0x12
RxModeReg = 0x13
TxControlReg = 0x14
TxAutoReg = 0x15
TxSelReg = 0x16
RxSelReg = 0x17
RxThresholdReg = 0x18
DemodReg = 0x19
Reserved11 = 0x1A
Reserved12 = 0x1B
MifareReg = 0x1C
Reserved13 = 0x1D
Reserved14 = 0x1E
SerialSpeedReg = 0x1F
Reserved20 = 0x20
CRCResultRegM = 0x21
CRCResultRegL = 0x22
Reserved21 = 0x23
ModWidthReg = 0x24
Reserved22 = 0x25
RFCfgReg = 0x26
GsNReg = 0x27
CWGsPReg = 0x28
ModGsPReg = 0x29
TModeReg = 0x2A
TPrescalerReg = 0x2B
TReloadRegH = 0x2C
TReloadRegL = 0x2D
TCounterValueRegH = 0x2E
TCounterValueRegL = 0x2F
Reserved30 = 0x30
TestSel1Reg = 0x31
TestSel2Reg = 0x32
TestPinEnReg = 0x33
TestPinValueReg = 0x34
TestBusReg = 0x35
AutoTestReg = 0x36
VersionReg = 0x37
AnalogTestReg = 0x38
TestDAC1Reg = 0x39
TestDAC2Reg = 0x3A
TestADCReg = 0x3B
Reserved31 = 0x3C
Reserved32 = 0x3D
Reserved33 = 0x3E
Reserved34 = 0x3F
|
Python
| 0
|
@@ -1,12 +1,35 @@
+from enum import Enum%0A%0A
class MFRC52
@@ -2589,8 +2589,1340 @@
= 0x3F%0A
+%0A def __init__(self, spi_dev):%0A %22%22%22Initializes a MFRC522 module.%0A%0A spi_dev should be an object representing a SPI interface to which%0A the Reader is connected. It should have the following methods:%0A * transfer(bytes): Selects the slave, transfers bytes, unselects%0A the slave and returns received bytes.%0A * hard_powerdown(): Pulls NRST signal of the reader LOW, thus powering%0A it down%0A * reset(): Pushes NRST signal of the reader HIGH,%0A thus resetting it (and exiting the hard_powerdown)%0A If the NRST is already high, this function shall%0A pull it LOW and then HIGH again.%0A %22%22%22%0A%0A self.spi = spi_dev%0A%0A # TODO initialize the module%0A # self.init()%0A%0A def write_register(self, register, val):%0A self.spi.transfer(bytes((register.value %3C%3C 1, val)))%0A%0A def read_register(self, register):%0A return self.spi.transfer(bytes(((register.value %3C%3C 1) %7C 0x80, 0)))%5B1%5D%0A%0A def set_mask_in_register(self, reg, mask):%0A self.write_register(reg, self.read_register(reg) %7C mask)%0A%0A def clear_mask_in_register(self, reg, mask):%0A self.write_register(reg, self.read_register(reg) & (~mask))%0A
|
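The added register helpers shift the register address left one bit and set the high bit for reads, which matches the MFRC522 SPI framing. A sketch of that byte layout using a dummy SPI object so it runs without hardware:

class DummySPI(object):
    """Records transfers and returns zero bytes, standing in for real SPI."""
    def transfer(self, data):
        print('transfer:', list(data))
        return bytes(len(data))

COMMAND_REG = 0x01

spi = DummySPI()
# Write: address shifted left, MSB clear, followed by the value.
spi.transfer(bytes((COMMAND_REG << 1, 0x0F)))
# Read: address shifted left with MSB set; a dummy byte clocks the reply out,
# and the register value comes back in the second byte.
reply = spi.transfer(bytes(((COMMAND_REG << 1) | 0x80, 0)))
print('value:', reply[1])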
b219823af7188f968d7c52c5273148c510bd7454
|
Simplify the ckernel pass a bit more
|
blaze/compute/air/frontend/ckernel_impls.py
|
blaze/compute/air/frontend/ckernel_impls.py
|
"""
Lift ckernels to their appropriate rank so they always consume the full array
arguments.
"""
from __future__ import absolute_import, division, print_function
import datashape
from pykit.ir import transform, Op
#------------------------------------------------------------------------
# Run
#------------------------------------------------------------------------
def run(func, env):
strategies = env['strategies']
transform(CKernelImplementations(strategies), func)
#------------------------------------------------------------------------
# Extract CKernel Implementations
#------------------------------------------------------------------------
class CKernelImplementations(object):
"""
For kernels that are implemented via ckernels, this
grabs the ckernel_deferred and turns it into a ckernel
op.
"""
def __init__(self, strategies):
self.strategies = strategies
def op_kernel(self, op):
if self.strategies[op] != 'ckernel':
return
function = op.metadata['kernel']
overload = op.metadata['overload']
# Default overload is CKERNEL, so no need to look it up again
func = overload.func
polysig = overload.sig
monosig = overload.resolved_sig
argtypes = datashape.coretypes.Tuple(monosig.argtypes)
impl = overload.func
assert monosig == overload.resolved_sig, (monosig,
overload.resolved_sig)
new_op = Op('ckernel', op.type, [impl, op.args[1:]], op.result)
new_op.add_metadata({'rank': 0,
'parallel': True})
return new_op
|
Python
| 0.000001
|
@@ -1,95 +1,40 @@
%22%22%22%0A
-Lift c
+Convert '
kernel
-s to their appropriate rank so they always consume the full array%0Aarguments
+' Op to 'ckernel'
.%0A%22%22
@@ -106,25 +106,8 @@
on%0A%0A
-import datashape%0A
from
@@ -142,456 +142,120 @@
Op%0A%0A
-#------------------------------------------------------------------------%0A# Run%0A#------------------------------------------------------------------------%0A%0Adef run(func, env):%0A strategies = env%5B'strategies'%5D%0A transform(CKernelImplementations(strategies), func)%0A%0A#------------------------------------------------------------------------%0A# Extract CKernel Implementations%0A#------------------------------------------------------------------------
+%0Adef run(func, env):%0A strategies = env%5B'strategies'%5D%0A transform(CKernelImplementations(strategies), func)%0A
%0A%0Acl
@@ -602,93 +602,8 @@
rn%0A%0A
- function = op.metadata%5B'kernel'%5D%0A overload = op.metadata%5B'overload'%5D%0A%0A
@@ -679,15 +679,8 @@
- func =
ove
@@ -688,309 +688,64 @@
load
-.func%0A polysig = overload.sig%0A monosig = overload.resolved_sig%0A argtypes = datashape.coretypes.Tuple(monosig.argtypes)%0A%0A impl = overload.func%0A assert monosig == overload.resolved_sig, (monosig,%0A overload.resolved_sig)
+ = op.metadata%5B'overload'%5D%0A%0A impl = overload.func
%0A%0A
|
60dd476337ead3262daaa17ee4a973937cac380d
|
Add help for sites argument to manage.py scan
|
securethenews/sites/management/commands/scan.py
|
securethenews/sites/management/commands/scan.py
|
import json
import subprocess
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from sites.models import Site, Scan
def pshtt(domain):
pshtt_cmd = ['pshtt', '--json', domain]
p = subprocess.Popen(
pshtt_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
stdout, stderr = p.communicate()
# pshtt returns a list with a single item, which is a dictionary of
# the scan results.
pshtt_results = json.loads(stdout)[0]
return pshtt_results, stdout, stderr
def scan(site):
# Scan the domain with pshtt
results, stdout, stderr = pshtt(site.domain)
scan = Scan(
site=site,
live=results['Live'],
valid_https=results['Valid HTTPS'],
downgrades_https=results['Downgrades HTTPS'],
defaults_to_https=results['Defaults to HTTPS'],
hsts=results['HSTS'],
hsts_max_age=results['HSTS Max Age'],
hsts_entire_domain=results['HSTS Entire Domain'],
hsts_preload_ready=results['HSTS Preload Ready'],
hsts_preloaded=results['HSTS Preloaded'],
pshtt_stdout=stdout,
pshtt_stderr=stderr,
).save()
class Command(BaseCommand):
help = 'Rescan all sites and store the results in the database'
def add_arguments(self, parser):
parser.add_argument('sites', nargs='*', type=str, default='')
def handle(self, *args, **options):
# Support targeting a specific site to scan.
if options['sites']:
sites = []
for domain_name in options['sites']:
try:
site = Site.objects.get(domain=domain_name)
sites.append(site)
except Site.DoesNotExist:
msg = "Site with domain '{}' does not exist".format(domain_name)
raise CommandError(msg)
else:
sites = Site.objects.all()
with transaction.atomic():
for site in sites:
self.stdout.write('Scanning: {}'.format(site.domain))
scan(site)
|
Python
| 0
|
@@ -1430,16 +1430,142 @@
fault=''
+,%0A help=(%22Specify one or more domain names of sites to scan. %22%0A %22If unspecified, scan all sites.%22)
)%0A%0A%0A
|
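The change only attaches a help string to the existing positional argument. A standalone argparse equivalent showing where that text surfaces:

import argparse

parser = argparse.ArgumentParser(prog='scan')
parser.add_argument('sites', nargs='*', type=str, default='',
                    help=('Specify one or more domain names of sites to '
                          'scan. If unspecified, scan all sites.'))
print(parser.format_help())  # the help text appears under 'sites'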
f60399e44afb49d4bab2016deff3992875fb5c8e
|
Correct some format of 8-1.
|
8-1policy_network.py
|
8-1policy_network.py
|
import numpy as np
import tensorflow as tf
import gym
env = gym.make('CartPole-v0')
env.reset()
random_episodes = 0
reward_sum = 0
while random_episodes < 10:
env.render()
observation, reward, done, _ = env.step(np.random.randint(0, 2))
reward_sum += reward
if done:
random_episodes += 1
print("Reward for this episode was: ", reward_sum)
reward_sum = 0
env.reset()
H = 50
batch_size = 25
learning_rate = 1e-1
D = 4
gamma = 0.99
observations = tf.placeholder(tf.float32, [None, D], name = "input_x")
W1 = tf.get_variable("W1", shape = [D, H], initializer = tf.contrib.layers.xavier_initializer())
layer1 = tf.nn.relu(tf.matmul(observations, W1))
W2 = tf.get_variable("W2", shape = [H, 1], initializer = tf.contrib.layers.xavier_initializer())
score = tf.matmul(layer1, W2)
probability = tf.nn.sigmoid(score)
tvars = tf.trainable_variables()
adam = tf.train.AdamOptimizer(learning_rate = learning_rate)
W1Grad = tf.placeholder(tf.float32, name = "batch_grad1")
W2Grad = tf.placeholder(tf.float32, name = "batch_grad2")
batchGrad = [W1Grad, W2Grad]
updateGrads = adam.apply_gradients(zip(batchGrad, tvars))
def discount_rewards(r):
discounted_r = np.zeros_like(r)
running_add = 0
for t in reversed(range(r.size)):
running_add = running_add * gamma + r[t]
discounted_r[t] = running_add
return discounted_r
input_y = tf.placeholder(tf.float32, [None, 1], name = "input_y")
advantages = tf.placeholder(tf.float32, name = "reward_signal")
loglik = tf.log(input_y * (input_y - probability) + (1 - input_y) * (input_y + probability))
loss = -tf.reduce_mean(loglik * advantages)
#tvars = tf.trainable_variables()
newGrads = tf.gradients(loss, tvars)
xs, ys, drs = [], [], []
reward_sum = 0
episode_number = 1
total_episodes = 10000
with tf.Session() as sess:
rendering = False
init = tf.global_variables_initializer()
sess.run(init)
observation = env.reset()
gradBuffer = sess.run(tvars)
for ix, grad in enumerate(gradBuffer):
gradBuffer[ix] = grad * 0
while episode_number <= total_episodes:
if reward_sum / batch_size > 100 or rendering == True:
env.render()
rendering = True
x = np.reshape(observation, [1, D])
tfprob = sess.run(probability, feed_dict = {observations: x})
action = 1 if np.random.uniform() < tfprob else 0
xs.append(x)
y = 1 - action
ys.append(y)
observation, reward, done, info = env.step(action)
reward_sum += reward
drs.append(reward)
if done:
episode_number += 1
epx = np.vstack(xs)
epy = np.vstack(ys)
epr = np.vstack(drs)
xs, ys, drs = [], [], []
discounted_epr = discount_rewards(epr)
discounted_epr -= np.mean(discounted_epr)
discounted_epr /= np.std(discounted_epr)
tGrad = sess.run(newGrads, feed_dict = {observations: epx, input_y: epy, advantages: discounted_epr})
for ix, grad in enumerate(tGrad):
gradBuffer[ix] += grad
if episode_number % batch_size == 0:
sess.run(updateGrads, feed_dict = {W1Grad: gradBuffer[0], W2Grad: gradBuffer[1]})
for ix, grad in enumerate(gradBuffer):
gradBuffer[ix] = grad * 0
print('Average reward for episode %d: %f.' % (episode_number, reward_sum / batch_size))
if reward_sum / batch_size > 200:
print("Task solved in ", episode_number, ' episodes!')
break
reward_sum = 0
observation = env.reset()
|
Python
| 0.003769
|
@@ -76,16 +76,105 @@
ole-v0')
+%0A'''from gym import wrappers%0Aenv = wrappers.Monitor(env, '/tmp/cartpole-experiment-1')'''
%0A%0Aenv.re
@@ -3707,17 +3707,16 @@
olved in
-
%22, episo
@@ -3727,17 +3727,16 @@
umber, '
-
episodes
|
bac39d2e4f1d17d74dc2ac7c6d16c702bbaeaed4
|
Include the server endpoint in log entries.
|
jsonrpcclient/server.py
|
jsonrpcclient/server.py
|
"""server.py"""
import json
import pkgutil
import logging
from requests import Request, Session
from requests.exceptions import InvalidSchema, RequestException
import jsonschema
from jsonrpcclient import rpc
from jsonrpcclient import exceptions
logger = logging.getLogger(__name__)
request_log = logging.getLogger(__name__+'.request')
response_log = logging.getLogger(__name__+'.response')
DEFAULT_HTTP_HEADERS = {
'Content-Type': 'application/json',
'Accept': 'application/json'
}
class Server(object):
"""This class acts as the remote server"""
def __init__(self, endpoint, **kwargs):
"""Instantiate a remote server object.
>>> server = Server('http://example.com/api', \
headers={'Content-Type': 'application/json-rpc'}, \
auth=('user', 'pass'))
"""
kwargs.setdefault('headers', DEFAULT_HTTP_HEADERS)
self.endpoint = endpoint
self.headers = kwargs['headers']
self.requests_kwargs = kwargs
kwargs.pop('headers')
def __getattr__(self, name):
"""Catch undefined methods and handle them as RPC requests.
The technique is here: http://code.activestate.com/recipes/307618/
"""
def attr_handler(*args, **kwargs):
"""Call self.request from here"""
if kwargs.get('response', False):
return self.request(name, *args, **kwargs)
else:
return self.notify(name, *args, **kwargs)
return attr_handler
def request(self, method_name, *args, **kwargs):
"""JSON-RPC Request (expect a response)"""
kwargs['response'] = True
return self.handle_response(
self.send_message(rpc.request(method_name, *args, **kwargs)), True)
def notify(self, method_name, *args, **kwargs):
"""JSON-RPC Notification (no response required)"""
return self.handle_response(
self.send_message(rpc.request(method_name, *args, **kwargs)), False)
def send_message(self, request):
"""Send the RPC request (a json dict) to the server.
Calls a procedure on another server.
Raises JsonRpcClientError: On any error caught.
"""
logger.debug('Sending via http post...')
s = Session()
# Prepare the request
request = Request(method='POST', url=self.endpoint, \
headers=self.headers, json=request, **self.requests_kwargs)
request = s.prepare_request(request)
request.headers = dict(list(dict(request.headers).items()) + list(
self.headers.items()))
# Log the request before sending
request_log.info(
request.body,
extra={
'http_headers': request.headers
})
try:
response = s.send(request)
# Catch the requests module's InvalidSchema exception if the json is
# invalid.
except InvalidSchema:
raise exceptions.InvalidRequest()
# Catch all other requests exceptions, such as network issues.
# See http://stackoverflow.com/questions/16511337/
except RequestException: # Base requests exception
raise exceptions.ConnectionError()
finally:
s.close()
# Log the response, cleaning it up a bit
response_log.info(
response.text \
.replace("\n", '').replace(' ', ' ').replace('{ ', '{'),
extra={
'http_code': response.status_code,
'http_reason': response.reason,
'http_headers': response.headers
})
return response.text
@staticmethod
def handle_response(response, expected_response=False):
"""Processes the response (a json string)"""
# A response was expected, but none was given?
if expected_response and not len(response):
raise exceptions.ReceivedNoResponse()
# Was a response given?
if len(response):
# Attempt to parse the response
try:
response_dict = json.loads(response)
except ValueError:
raise exceptions.ParseResponseError()
# Unwanted response - A response was not asked for, but one was
# given anyway. It may not be necessary to raise here.
if not expected_response and 'result' in response_dict:
raise exceptions.UnwantedResponse()
# Validate the response against the Response schema
try:
jsonschema.validate(response_dict, json.loads(pkgutil.get_data(
__name__, 'response-schema.json').decode('utf-8')))
except jsonschema.ValidationError:
raise exceptions.InvalidResponse()
# If the response was "error", raise it, to ensure it's handled
if 'error' in response_dict:
raise exceptions.ReceivedErrorResponse(
response_dict['error']['code'],
response_dict['error']['message'])
# Otherwise, surely we have a result to return
return response_dict['result']
return None
|
Python
| 0
|
@@ -2707,32 +2707,75 @@
extra=%7B%0A
+ 'endpoint': self.endpoint,%0A
@@ -3510,32 +3510,75 @@
extra=%7B%0A
+ 'endpoint': self.endpoint,%0A
|
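The patch threads the endpoint into each record through the `extra` dict, so a formatter can reference it as %(endpoint)s. A minimal runnable version of the idiom, with a hypothetical endpoint standing in for self.endpoint:

import logging

logging.basicConfig(format='%(endpoint)s - %(message)s')
log = logging.getLogger('jsonrpcclient.request')

endpoint = 'http://example.com/api'  # stands in for self.endpoint
# Keys passed via extra= become attributes on the LogRecord.
log.warning('{"jsonrpc": "2.0", "method": "ping"}',
            extra={'endpoint': endpoint})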
36d219164b5e7332d72a980368badec9f86e5c5e
|
Update test_corpus.py
|
sacremoses/test/test_corpus.py
|
sacremoses/test/test_corpus.py
|
# -*- coding: utf-8 -*-
"""
Tests for corpus.py
"""
import sys
import doctest
import unittest
from sacremoses import corpus
from sacremoses.corpus import Perluniprops, NonbreakingPrefixes
class CorpusTest(unittest.TestCase):
def test_perluniprops_chars_sanity_check(self):
perluniprops = Perluniprops()
for category in perluniprops.available_categories:
if sys.version_info[0] >= 3: # Python 3
with self.subTest(category=category):
count = 0
for char in perluniprops.chars(category=category):
self.assertIsInstance(char, str)
count += 1
self.assertGreater(count, 0)
else:
                self.assertEqual(all(isinstance(char, str) for char in
perluniprops.chars(category=category)),
True)
def test_perluniprops_chars_manual(self):
perluniprops = Perluniprops()
self.assertListEqual(list(perluniprops.chars('Open_Punctuation'))[:5],
[u'(', u'[', u'{', u'\u0f3a', u'\u0f3c'])
self.assertListEqual(list(perluniprops.chars('Currency_Symbol'))[:5],
[u'$', u'\xa2', u'\xa3', u'\xa4', u'\xa5'])
def test_nonbreaking_prefixes_sanity_check(self):
nonbreaking_prefixes = NonbreakingPrefixes()
for language in nonbreaking_prefixes.available_langs.values():
if sys.version_info[0] >= 3: # Python 3
with self.subTest(language=language):
count = 0
for word in nonbreaking_prefixes.words(lang=language):
self.assertIsInstance(word, str)
count += 1
self.assertGreater(count, 0)
else:
                self.assertEqual(all(isinstance(word, str) for word in
nonbreaking_prefixes.words(lang=language))m
True)
def test_nonbreaking_prefixes_manual(self):
nonbreaking_prefixes = NonbreakingPrefixes()
self.assertListEqual(list(nonbreaking_prefixes.words('en'))[:10],
[u'A', u'B', u'C', u'D', u'E', u'F', u'G', u'H', u'I', u'J'])
self.assertListEqual(list(nonbreaking_prefixes.words('ta'))[:5],
[u'\u0b85', u'\u0b86', u'\u0b87', u'\u0b88', u'\u0b89'])
def load_tests(loader, tests, ignore):
tests.addTests(doctest.DocTestSuite(corpus))
return tests
|
Python
| 0.000001
|
@@ -1993,17 +1993,17 @@
nguage))
-m
+,
%0A
|
53c6e1f1d0939b4c585301427920e0cf1dd9e341
|
Remove tabs.
|
panel/main.py
|
panel/main.py
|
import ibus
import gtk
import dbus
import dbus.mainloop.glib
import panel
class PanelApplication:
def __init__ (self):
self._dbusconn = dbus.connection.Connection (ibus.IBUS_ADDR)
self._dbusconn.add_signal_receiver (self._disconnected_cb,
"Disconnected",
dbus_interface = dbus.LOCAL_IFACE)
self._panel = panel.PanelProxy (self._dbusconn, "/org/freedesktop/IBus/Panel")
self._ibus = self._dbusconn.get_object (ibus.IBUS_NAME, ibus.IBUS_PATH)
self._ibus.RegisterPanel (self._panel, True)
def run (self):
gtk.main ()
def _disconnected_cb (self):
print "disconnected"
gtk.main_quit ()
def main ():
# gtk.settings_get_default ().props.gtk_theme_name = "/home/phuang/.themes/aud-Default/gtk-2.0/gtkrc"
gtk.rc_parse ("./themes/default/gtkrc")
PanelApplication ().run ()
if __name__ == "__main__":
dbus.mainloop.glib.DBusGMainLoop (set_as_default=True)
main ()
|
Python
| 0
|
@@ -385,21 +385,16 @@
Panel%22)%0A
-%09%09%09%09%09
%0A%09%09self.
|
6bdc16e24e51d16b0fa214d30394317079bc90a9
|
Throw more user-friendly execption inside get_backend_instance method.
|
st2auth/st2auth/backends/__init__.py
|
st2auth/st2auth/backends/__init__.py
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from oslo_config import cfg
from stevedore.driver import DriverManager
from stevedore.extension import ExtensionManager
from st2common import log as logging
__all__ = [
'get_available_backends',
'get_backend_instance'
]
LOG = logging.getLogger(__name__)
BACKENDS_NAMESPACE = 'st2auth.backends.backend'
def get_available_backends():
"""
Return names of the available / installed authentication backends.
:rtype: ``list`` of ``str``
"""
manager = ExtensionManager(namespace=BACKENDS_NAMESPACE, invoke_on_load=False)
return manager.names()
def get_backend_instance(name):
"""
:param name: Backend name.
:type name: ``str``
"""
try:
manager = DriverManager(namespace=BACKENDS_NAMESPACE, name=name,
invoke_on_load=False)
except RuntimeError:
message = 'Invalid authentication backend specified: %s' % (name)
LOG.exception(message)
raise ValueError(message)
backend_kwargs = cfg.CONF.auth.backend_kwargs
if backend_kwargs:
try:
kwargs = json.loads(backend_kwargs)
except ValueError as e:
raise ValueError('Failed to JSON parse backend settings: %s' % (str(e)))
else:
kwargs = {}
cls = manager.driver
cls_instance = cls(**kwargs)
return cls_instance
|
Python
| 0
|
@@ -773,16 +773,33 @@
cense.%0A%0A
+import traceback%0A
import j
@@ -1417,24 +1417,86 @@
e):%0A %22%22%22%0A
+ Retrieve a class instance for the provided auth backend.%0A%0A
:param n
@@ -1542,24 +1542,97 @@
r%60%60%0A %22%22%22%0A
+%0A LOG.debug('Retrieving backend instance for backend %22%25s%22' %25 (name))%0A%0A
try:%0A
@@ -2154,25 +2154,77 @@
settings
-: %25s' %25 (
+ for backend %22%25s%22: %25s' %25%0A (name,
str(e)))
@@ -2280,16 +2280,30 @@
.driver%0A
+%0A try:%0A
cls_
@@ -2323,24 +2323,310 @@
s(**kwargs)%0A
+ except Exception as e:%0A tb_msg = traceback.format_exc()%0A msg = ('Failed to instantiate auth backend %22%25s%22 with backend settings %22%25s%22: %25s' %25%0A (name, str(kwargs), str(e)))%0A msg += '%5Cn%5Cn' + tb_msg%0A exc_cls = type(e)%0A raise exc_cls(msg)%0A%0A
return c
|
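The patch re-raises instantiation failures as the same exception type, with the backend name, settings, and traceback folded into the message. The core of that pattern, runnable on its own with a deliberately broken stand-in class:

import traceback

def instantiate(cls, name, kwargs):
    try:
        return cls(**kwargs)
    except Exception as e:
        tb_msg = traceback.format_exc()
        msg = ('Failed to instantiate auth backend "%s" with backend '
               'settings "%s": %s' % (name, str(kwargs), str(e)))
        msg += '\n\n' + tb_msg
        # Preserve the original exception class for callers that catch it.
        raise type(e)(msg)

class Broken(object):
    def __init__(self):
        raise RuntimeError('boom')

try:
    instantiate(Broken, 'broken', {})
except RuntimeError as e:
    print(str(e).splitlines()[0])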
212a9a901625605000210ec8c436c8f6e9be7c39
|
correct the log filename and handler
|
xutils/log.py
|
xutils/log.py
|
# -*- coding: utf-8 -*-
import os
import os.path
import logging
import logging.config
from logging.handlers import RotatingFileHandler
def init(logger=None, level="INFO", file=None, handler_cls=None, process=False,
max_count=30, propagate=True, file_config=None, dict_config=None):
root = logging.getLogger()
if not logger:
logger = root
# Initialize the argument logger with the arguments, level and log_file.
if logger:
fmt = ("%(asctime)s - %(process)d - %(pathname)s - %(funcName)s - "
"%(lineno)d - %(levelname)s - %(message)s")
datefmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
level = getattr(logging, level.upper())
if file:
if process:
filename, ext = os.path.splitext(file)
if ext:
file = "{0}.{1}{2}".format(filename, os.getpid(), ext)
else:
file = "{0}.{1}".format(filename, os.getpid())
if handler_cls:
handler = handler_cls(file, max_count)
else:
handler = RotatingFileHandler(file, maxBytes=1024**3, backupCount=max_count)
else:
handler = logging.StreamHandler()
handler.setLevel(level)
handler.setFormatter(formatter)
root.setLevel(level)
root.addFilter(handler)
loggers = logger if isinstance(logger, (list, tuple)) else [logger]
for logger in loggers:
if logger is root:
continue
logger.propagate = propagate
logger.setLevel(level)
logger.addHandler(handler)
# Initialize logging by the configuration file, file_config.
if file_config:
logging.config.fileConfig(file_config, disable_existing_loggers=False)
# Initialize logging by the dict configuration, dict_config.
if dict_config and hasattr(logging.config, "dictConfig"):
logging.config.dictConfig(dict_config)
|
Python
| 0.000004
|
@@ -818,36 +818,8 @@
le)%0A
- if ext:%0A
@@ -889,97 +889,8 @@
xt)%0A
- else:%0A file = %22%7B0%7D.%7B1%7D%22.format(filename, os.getpid())%0A
@@ -1261,12 +1261,13 @@
.add
-Filt
+Handl
er(h
|
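Two things happen in this diff: the pid-suffix branch collapses to a single format call, and the handler is attached with addHandler rather than addFilter (a Handler is not a Filter, so the original line silently attached the wrong kind of object). A minimal sketch of the corrected wiring, with an illustrative file name and sizes:

import logging
import os
from logging.handlers import RotatingFileHandler


def per_process_log_file(path):
    # "app.log" -> "app.<pid>.log"; "app" -> "app.<pid>".
    # os.path.splitext returns an empty extension for suffix-less names,
    # so one format string covers both cases -- which is why the diff
    # could drop the if/else on ext.
    filename, ext = os.path.splitext(path)
    return "{0}.{1}{2}".format(filename, os.getpid(), ext)


root = logging.getLogger()
handler = RotatingFileHandler(per_process_log_file("app.log"),
                              maxBytes=1024 ** 3, backupCount=30)
handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
handler.setLevel(logging.INFO)
root.setLevel(logging.INFO)
root.addHandler(handler)  # addHandler, not addFilter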
7733fbcb83491c92dda082a76d9673b2382d1060
|
Enable dataset_splits to generate test-data using t2t-datagen
|
tensor2tensor/data_generators/bair_robot_pushing.py
|
tensor2tensor/data_generators/bair_robot_pushing.py
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Berkeley (BAIR) robot pushing dataset.
Self-Supervised Visual Planning with Temporal Skip Connections
Frederik Ebert, Chelsea Finn, Alex X. Lee, and Sergey Levine.
https://arxiv.org/abs/1710.05268
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tarfile
import numpy as np
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import video_utils
from tensor2tensor.layers import modalities
from tensor2tensor.utils import registry
import tensorflow as tf
DATA_URL = (
"http://rail.eecs.berkeley.edu/datasets/bair_robot_pushing_dataset_v0.tar")
# Lazy load PIL.Image
def PIL_Image(): # pylint: disable=invalid-name
from PIL import Image # pylint: disable=g-import-not-at-top
return Image
@registry.register_problem
class VideoBairRobotPushing(video_utils.VideoProblem):
"""Berkeley (BAIR) robot pushing dataset."""
@property
def num_channels(self):
return 3
@property
def frame_height(self):
return 64
@property
def frame_width(self):
return 64
@property
def is_generate_per_split(self):
return True
# num_train_files * num_videos * num_frames
@property
def total_number_of_frames(self):
return 167 * 256 * 30
@property
def random_skip(self):
return False
def eval_metrics(self):
return []
@property
def only_keep_videos_from_0th_frame(self):
return True
@property
def use_not_breaking_batching(self):
return True
@property
def extra_reading_spec(self):
"""Additional data fields to store on disk and their decoders."""
data_fields = {
"frame_number": tf.FixedLenFeature([1], tf.int64),
}
decoders = {
"frame_number": tf.contrib.slim.tfexample_decoder.Tensor(
tensor_key="frame_number"),
}
return data_fields, decoders
def hparams(self, defaults, unused_model_hparams):
p = defaults
p.modality = {"inputs": modalities.VideoModality,
"targets": modalities.VideoModality}
p.vocab_size = {"inputs": 256,
"targets": 256}
def parse_frames(self, filenames):
image_key = "{}/image_aux1/encoded"
action_key = "{}/action"
state_key = "{}/endeffector_pos"
for f in filenames:
print("Parsing ", f)
for serialized_example in tf.python_io.tf_record_iterator(f):
x = tf.train.Example()
x.ParseFromString(serialized_example)
# there are 4 features per frame
# main image, aux image, actions and states
nf = len(x.features.feature.keys()) // 4
for i in range(nf):
image_name = image_key.format(i)
action_name = action_key.format(i)
state_name = state_key.format(i)
byte_str = x.features.feature[image_name].bytes_list.value[0]
img = PIL_Image().frombytes(
"RGB", (self.frame_width, self.frame_height), byte_str)
arr = np.array(img.getdata())
frame = arr.reshape(
self.frame_width, self.frame_height, self.num_channels)
state = x.features.feature[state_name].float_list.value
action = x.features.feature[action_name].float_list.value
yield i, frame, state, action
def generate_samples(self, data_dir, tmp_dir, dataset_split):
path = generator_utils.maybe_download(
tmp_dir, os.path.basename(DATA_URL), DATA_URL)
tar = tarfile.open(path)
tar.extractall(tmp_dir)
tar.close()
if dataset_split == problem.DatasetSplit.TEST:
base_dir = os.path.join(tmp_dir, "softmotion30_44k/test/*")
filenames = tf.gfile.Glob(base_dir)
else:
base_dir = os.path.join(tmp_dir, "softmotion30_44k/train/*")
filenames = tf.gfile.Glob(base_dir)
# the test-set contains just 256 videos so this should be sufficient.
if dataset_split == problem.DatasetSplit.TRAIN:
filenames = filenames[:-2]
else:
filenames = filenames[-2:]
for frame_number, frame, state, action in self.parse_frames(filenames):
yield {
"frame_number": [frame_number],
"frame": frame,
"state": state,
"action": action,
}
@registry.register_problem
class VideoBairRobotPushingWithActions(VideoBairRobotPushing):
"""Berkeley (BAIR) robot pushing dataset with actions."""
@property
def extra_reading_spec(self):
"""Additional data fields to store on disk and their decoders."""
data_fields = {
"frame_number": tf.FixedLenFeature([1], tf.int64),
"action": tf.FixedLenFeature([4], tf.float32),
}
decoders = {
"frame_number": tf.contrib.slim.tfexample_decoder.Tensor(
tensor_key="frame_number"),
"action": tf.contrib.slim.tfexample_decoder.Tensor(tensor_key="action"),
}
return data_fields, decoders
|
Python
| 0.000348
|
@@ -2182,32 +2182,339 @@
return True%0A%0A
+ @property%0A def dataset_splits(self):%0A %22%22%22Splits of data to produce and number of output shards for each.%22%22%22%0A return %5B%0A %7B%22split%22: problem.DatasetSplit.TRAIN, %22shards%22: 10%7D,%0A %7B%22split%22: problem.DatasetSplit.EVAL, %22shards%22: 1%7D,%0A %7B%22split%22: problem.DatasetSplit.TEST, %22shards%22: 1%7D%5D%0A%0A
@property%0A de
|
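The added dataset_splits property is the hook t2t-datagen uses to decide which splits to materialize and how many output shards each gets; combined with is_generate_per_split returning True, generate_samples() is invoked once per split, so TEST data really comes from the held-out files. A sketch of the pattern as the diff applies it (assumes tensor2tensor is installed; the shard counts are the ones from the patch):

from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import video_utils


class VideoProblemSketch(video_utils.VideoProblem):
    """Illustrative subset of the patched problem class."""

    @property
    def is_generate_per_split(self):
        # True: generate_samples() receives each split separately instead
        # of t2t-datagen re-splitting the TRAIN output.
        return True

    @property
    def dataset_splits(self):
        """Splits of data to produce and number of output shards for each."""
        return [
            {"split": problem.DatasetSplit.TRAIN, "shards": 10},
            {"split": problem.DatasetSplit.EVAL, "shards": 1},
            {"split": problem.DatasetSplit.TEST, "shards": 1}]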
b83b774f6651232afd828bf1ec2c489084832b52
|
Fix test
|
bluebottle/funding_pledge/tests/test_api.py
|
bluebottle/funding_pledge/tests/test_api.py
|
import json
from django.core import mail
from django.urls import reverse
from rest_framework import status
from bluebottle.funding.tests.factories import FundingFactory, DonationFactory
from bluebottle.initiatives.tests.factories import InitiativeFactory
from bluebottle.test.factory_models.accounts import BlueBottleUserFactory
from bluebottle.test.utils import BluebottleTestCase, JSONAPITestClient
class PaymentTestCase(BluebottleTestCase):
def setUp(self):
super(PaymentTestCase, self).setUp()
self.client = JSONAPITestClient()
self.user = BlueBottleUserFactory()
self.initiative = InitiativeFactory.create()
self.initiative.transitions.submit()
self.initiative.transitions.approve()
self.funding = FundingFactory.create(initiative=self.initiative)
self.donation = DonationFactory.create(activity=self.funding, user=self.user)
self.donation_url = reverse('funding-donation-list')
self.payment_url = reverse('pledge-payment-list')
self.data = {
'data': {
'type': 'payments/pledge-payments',
'relationships': {
'donation': {
'data': {
'type': 'contributions/donations',
'id': self.donation.pk,
}
}
}
}
}
mail.outbox = []
def test_create_payment(self):
response = self.client.post(self.payment_url, data=json.dumps(self.data), user=self.user)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
data = json.loads(response.content)
self.assertEqual(data['data']['attributes']['status'], 'succeeded')
self.assertEqual(data['included'][0]['attributes']['status'], 'succeeded')
# Check that donation mails are send
self.assertEqual(len(mail.outbox), 2)
def test_create_payment_other_user(self):
response = self.client.post(
self.payment_url,
data=json.dumps(self.data),
user=BlueBottleUserFactory.create()
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_create_payment_no_user(self):
response = self.client.post(
self.payment_url,
data=json.dumps(self.data)
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
|
Python
| 0.000004
|
@@ -593,16 +593,31 @@
Factory(
+can_pledge=True
)%0A
|
0e60c23ce6e40304437218151e895dcaf856f832
|
Update project version
|
billing/__init__.py
|
billing/__init__.py
|
__version__ = '1.6'
__copyright__ = 'Copyright (c) 2020, Skioo SA'
__licence__ = 'MIT'
__URL__ = 'https://github.com/skioo/django-customer-billing'
|
Python
| 0
|
@@ -14,9 +14,9 @@
'1.
-6
+7
'%0A__
|
6f6aeb99677fd510a13ca1abd4962b64763ca5ec
|
Update repo_metadata.py
|
gitmostwanted/tasks/repo_metadata.py
|
gitmostwanted/tasks/repo_metadata.py
|
from gitmostwanted.app import app, db, celery
from gitmostwanted.models.repo import Repo, RepoMean
from gitmostwanted.lib.github import api
from sqlalchemy.sql import func, expression
from datetime import datetime, timedelta
@celery.task()
def metadata_maturity(num_months):
repos = Repo.query\
.filter(Repo.created_at <= datetime.now() + timedelta(days=num_months * 30 * -1))\
.filter(Repo.mature.is_(False))
for repo in repos:
repo.mature = True
db.session.commit()
return repos.count()
@celery.task()
def metadata_refresh(num_days):
repos = Repo.query\
.filter(
Repo.checked_at.is_(None) |
(Repo.checked_at <= datetime.now() + timedelta(days=num_days * -1))
)\
.yield_per(25)\
.limit(300) # GitHub allows only 3000 calls per day within a token
for repo in repos:
repo.checked_at = datetime.now()
details, code = api.repo_info(repo.full_name)
if not details:
if 400 <= code < 500:
repo.worth -= 1
app.logger.info(
'{0} is not found, the "worth" has been decreased by 1'.format(repo.full_name)
)
continue
for key in ['description', 'language', 'homepage', 'stargazers_count']:
if getattr(repo, key) != details[key]:
setattr(repo, key, details[key])
db.session.commit()
return repos.count()
@celery.task()
def metadata_trend(num_days):
results = db.session.query(
RepoMean.repo_id, func.substring_index(
func.group_concat(
RepoMean.value.op('ORDER BY')(expression.desc(RepoMean.created_at))
), ',', 2)
)\
.filter(RepoMean.created_at >= datetime.now() + timedelta(days=num_days * -1))\
.group_by(RepoMean.repo_id)\
.all()
for result in filter(lambda x: ',' in x[1], results):
curr, prev = result[1].split(',')
if curr < prev:
app.logger.info(
'Mean value of {0} is {1}, previous was {2}. The "worth" has been decreased by 1'
.format(result[0], curr, prev)
)
db.session.query(Repo)\
.filter(Repo.id == result[0])\
.update({Repo.worth: Repo.worth - 1})
db.session.commit()
@celery.task()
def metadata_erase():
cnt = Repo.query.filter(Repo.worth < 5).delete()
db.session.commit()
return cnt
|
Python
| 0.000002
|
@@ -2441,16 +2441,17 @@
worth %3C
+-
5).delet
|
22fef4c07a28a96267e1d3f0390bc366790252a0
|
Use alias for nodejs_tool.
|
nodejs/def.bzl
|
nodejs/def.bzl
|
_js_filetype = FileType([".js"])
SCRIPT_TEMPLATE = """\
#!/bin/bash
"{node_bin}" "{script_path}"
"""
def nodejs_binary_impl(ctx):
ctx.file_action(
ctx.outputs.executable,
SCRIPT_TEMPLATE.format(node_bin=ctx.file._nodejs_tool.short_path,
script_path=ctx.file.main_script.short_path),
executable=True)
all_runfiles = [ctx.file._nodejs_tool]
all_runfiles.append(ctx.file.main_script)
return struct(
runfiles=ctx.runfiles(files=all_runfiles),
)
nodejs_binary = rule(
nodejs_binary_impl,
executable=True,
attrs={
"main_script": attr.label(
single_file=True,
allow_files=_js_filetype,
),
"_nodejs_tool": attr.label(
default=Label("//nodejs/toolchain:nodejs_tool"),
single_file=True,
allow_files=True,
executable=True,
cfg=HOST_CFG,
)
},
)
NODEJS_BUILD_FILE_CONTENTS = """\
package(
default_visibility = ["//visibility:public"])
filegroup(
name = "nodejs_tool",
srcs = ["bin/node"],
)
"""
def nodejs_repositories():
native.new_http_archive(
name = 'nodejs_linux_amd64',
url = 'https://nodejs.org/dist/v4.4.4/node-v4.4.4-linux-x64.tar.xz',
build_file_content = NODEJS_BUILD_FILE_CONTENTS,
sha256 = 'c8b4e3c6e07e51593dddbf1d2ec3cf0e' +
'c09d5c6b8c5258b37b3816cc6b7e9fe3',
strip_prefix = "node-v4.4.4-linux-x64",
)
|
Python
| 0
|
@@ -1033,17 +1033,13 @@
%5D)%0A%0A
-filegroup
+alias
(%0A
@@ -1070,17 +1070,21 @@
-srcs = %5B%22
+actual = %22//:
bin/
@@ -1088,17 +1088,16 @@
in/node%22
-%5D
,%0A)%0A%22%22%22%0A
|
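For reference, the distinction this diff encodes: filegroup creates a new target that wraps its srcs, while alias merely forwards to an existing label via actual. Applying the hunks, the embedded BUILD-file template in def.bzl becomes (reconstructed from the patch):

NODEJS_BUILD_FILE_CONTENTS = """\
package(
    default_visibility = ["//visibility:public"])

alias(
    name = "nodejs_tool",
    actual = "//:bin/node",
)
"""

Consumers keep depending on the nodejs_tool name, but resolution now goes straight to the node binary target instead of through a filegroup wrapper.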
26053737aa78301a9632add73af3b815323bc309
|
Version 0.2.5.
|
cmsplugin_footnote/__init__.py
|
cmsplugin_footnote/__init__.py
|
__version__ = (0, 2, 4)
version_string = '.'.join(str(n) for n in __version__)
|
Python
| 0
|
@@ -18,9 +18,9 @@
2,
-4
+5
)%0Ave
|
e8540104547878e9f8360ba07ca0cbf1ee63e6ca
|
update Nottingham import script
|
polling_stations/apps/data_collection/management/commands/import_nottingham.py
|
polling_stations/apps/data_collection/management/commands/import_nottingham.py
|
from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "E06000018"
addresses_name = "parl.2017-06-08/Version 1/Democracy_Club__08June2017 8.tsv"
stations_name = "parl.2017-06-08/Version 1/Democracy_Club__08June2017 8.tsv"
elections = ["parl.2017-06-08"]
csv_delimiter = "\t"
|
Python
| 0
|
@@ -18,372 +18,978 @@
ion.
-management.commands import BaseXpressDemocracyClubCsvImporter%0A%0A%0Aclass Command(BaseXpressDemocracyClubCsvImporter):%0A council_id = %22E06000018%22%0A addresses_name = %22parl.2017-06-08/Version 1/Democracy_Club__08June2017 8.tsv%22%0A stations_name = %22parl.2017-06-08/Version 1/Democracy_Club__08June2017 8.tsv%22%0A elections = %5B%22parl.2017-06-08
+github_importer import BaseGitHubImporter%0A%0A%0Aclass Command(BaseGitHubImporter):%0A%0A srid = 4326%0A districts_srid = 4326%0A council_id = %22E06000018%22%0A elections = %5B%5D%0A scraper_name = %22wdiv-scrapers/DC-PollingStations-Nottingham%22%0A geom_type = %22geojson%22%0A%0A def district_record_to_dict(self, record):%0A poly = self.extract_geometry(record, self.geom_type, self.get_srid(%22districts%22))%0A return %7B%0A %22internal_council_id%22: record%5B%22POLLINGDIS%22%5D,%0A %22name%22: record%5B%22POLLINGDIS%22%5D,%0A %22area%22: poly,%0A %22polling_station_id%22: record%5B%22POLLINGDIS%22%5D,%0A %7D%0A%0A def station_record_to_dict(self, record):%0A location = self.extract_geometry(%0A record, self.geom_type, self.get_srid(%22stations%22)%0A )%0A return %7B%0A %22internal_council_id%22: record%5B%22CONST
%22%5D
+,
%0A
-csv_delimiter = %22%5Ct%22
+ %22postcode%22: %22%22,%0A %22address%22: record%5B%22NAME%22%5D + %22%5Cn%22 + record%5B%22ADDRESS%22%5D,%0A %22location%22: location,%0A %7D
%0A
|
17e8a6e72523cbfdecd97b50c0c1bc68863dd86d
|
update examples/buzz -- now available by invitation, and in 1:1
|
examples/buzz.py
|
examples/buzz.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fly with Buzz
In this example we create the following commands with a few lines of code:
- command: explore <planet>
- you then track in real-time the progress of the mission
- command: blast <planet>
- similar to exploration, except that the planet is nuked
- command: planets
- list available destinations
Showcased here are commands that take significant time to execute.
Buzz is flying from Earth to some planets and come back. Obviously,
this is the kind of activity that can take ages, yet here each mission
lasts about 30 seconds.
Ok. So, when I type ``buzz explore Uranus`` in the chat box, do I have to
wait for 30 seconds before the next command is considered? Hopefully not!
The two commands ``explore`` and ``blast`` are non-interactive. This means
that they are pushed to a pipeline for background execution.
With this concept, you can get a dialog similar to the following::
> buzz explore Mercury
Ok, I am working on it
#1 - Departing to Mercury
> buzz blast Neptune
Ok, will work on it as soon as possible
#1 - Approaching Mercury
#1 - Landed on Mercury
> buzz planets
Available destinations:
- Venus
- Moon
...
In other words, the bot is always responsive, whatever is executing in the
background. Also, non-interactive commands are executed in the exact
sequence of their submission.
To run this script you have to provide a custom configuration, or set
environment variables instead::
- ``CHAT_ROOM_MODERATORS`` - Mention at least your e-mail address
- ``CISCO_SPARK_BOT_TOKEN`` - Received from Cisco Spark on bot registration
- ``SERVER_URL`` - Public link used by Cisco Spark to reach your server
The token is specific to your run-time, please visit Cisco Spark for
Developers to get more details:
https://developer.ciscospark.com/
For example, if you run this script under Linux or macOs with support from
ngrok for exposing services to the Internet::
export CHAT_ROOM_MODERATORS="alice@acme.com"
export CISCO_SPARK_BOT_TOKEN="<token id from Cisco Spark for Developers>"
export SERVER_URL="http://1a107f21.ngrok.io"
python buzz.py
"""
import os
from shellbot import Engine, ShellBot, Context
from planets.rocket import Rocket
Context.set_logger()
# use a customized driver for new bots
#
class FlyingBot(ShellBot):
def on_init(self):
self.rocket = Rocket(self)
self.rocket.start()
# create a bot and load commands
#
from planets import PlanetFactory
engine = Engine(type='spark',
commands=PlanetFactory.commands(),
driver=FlyingBot)
# load configuration
#
os.environ['BOT_ON_START'] = 'Hello Buzz, welcome to Cape Canaveral'
os.environ['BOT_ON_STOP'] = 'Batman is now quitting the room, bye'
os.environ['CHAT_ROOM_TITLE'] = 'Buzz flights'
engine.configure()
engine.set('bot.store.planets', ['Mercury',
'Venus',
'Moon',
'Mars',
'Jupiter',
'Saturn',
'Uranus',
'Neptune',
])
# initialise a chat room
#
bot = engine.bond(reset=True)
# run the bot
#
engine.run()
# delete the chat room when the bot is stopped
#
bot.dispose()
|
Python
| 0
|
@@ -3481,13 +3481,13 @@
_ON_
-START
+ENTER
'%5D =
@@ -3550,12 +3550,12 @@
_ON_
-STOP
+EXIT
'%5D =
@@ -4083,12 +4083,15 @@
ine.
-bond
+get_bot
(res
|
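After this diff the example tracks a shellbot API rename: the lifecycle settings become BOT_ON_ENTER/BOT_ON_EXIT and the bot is obtained with engine.get_bot() instead of engine.bond(). The configuration section then reads roughly as follows (taken from the patch; the Engine arguments are trimmed in this sketch):

import os
from shellbot import Engine

engine = Engine(type='spark')  # commands= and driver= omitted for brevity

os.environ['BOT_ON_ENTER'] = 'Hello Buzz, welcome to Cape Canaveral'  # was BOT_ON_START
os.environ['BOT_ON_EXIT'] = 'Batman is now quitting the room, bye'    # was BOT_ON_STOP
os.environ['CHAT_ROOM_TITLE'] = 'Buzz flights'
engine.configure()

bot = engine.get_bot(reset=True)  # was engine.bond(reset=True)
engine.run()
bot.dispose()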
be5cbcf4cae5e6cc5d1d4f82c92193c8c9347432
|
Remove README from debian_service get_all()
|
salt/modules/debian_service.py
|
salt/modules/debian_service.py
|
'''
Service support for Debian systems - uses update-rc.d and service to modify the
system
'''
# Import python libs
import glob
import re
# Import salt libs
import salt.utils
from .systemd import _sd_booted
def __virtual__():
'''
Only work on Debian and when systemd isn't running
'''
if __grains__['os'] == 'Debian' and not _sd_booted():
return 'service'
return False
def _get_runlevel():
'''
returns the current runlevel
'''
return __salt__['cmd.run']('runlevel').split()[1]
def get_enabled():
'''
    Return a list of services that are enabled on boot
CLI Example::
salt '*' service.get_enabled
'''
prefix = '/etc/rc[S{0}].d/S'.format(_get_runlevel())
ret = set()
lines = glob.glob('{0}*'.format(prefix))
for line in lines:
ret.add(re.split(prefix + '\d+', line)[1])
return sorted(ret)
def get_disabled():
'''
Return a set of services that are installed but disabled
CLI Example::
salt '*' service.get_disabled
'''
prefix = '/etc/rc{0}.d/K'.format(_get_runlevel())
ret = set()
lines = glob.glob('{0}*'.format(prefix))
for line in lines:
ret.add(re.split(prefix + '\d+', line)[1])
return sorted(ret)
def get_all():
'''
Return all available boot services
CLI Example::
salt '*' service.get_all
'''
ret = set()
lines = glob.glob('/etc/init.d/*')
for line in lines:
ret.add(line.split('/etc/init.d/')[1])
return sorted(ret)
def start(name):
'''
Start the specified service
CLI Example::
salt '*' service.start <service name>
'''
cmd = 'service {0} start'.format(name)
return not __salt__['cmd.retcode'](cmd)
def stop(name):
'''
Stop the specified service
CLI Example::
salt '*' service.stop <service name>
'''
cmd = 'service {0} stop'.format(name)
return not __salt__['cmd.retcode'](cmd)
def restart(name):
'''
Restart the named service
CLI Example::
salt '*' service.restart <service name>
'''
if name == 'salt-minion':
salt.utils.daemonize_if(__opts__)
cmd = 'service {0} restart'.format(name)
return not __salt__['cmd.retcode'](cmd)
def reload(name):
'''
Reload the named service
CLI Example::
salt '*' service.reload <service name>
'''
cmd = 'service {0} reload'.format(name)
return not __salt__['cmd.retcode'](cmd)
def force_reload(name):
'''
Force-reload the named service
CLI Example::
salt '*' service.force_reload <service name>
'''
cmd = 'service {0} force-reload'.format(name)
return not __salt__['cmd.retcode'](cmd)
def status(name, sig=None):
'''
Return the status for a service, pass a signature to use to find
the service via ps
CLI Example::
salt '*' service.status <service name>
'''
if sig:
return bool(__salt__['status.pid'](sig))
cmd = 'service {0} status'.format(name)
return not __salt__['cmd.retcode'](cmd)
def enable(name, **kwargs):
'''
Enable the named service to start at boot
CLI Example::
salt '*' service.enable <service name>
'''
cmd = 'update-rc.d {0} enable'.format(name)
osmajor = __grains__['osrelease'].split('.')[0]
if int(osmajor) >= 6:
cmd = 'insserv {0} && '.format(name) + cmd
return not __salt__['cmd.retcode'](cmd)
def disable(name, **kwargs):
'''
Disable the named service to start at boot
CLI Example::
salt '*' service.disable <service name>
'''
cmd = 'update-rc.d {0} disable'.format(name)
return not __salt__['cmd.retcode'](cmd)
def enabled(name):
'''
    Return True if the named service is enabled, False otherwise
CLI Example::
salt '*' service.enabled <service name>
'''
return name in get_enabled()
def disabled(name):
'''
    Return True if the named service is disabled, False otherwise
CLI Example::
salt '*' service.disabled <service name>
'''
return name in get_disabled()
|
Python
| 0
|
@@ -1463,24 +1463,26 @@
-ret.add(
+service =
line.spl
@@ -1498,24 +1498,164 @@
init.d/')%5B1%5D
+%0A # Remove README. If it's an enabled service, it will be added back in.%0A if service != 'README':%0A ret.add(service
)%0A return
@@ -1657,32 +1657,48 @@
eturn sorted(ret
+ + get_enabled()
)%0A%0A%0Adef start(na
|
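Applying the diff, get_all() skips the stock README file that Debian ships in /etc/init.d and folds the enabled services back into the result. One caveat worth flagging: the patch returns sorted(ret + get_enabled()), but ret is a set and get_enabled() returns a list, so '+' would raise TypeError at runtime; a set union expresses the intent safely. A self-contained sketch with that correction (get_enabled is stubbed here):

import glob


def get_enabled():
    # Stand-in; the real implementation globs /etc/rc[S<runlevel>].d/S*.
    return []


def get_all():
    '''Return all available boot services.'''
    ret = set()
    for line in glob.glob('/etc/init.d/*'):
        service = line.split('/etc/init.d/')[1]
        # Remove README. If it's an enabled service, it will be added back in.
        if service != 'README':
            ret.add(service)
    return sorted(ret | set(get_enabled()))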
6647025f3cb44818d0ff403160df35aa827516c7
|
refactor env var load
|
src/config.py
|
src/config.py
|
import os
import sys
import socket
import platform
import util
import json
import re
APP = os.path.basename(sys.argv[0])
USER = os.environ.get('SUDO_USER')
if not USER:
USER = os.environ.get('USER')
HOME = os.path.expanduser(f"~{USER}")
HOME_ROOT = os.path.expanduser("~root")
BASE_PATH = os.path.dirname(os.path.dirname(__file__))
HOSTNAME = socket.gethostname()
HOSTUNAME = platform.uname().system
if util.on_macos or util.on_windows:
NAME = platform.uname()[0].lower()
else:
name_pattern = re.compile(r'-(\w*)')
NAME = re.search(pattern=name_pattern, string=platform.uname().version).group(1).lower()
if util.on_macos:
# OS_VERSION example: '12.0.1'
OS_VERSION = platform.mac_ver()[0]
elif util.on_windows:
# OS_VERSION example: '10.0.19042'
OS_VERSION = platform.win32_ver()[1]
elif util.on_wsl:
# OS_VERSION example: '10.0.19044.0'
powershell_path = '/mnt/c/Windows/System32/WindowsPowerShell/v1.0//powershell.exe'
version_path = '[Environment]::OSVersion.VersionString'
OS_VERSION = os.popen(f'{powershell_path} {version_path}').read().split(' ')[-1].replace('\n', '')
else:
# OS_VERSION example: '20.04.1'
version_pattern = re.compile('~(.*)-')
OS_VERSION = re.search(pattern=version_pattern, string=platform.uname().version).group(1)
OS = f'{HOSTUNAME}_{NAME}'
TOP_LEVEL_DOMAIN = (util.read_cache('tld') or 'docker').strip()
DOCKER_CONTAINER_TAG = (util.read_cache('tag') or 'ns0').strip()
DOCKER_CONTAINER_NAME = (util.read_cache('name') or DOCKER_CONTAINER_TAG).strip()
SUPPORTED_OS_VERSIONS = json.load(open(f'{BASE_PATH}/supported_os.json', 'r'))
|
Python
| 0.000062
|
@@ -153,57 +153,18 @@
SER'
-)%0Aif not USER:%0A USER = os.environ.get(
+,
'USER')%0A
%0AHOM
@@ -159,17 +159,16 @@
'USER')%0A
-%0A
HOME = o
|
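One behavioural note on this refactor: os.environ.get('SUDO_USER', 'USER') falls back to the literal string 'USER', whereas the replaced two-step lookup fell back to the value of the USER environment variable. Both spellings for comparison:

import os

# After the diff: the default is the *string* 'USER', not $USER.
user_after = os.environ.get('SUDO_USER', 'USER')

# One-liner equivalent to the pre-diff logic: SUDO_USER, then $USER.
user_before = os.environ.get('SUDO_USER') or os.environ.get('USER')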
09234d3084739075e0aba59002419c341a59a47e
|
Correct import
|
setuptools/tests/test_msvc.py
|
setuptools/tests/test_msvc.py
|
"""
Tests for msvc support module.
"""
import os
import contextlib
import distutils.errors
import six
import pytest
try:
from unittest import mock
except ImportError:
import mock
from . import contexts
# importing only setuptools should apply the patch
__import__('setuptools')
pytest.importorskip("distutils.msvc9compiler")
def mock_reg(hkcu=None, hklm=None):
"""
Return a mock for distutils.msvc9compiler.Reg, patched
to mock out the functions that access the registry.
"""
_winreg = getattr(distutils.msvc9compiler, '_winreg', None)
winreg = getattr(distutils.msvc9compiler, 'winreg', _winreg)
hives = {
winreg.HKEY_CURRENT_USER: hkcu or {},
winreg.HKEY_LOCAL_MACHINE: hklm or {},
}
@classmethod
def read_keys(cls, base, key):
"""Return list of registry keys."""
hive = hives.get(base, {})
return [
k.rpartition('\\')[2]
for k in hive if k.startswith(key.lower())
]
@classmethod
def read_values(cls, base, key):
"""Return dict of registry keys and values."""
hive = hives.get(base, {})
return dict(
(k.rpartition('\\')[2], hive[k])
for k in hive if k.startswith(key.lower())
)
return mock.patch.multiple(distutils.msvc9compiler.Reg,
read_keys=read_keys, read_values=read_values)
class TestModulePatch:
"""
Ensure that importing setuptools is sufficient to replace
the standard find_vcvarsall function with a version that
recognizes the "Visual C++ for Python" package.
"""
key_32 = r'software\microsoft\devdiv\vcforpython\9.0\installdir'
key_64 = r'software\wow6432node\microsoft\devdiv\vcforpython\9.0\installdir'
def test_patched(self):
"Test the module is actually patched"
mod_name = distutils.msvc9compiler.find_vcvarsall.__module__
assert mod_name == "setuptools.msvc", "find_vcvarsall unpatched"
@pytest.mark.xfail(six.PY2,
reason="https://github.com/pypa/setuptools/issues/707")
def test_no_registry_entries_means_nothing_found(self):
"""
No registry entries or environment variable should lead to an error
directing the user to download vcpython27.
"""
find_vcvarsall = distutils.msvc9compiler.find_vcvarsall
query_vcvarsall = distutils.msvc9compiler.query_vcvarsall
with contexts.environment(VS90COMNTOOLS=None):
with mock_reg():
assert find_vcvarsall(9.0) is None
try:
query_vcvarsall(9.0)
except Exception as exc:
expected = distutils.errors.DistutilsPlatformError
assert isinstance(exc, expected)
assert 'aka.ms/vcpython27' in str(exc)
@pytest.yield_fixture
def user_preferred_setting(self):
"""
Set up environment with different install dirs for user vs. system
and yield the user_install_dir for the expected result.
"""
with self.mock_install_dir() as user_install_dir:
with self.mock_install_dir() as system_install_dir:
reg = mock_reg(
hkcu={
self.key_32: user_install_dir,
},
hklm={
self.key_32: system_install_dir,
self.key_64: system_install_dir,
},
)
with reg:
yield user_install_dir
def test_prefer_current_user(self, user_preferred_setting):
"""
Ensure user's settings are preferred.
"""
result = distutils.msvc9compiler.find_vcvarsall(9.0)
expected = os.path.join(user_preferred_setting, 'vcvarsall.bat')
assert expected == result
@pytest.yield_fixture
def local_machine_setting(self):
"""
Set up environment with only the system environment configured.
"""
with self.mock_install_dir() as system_install_dir:
reg = mock_reg(
hklm={
self.key_32: system_install_dir,
},
)
with reg:
yield system_install_dir
def test_local_machine_recognized(self, local_machine_setting):
"""
Ensure machine setting is honored if user settings are not present.
"""
result = distutils.msvc9compiler.find_vcvarsall(9.0)
expected = os.path.join(local_machine_setting, 'vcvarsall.bat')
assert expected == result
@pytest.yield_fixture
def x64_preferred_setting(self):
"""
Set up environment with 64-bit and 32-bit system settings configured
and yield the canonical location.
"""
with self.mock_install_dir() as x32_dir:
with self.mock_install_dir() as x64_dir:
reg = mock_reg(
hklm={
# This *should* only exist on 32-bit machines
self.key_32: x32_dir,
# This *should* only exist on 64-bit machines
self.key_64: x64_dir,
},
)
with reg:
yield x32_dir
def test_ensure_64_bit_preferred(self, x64_preferred_setting):
"""
Ensure 64-bit system key is preferred.
"""
result = distutils.msvc9compiler.find_vcvarsall(9.0)
expected = os.path.join(x64_preferred_setting, 'vcvarsall.bat')
assert expected == result
@staticmethod
@contextlib.contextmanager
def mock_install_dir():
"""
Make a mock install dir in a unique location so that tests can
distinguish which dir was detected in a given scenario.
"""
with contexts.tempdir() as result:
vcvarsall = os.path.join(result, 'vcvarsall.bat')
with open(vcvarsall, 'w'):
pass
yield result
|
Python
| 0.000031
|
@@ -86,16 +86,39 @@
errors%0A%0A
+from setuptools.extern
import s
|
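The one-line fix imports six from the copy that setuptools vendors, so the test suite no longer depends on a separately installed six distribution. Roughly (assumes a setuptools release that still bundles six under setuptools.extern):

# Before: import six                      -- needs six on sys.path
# After: use the vendored copy shipped inside setuptools itself.
from setuptools.extern import six

print(six.PY2, six.PY3)  # the vendored module behaves like the real one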
c13d83146ea89e813acc12a863a70ac82afec3a8
|
Use the correct encoding when passing the results to buildkite-agent
|
buildifier/buildifier.py
|
buildifier/buildifier.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import fnmatch
import html
import os.path
import re
import subprocess
import sys
regex = re.compile(
r"(?P<filename>[^:]*):(?P<line>\d*):(?:(?P<column>\d*):)? (?P<message_id>[^:]*): (?P<message>.*) \((?P<message_url>.*)\)"
)
def eprint(*args, **kwargs):
"""
Print to stderr and flush (just in case).
"""
print(*args, flush=True, file=sys.stderr, **kwargs)
def upload_output(output):
# Generate output usable by Buildkite's annotations.
eprint("--- :hammer_and_wrench: Printing raw output for debugging")
eprint(output)
eprint("+++ :buildkite: Uploading output via 'buildkite annotate'")
result = subprocess.run(
["buildkite-agent", "annotate", "--style", "warning", "--context", "buildifier"],
input=output,
)
if result.returncode != 0:
eprint(
":rotating_light: 'buildkite-agent annotate' failed with exit code {}".format(
result.returncode
)
)
def get_file_url(filename, line):
commit = os.environ.get("BUILDKITE_COMMIT")
repo = os.environ.get("BUILDKITE_PULL_REQUEST_REPO", os.environ.get("BUILDKITE_REPO", None))
if not commit or not repo:
return None
# Example 1: https://github.com/bazelbuild/bazel.git
# Example 2: git://github.com/philwo/bazel.git
# Example 3: git@github.com:bazelbuild/bazel.git
match = re.match(r"(?:(?:git|https?)://|git@)(github.com[:/].*)\.git", repo)
if match:
return "https://{}/blob/{}/{}#L{}".format(
match[1].replace(":", "/"), commit, filename, line
)
return None
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
# Gather all files to process.
eprint("+++ :female-detective: Looking for BUILD, BUILD.bazel and *.bzl files")
files = []
build_bazel_found = False
for root, dirnames, filenames in os.walk("."):
for filename in filenames:
if fnmatch.fnmatch(filename, "BUILD.bazel"):
build_bazel_found = True
for pattern in ("BUILD", "BUILD.bazel", "*.bzl"):
if fnmatch.fnmatch(filename, pattern):
files.append(os.path.relpath(os.path.join(root, filename)))
if build_bazel_found:
eprint(
"Found BUILD.bazel files in the workspace, thus ignoring BUILD files without suffix."
)
        files = [fname for fname in files if not fnmatch.fnmatch(os.path.basename(fname), "BUILD")]
if not files:
eprint("No files found, exiting.")
return 0
# Run buildifier.
eprint("+++ :bazel: Running buildifier")
result = subprocess.run(
["buildifier", "--lint=warn"] + sorted(files), capture_output=True, universal_newlines=True
)
# If buildifier was happy, there's nothing left to do for us.
if result.returncode == 0:
eprint("+++ :tada: Buildifier found nothing to complain about")
return result.returncode
# Parse output.
eprint("+++ :gear: Parsing buildifier output")
findings = []
for line in result.stderr.splitlines():
# Skip empty lines.
line = line.strip()
if not line:
continue
# Try to parse as structured data.
match = regex.match(line)
if match:
findings.append(match)
else:
output = "##### :bazel: buildifier: error while parsing output\n"
output += "<pre><code>" + html.escape(result.stderr) + "</code></pre>"
if "BUILDKITE_JOB_ID" in os.environ:
output += "\n\nSee [job {job}](#{job})\n".format(job=os.environ["BUILDKITE_JOB_ID"])
upload_output(output)
return result.returncode
output = "##### :bazel: buildifier: found {} problems in your BUILD and *.bzl files\n".format(
len(findings)
)
output += "<pre><code>"
for finding in findings:
file_url = get_file_url(finding["filename"], finding["line"])
if file_url:
output += '<a href="{}">{}:{}</a>:'.format(
file_url, finding["filename"], finding["line"]
)
else:
output += "{}:{}:".format(finding["filename"], finding["line"])
if finding["column"]:
output += "{}:".format(finding["column"])
output += ' <a href="{}">{}</a>: {}\n'.format(
finding["message_url"], finding["message_id"], finding["message"]
)
output = output.strip() + "</pre></code>"
upload_output(output)
# Preserve buildifier's exit code.
return result.returncode
if __name__ == "__main__":
sys.exit(main())
|
Python
| 0.000001
|
@@ -68,16 +68,30 @@
rt html%0A
+import locale%0A
import o
@@ -823,16 +823,59 @@
t=output
+.encode(locale.getpreferredencoding(False))
,%0A )%0A
|
efacaa8a302133b938e933c61f29d9fa6dca0912
|
Version bump.
|
minion/__init__.py
|
minion/__init__.py
|
from minion.core import Application
from minion.request import Response
__version__ = "0.7.0"
|
Python
| 0
|
@@ -86,9 +86,9 @@
%220.
-7
+8
.0%22%0A
|
24110636fd6eaae8962478c9f3e56c9da469be81
|
bump version to 0.9.0
|
zs/version.py
|
zs/version.py
|
# This file is part of ZS
# Copyright (C) 2013-2014 Nathaniel Smith <njs@pobox.com>
# See file LICENSE.txt for license information.
# This file must be kept very simple, because it is consumed from several
# places -- it is imported by zs/__init__.py, execfile'd by setup.py, etc.
# We use a simple scheme:
# 1.0.0 -> 1.0.0-dev -> 1.1.0 -> 1.1.0-dev
# where the -dev versions are never released into the wild, they're just what
# we stick into the VCS in between releases.
#
# This is compatible with PEP 440:
# http://legacy.python.org/dev/peps/pep-0440/
# in a slightly abusive way -- PEP 440 provides no guidance on what version
# number to use for *unreleased* versions, so we use an "integrator suffix",
# which is intended to be used for things like Debian's locally patched
# version, and is not allowed on public index servers. Which sounds about
# right, actually... Crucially, PEP 440 says that "foo-bar" sorts *after*
# "foo", which is what we want for a dev version. (Compare to "foo.dev0",
# which sorts *before* "foo".)
__version__ = "0.0.0-dev"
|
Python
| 0.000001
|
@@ -1051,17 +1051,13 @@
__ = %220.
-0
+9
.0
--dev
%22%0A
|
14cf80f75426093fbb5c29656da6abf6c218eac5
|
Use environment's python
|
weather.py
|
weather.py
|
#!/usr/local/bin/python3
import requests
import subprocess
from datetime import date, timedelta
from multiprocessing.dummy import Pool
with open('metoffice_api_key') as keyfile:
api_key = keyfile.read()
weather_types = {'NA': 'Not available',
0: 'Clear night',
1: 'Sunny day',
2: 'Partly cloudy', # night
3: 'Partly cloudy', # day
4: 'Not used',
5: 'Mist',
6: 'Fog',
7: 'Cloudy',
8: 'Overcast',
9: 'Light rain shower', # night
10: 'Light rain shower', # day
11: 'Drizzle',
12: 'Light rain',
13: 'Heavy rain shower', # night
14: 'Heavy rain shower', # day
15: 'Heavy rain',
16: 'Sleet shower', # night
17: 'Sleet shower', # day
18: 'Sleet',
19: 'Hail shower', # night
20: 'Hail shower', # day
21: 'Hail',
22: 'Light snow shower', # night
23: 'Light snow shower', # day
24: 'Light snow',
25: 'Heavy snow shower', # night
26: 'Heavy snow shower', # day
27: 'Heavy snow',
28: 'Thunder shower', # night
29: 'Thunder shower', # day
30: 'Thunder'}
forecast_codes = {
'FDm': {'name': 'Feels like',
'units': '°C'},
'Dm': {'name': 'Max temp',
'units': '°C'},
'Gn': {'name': 'Wind gusts',
'units': 'mph'},
'Hn': {'name': 'Humidity',
'units': '%'},
'V': {'name': 'Visibility',
'units': ''},
'D': {'name': 'Wind direction',
'units': ''},
'S': {'name': 'Wind speed',
'units': 'mph'},
'U': {'name': 'UV',
'units': ''},
'W': {'name': 'Weather type',
'units': ''},
'PPd': {'name': 'Precipitation probability',
'units': '%'},
}
urls = {
'base': 'http://datapoint.metoffice.gov.uk/public/data/',
'forecast': 'val/wxfcs/all/json/{location}',
'summary': 'txt/wxfcs/regionalforecast/json/{location}',
}
location_tuples = (
('Aberdeen', 310170),
('Birmingham', 310002),
('Cardiff', 350758),
('Edinburgh', 351351),
('Glasgow', 310009),
('Inverness', 320002),
('Liverpool', 310012),
('London', 352409),
('Manchester', 310013),
('Newcastle', 352793),
('Norwich', 310115),
('Plymouth', 310016),
('Sheffield', 353467),
('Southampton', 353595),
)
location_dicts = [{'name': a, 'code': b} for a, b in location_tuples]
def fetch_forecast(location_code, target_date):
url = urls['base'] + urls['forecast'].format(location=location_code)
params = {'key': api_key,
'res': 'daily',
'time': target_date}
response = requests.request('GET', url, params=params)
conditions = response.json()['SiteRep']['DV']['Location']['Period']['Rep']
day_forecast = next(f for f in conditions if f['$'] == 'Day')
return day_forecast
def parse_forecast(forecast):
working_dict = {}
for k, v in forecast.items():
if k == '$':
continue
name = forecast_codes[k]['name']
units = forecast_codes[k]['units']
if name == 'Weather type':
working_dict[name] = weather_types[int(v)]
else:
working_dict[name] = v + units
return working_dict
def build_weather_string(parsed_dict):
weather_template = '''\
{Weather type}
{temp}
Wind {Wind speed} {Wind direction}{precip}'''
temp_string = parsed_dict['Max temp'] + ' max'
if parsed_dict['Feels like'] != parsed_dict['Max temp']:
temp_string += ', feels like {}'.format(parsed_dict['Feels like'])
precip_chance = parsed_dict['Precipitation probability']
if int(precip_chance.replace('%', '')) < 20:
precip_string = ''
else:
precip_string = '\n{} chance of rain'.format(precip_chance)
weather_string = weather_template.format(temp=temp_string,
precip=precip_string,
**parsed_dict)
return weather_string
def add_daily_forecast_to_dict(location, forecast_date):
raw_forecast = fetch_forecast(location['code'], forecast_date)
parsed = parse_forecast(raw_forecast)
location[forecast_date] = build_weather_string(parsed)
def date_string(d):
return d.isoformat() + 'T00:00:00Z'
def fetch_uk_outlook():
url = urls['base'] + urls['summary'].format(location=515)
params = {'key': api_key}
response = requests.request('GET', url, params=params)
periods = response.json()['RegionalFcst']['FcstPeriods']['Period']
summary_dict = next(p for p in periods if p['id'] == 'day3to5')
outlook = summary_dict['Paragraph']['$']
return outlook
def asrun(ascript):
"Run the given AppleScript and return the standard output and error."
osa = subprocess.Popen(['osascript', '-'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
return osa.communicate(ascript)[0]
def set_frame_contents(frame_name, text):
script = '''\
tell application "Adobe InDesign CS4"
\ttell the front document
\t\tset the contents of text frame "{frame}" to "{contents}"
\tend tell
end tell
'''
asrun(script.format(frame=frame_name, contents=text).encode())
def most_common_condition():
conditions = [loc[dt].split('\n')[0]
for loc in location_dicts
for dt in date_list]
counted = set((conditions.count(c), c) for c in conditions)
return max(counted)[1]
if __name__ == '__main__':
outlook_text = fetch_uk_outlook()
today = date.today()
date_list = [date_string(today + timedelta(1))]
if today.weekday() == 4: # today is Friday
date_list.append(date_string(today + timedelta(2)))
# for dt in date_list:
# for loc in location_dicts:
# parsed = parse_forecast(fetch_forecast(loc['code'], dt))
# loc[dt] = build_weather_string(parsed)
with Pool() as pool:
pool.starmap(
add_daily_forecast_to_dict,
[(loc, dt) for loc in location_dicts for dt in date_list]
)
for loc in location_dicts:
set_frame_contents(loc['name'], loc[date_list[0]])
if len(date_list) == 2:
set_frame_contents(loc['name'] + '_Sun', loc[date_list[1]])
set_frame_contents('Outlook', outlook_text)
|
Python
| 0.000637
|
@@ -4,18 +4,16 @@
usr/
-local/
bin/
+env
pyth
|
b0b434f9aca014ad7ca0e722c461322222ab0007
|
remove unused imports
|
sevenbridges/meta/resource.py
|
sevenbridges/meta/resource.py
|
import os
import copy
import logging
import six
from sevenbridges.errors import SbgError
from sevenbridges.http.client import HttpClient
from sevenbridges.meta.data import DataContainer
from sevenbridges.meta.fields import Field
from sevenbridges.meta.transformer import Transform
logger = logging.getLogger(__name__)
# noinspection PyProtectedMember
class ResourceMeta(type):
"""
Metaclass for all resources, knows how to inject instance of API from
class that contains classes with this meta. Class that contains this class
has to have 'api' property which will be injected into class level
API property of Resource class.
Creates constructors for all resources and manages instantiation of
resource fields.
"""
def __new__(mcs, name, bases, dct):
# Attach fields object fo resource instance.
fields = {}
for k, v in dct.items():
if isinstance(v, Field):
if v.name is None:
fields[k] = v
else: # field has explicit name set in the field constructor
fields[v.name] = v
if v.name is None:
v.name = k
dct['_fields'] = fields
if '__init__' not in dct:
def init(self, **kwargs):
self._api = kwargs.pop('api', None)
urls = getattr(self, '_URL', None)
self._data = DataContainer(urls=urls, api=self._api)
self._dirty = {}
for k, v in kwargs.items():
if k in fields:
value = fields[k].validate(v)
self._data[k] = value
self._old = copy.deepcopy(self._data.data)
def _data_diff(d1, d2):
data = {}
for key in d1.keys():
if key not in d2.keys():
continue
else:
if type(d1[key]) is dict:
inner_diff = _data_diff(d1[key], d2[key])
if inner_diff:
data[key] = inner_diff
else:
if d1[key] != d2[key]:
data[key] = d2[key]
return data
# get modified data from the instance
def modified_data(self):
difference = _data_diff(self._old, self._data.data)
self._dirty.update(difference)
return self._dirty
def equals(self, other):
if not hasattr(other, '__class__'):
return False
if not self.__class__ == other.__class__:
return False
return self is other or self._data == other._data
def deepcopy(self):
return self.__class__(api=self._api, **self._data.data)
if '__str__' not in dct:
dct['__str__'] = lambda self: self.__class__.__name__
if '__repr__' not in dct:
if six.PY2:
dct['__repr__'] = lambda self: str(self).encode('utf-8')
else:
dct['__repr__'] = lambda self: str(self)
dct['__init__'] = init
dct['equals'] = equals
dct['deepcopy'] = deepcopy
dct['_modified_data'] = modified_data
return type.__new__(mcs, name, bases, dct)
def __get__(cls, obj, objtype=None):
if obj is None:
return cls
cls._API = obj
return cls
# noinspection PyProtectedMember,PyAttributeOutsideInit
class Resource(six.with_metaclass(ResourceMeta)):
"""
Resource is base class for all resources, hiding implementation details
of magic of injecting instance of API and common operations (like generic
query).
"""
_API = None
def __init__(self, api):
self.api = api
@classmethod
def _query(cls, **kwargs):
"""
Generic query implementation that is used
by the resources.
"""
from sevenbridges.models.link import Link
from sevenbridges.meta.collection import Collection
api = kwargs.pop('api', cls._API)
url = kwargs.pop('url')
extra = {'resource': cls.__name__, 'query': kwargs}
logger.info('Querying {} resource'.format(cls), extra=extra)
response = api.get(url=url, params=kwargs)
data = response.json()
total = response.headers['x-total-matching-query']
items = [cls(api=api, **item) for item in data['items']]
links = [Link(**link) for link in data['links']]
href = data['href']
return Collection(
resource=cls, href=href, total=total, items=items,
links=links, api=api
)
@classmethod
def get(cls, id, api=None):
"""
Fetches the resource from the server.
:param id: Resource identifier
:param api: sevenbridges Api instance.
:return: Resource object.
"""
id = Transform.to_resource(id)
api = api if api else cls._API
if 'get' in cls._URL:
extra = {'resource': cls.__name__, 'query': {'id': id}}
logger.info('Fetching {} resource'.format(cls), extra=extra)
resource = api.get(url=cls._URL['get'].format(id=id)).json()
return cls(api=api, **resource)
else:
raise SbgError('Unable to fetch resource!')
def delete(self):
"""
Deletes the resource on the server.
"""
if 'delete' in self._URL:
extra = {'resource': self.__class__.__name__, 'query': {
'id': self.id}}
logger.info("Deleting {} resource.".format(self), extra=extra)
self._api.delete(url=self._URL['delete'].format(id=self.id))
else:
raise SbgError('Resource can not be deleted!')
def reload(self):
"""
Refreshes the resource with the data from the server.
"""
try:
if hasattr(self, 'href'):
data = self._api.get(self.href, append_base=False).json()
resource = self.__class__(api=self._api, **data)
elif hasattr(self, 'id') and hasattr(self, '_URL') and \
'get' in self._URL:
data = self._api.get(
self._URL['get'].format(id=self.id)).json()
resource = self.__class__(api=self._api, **data)
else:
raise SbgError('Resource can not be refreshed!')
query = {'id': self.id} if hasattr(self, 'id') else {}
extra = {'resource': self.__class__.__name__, 'query': query}
logger.info('Reloading {} resource.'.format(self), extra=extra)
except Exception:
raise SbgError('Resource can not be refreshed!')
self._data = resource._data
self._dirty = resource._dirty
self._old = copy.deepcopy(self._data.data)
return self
def field(self, name):
"""
Return field value if it's set
:param name: Field name
:return: Field value or None
"""
return self._data.data.get(name, None)
|
Python
| 0.000001
|
@@ -1,14 +1,4 @@
-import os%0A
impo
@@ -78,56 +78,8 @@
ror%0A
-from sevenbridges.http.client import HttpClient%0A
from
|
970978b5355259fe943d5efed1b8b4ce945fdfa7
|
Debug control flow and exit on errors
|
weather.py
|
weather.py
|
#! /usr/bin/python2
from os.path import expanduser,isfile
from sys import argv
from urllib import urlopen
location_path="~/.location"
def location_from_homedir():
if isfile(expanduser(location_path)):
with open(expanduser(location_path)) as f:
return "&".join(f.read().split("\n"))
else:
print("no location file at ", location_path)
def location_from_file(file):
try:
f = open(expanduser(file),'r')
except:
print("file ", location_file, " not found")
location_from_homedir
if len(argv) == 1:
# not given location file
data = location_from_homedir()
elif len(argv) == 2:
# given location file
data = location_from_file(argv[1])
else:
# wrong number of arguments
print("Usage: ", argv[0], " [location file]")
url="http://forecast.weather.gov/MapClick.php?"+data+"FcstType=digitalDWML"
forecast = urlopen(url).read()
|
Python
| 0
|
@@ -56,28 +56,18 @@
ile%0A
-from sys
import
-argv
+sys
%0Afro
@@ -355,16 +355,35 @@
n_path)%0A
+ sys.exit(2)
%0A%0Adef lo
@@ -399,16 +399,25 @@
om_file(
+location_
file):%0A
@@ -452,16 +452,25 @@
anduser(
+location_
file),'r
@@ -533,16 +533,43 @@
ot found
+%5CnLooking in home directory
%22)%0A
@@ -571,16 +571,23 @@
+return
location
@@ -599,16 +599,18 @@
_homedir
+()
%0A%0Aif len
@@ -606,24 +606,28 @@
r()%0A%0Aif len(
+sys.
argv) == 1:%0A
@@ -700,16 +700,20 @@
lif len(
+sys.
argv) ==
@@ -772,16 +772,20 @@
om_file(
+sys.
argv%5B1%5D)
@@ -844,16 +844,20 @@
age: %22,
+sys.
argv%5B0%5D,
@@ -876,16 +876,32 @@
file%5D%22)
+%0A sys.exit(1)
%0A%0Aurl=%22h
|
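Taken together, the hunks make each failure path in this small script explicit: sys is imported as a module, the parameter stops shadowing the built-in file, the home-directory fallback is actually called and its value returned, and the two error cases exit with distinct codes. The patched helpers read roughly as follows (the success path of location_from_file still falls through in the patch; returning the parsed contents is a reasonable completion, marked below):

import sys
from os.path import expanduser, isfile

location_path = "~/.location"


def location_from_homedir():
    if isfile(expanduser(location_path)):
        with open(expanduser(location_path)) as f:
            return "&".join(f.read().split("\n"))
    else:
        print("no location file at ", location_path)
        sys.exit(2)  # distinct exit code for a missing location file


def location_from_file(location_file):
    try:
        f = open(expanduser(location_file), 'r')
    except IOError:  # the patch keeps a bare except; IOError is the real failure here
        print("file ", location_file, " not found\nLooking in home directory")
        return location_from_homedir()
    return "&".join(f.read().split("\n"))  # completion, not in the patch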
a5eb71570c70109ab39285bdc108e54d27c54dba
|
Change Content-Encoding to gzip in memory_cache_http_server.
|
telemetry/telemetry/core/memory_cache_http_server.py
|
telemetry/telemetry/core/memory_cache_http_server.py
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import BaseHTTPServer
from collections import namedtuple
import mimetypes
import os
import SimpleHTTPServer
import SocketServer
import sys
import zlib
ByteRange = namedtuple('ByteRange', ['from_byte', 'to_byte'])
ResourceAndRange = namedtuple('ResourceAndRange', ['resource', 'byte_range'])
class MemoryCacheHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def do_GET(self):
"""Serve a GET request."""
resource_range = self.SendHead()
if not resource_range or not resource_range.resource:
return
response = resource_range.resource['response']
if not resource_range.byte_range:
self.wfile.write(response)
return
start_index = resource_range.byte_range.from_byte
end_index = resource_range.byte_range.to_byte
self.wfile.write(response[start_index:end_index + 1])
def do_HEAD(self):
"""Serve a HEAD request."""
self.SendHead()
def SendHead(self):
path = self.translate_path(self.path)
if path not in self.server.resource_map:
self.send_error(404, 'File not found')
return None
resource = self.server.resource_map[path]
total_num_of_bytes = resource['content-length']
byte_range = self.GetByteRange(total_num_of_bytes)
if byte_range:
# request specified a range, so set response code to 206.
self.send_response(206)
self.send_header('Content-Range',
'bytes %d-%d/%d' % (byte_range.from_byte,
byte_range.to_byte,
total_num_of_bytes))
total_num_of_bytes = byte_range.to_byte - byte_range.from_byte + 1
else:
self.send_response(200)
self.send_header('Content-Length', str(total_num_of_bytes))
self.send_header('Content-Type', resource['content-type'])
self.send_header('Last-Modified',
self.date_time_string(resource['last-modified']))
if resource['zipped']:
self.send_header('Content-Encoding', 'deflate')
self.end_headers()
return ResourceAndRange(resource, byte_range)
def GetByteRange(self, total_num_of_bytes):
"""Parse the header and get the range values specified.
Args:
total_num_of_bytes: Total # of bytes in requested resource,
used to calculate upper range limit.
Returns:
A ByteRange namedtuple object with the requested byte-range values.
If no Range is explicitly requested or there is a failure parsing,
return None.
Special case: If range specified is in the format "N-", return N-N.
If upper range limit is greater than total # of bytes, return upper index.
"""
range_header = self.headers.getheader('Range')
if range_header is None:
return None
if not range_header.startswith('bytes='):
return None
# The range header is expected to be a string in this format:
# bytes=0-1
# Get the upper and lower limits of the specified byte-range.
# We've already confirmed that range_header starts with 'bytes='.
byte_range_values = range_header[len('bytes='):].split('-')
from_byte = 0
to_byte = 0
if len(byte_range_values) == 2:
from_byte = int(byte_range_values[0])
if byte_range_values[1]:
to_byte = int(byte_range_values[1])
else:
return None
# Do some validation.
if from_byte < 0:
return None
if to_byte < from_byte:
to_byte = from_byte
if to_byte >= total_num_of_bytes:
# End of range requested is greater than length of requested resource.
# Only return # of available bytes.
to_byte = total_num_of_bytes - 1
return ByteRange(from_byte, to_byte)
class MemoryCacheHTTPServer(SocketServer.ThreadingMixIn,
BaseHTTPServer.HTTPServer):
# Increase the request queue size. The default value, 5, is set in
# SocketServer.TCPServer (the parent of BaseHTTPServer.HTTPServer).
# Since we're intercepting many domains through this single server,
# it is quite possible to get more than 5 concurrent requests.
request_queue_size = 128
def __init__(self, host_port, handler, paths):
BaseHTTPServer.HTTPServer.__init__(self, host_port, handler)
self.resource_map = {}
for path in paths:
if os.path.isdir(path):
self.AddDirectoryToResourceMap(path)
else:
self.AddFileToResourceMap(path)
def AddDirectoryToResourceMap(self, directory_path):
"""Loads all files in directory_path into the in-memory resource map."""
for root, dirs, files in os.walk(directory_path):
# Skip hidden files and folders (like .svn and .git).
files = [f for f in files if f[0] != '.']
dirs[:] = [d for d in dirs if d[0] != '.']
for f in files:
file_path = os.path.join(root, f)
if not os.path.exists(file_path): # Allow for '.#' files
continue
self.AddFileToResourceMap(file_path)
def AddFileToResourceMap(self, file_path):
"""Loads file_path into the in-memory resource map."""
with open(file_path, 'rb') as fd:
response = fd.read()
fs = os.fstat(fd.fileno())
content_type = mimetypes.guess_type(file_path)[0]
zipped = False
if content_type in ['text/html', 'text/css', 'application/javascript']:
zipped = True
response = zlib.compress(response, 9)
self.resource_map[file_path] = {
'content-type': content_type,
'content-length': len(response),
'last-modified': fs.st_mtime,
'response': response,
'zipped': zipped
}
index = os.path.sep + 'index.html'
if file_path.endswith(index):
self.resource_map[
file_path[:-len(index)]] = self.resource_map[file_path]
def Main():
assert len(sys.argv) > 2, 'usage: %prog <port> [<path1>, <path2>, ...]'
port = int(sys.argv[1])
paths = sys.argv[2:]
server_address = ('127.0.0.1', port)
MemoryCacheHTTPRequestHandler.protocol_version = 'HTTP/1.1'
httpd = MemoryCacheHTTPServer(server_address, MemoryCacheHTTPRequestHandler,
paths)
httpd.serve_forever()
if __name__ == '__main__':
Main()
|
Python
| 0.000163
|
@@ -216,16 +216,28 @@
edtuple%0A
+import gzip%0A
import m
@@ -310,19 +310,24 @@
ort
-sys
+StringIO
%0Aimport
zlib
@@ -322,20 +322,19 @@
%0Aimport
-zlib
+sys
%0A%0A%0AByteR
@@ -2181,15 +2181,12 @@
', '
-deflate
+gzip
')%0A
@@ -5490,44 +5490,203 @@
-response = zlib.compress(response, 9
+sio = StringIO.StringIO()%0A gzf = gzip.GzipFile(fileobj=sio, compresslevel=9, mode='wb')%0A gzf.write(response)%0A gzf.close()%0A response = sio.getvalue()%0A sio.close(
)%0A
|
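The switch from zlib.compress() to a GzipFile matters because zlib emits a raw zlib/DEFLATE stream, while Content-Encoding: gzip promises gzip framing (header plus CRC). The diff spells this with StringIO, which is the Python 2 idiom; the same idea as a runnable sketch using io.BytesIO:

import gzip
import io


def gzip_bytes(payload, level=9):
    # Write through GzipFile so the output carries gzip framing, matching
    # the 'Content-Encoding: gzip' header the server now sends.
    buf = io.BytesIO()
    gzf = gzip.GzipFile(fileobj=buf, compresslevel=level, mode='wb')
    gzf.write(payload)
    gzf.close()
    return buf.getvalue()


body = gzip_bytes(b"<html>hello</html>")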
0da8123703355435425ecf4d6aa88a62cdcad7ba
|
Fix flake8 error.
|
mozillians/common/authbackend.py
|
mozillians/common/authbackend.py
|
import base64
import hashlib
import json
import re
from django.db import transaction
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.models import User
from cities_light.models import Country
from mozilla_django_oidc.auth import OIDCAuthenticationBackend
from waffle import switch_is_active
from mozillians.common.templatetags.helpers import get_object_or_none
from mozillians.dino_park.views import search_get_profile
from mozillians.users.models import IdpProfile
from mozillians.users.tasks import send_userprofile_to_cis
# Only allow the following login flows
# Passwordless > Google > Github, FxA > LDAP
# There is no way to downgrade
ALLOWED_IDP_FLOWS = {
IdpProfile.PROVIDER_PASSWORDLESS: IdpProfile.MFA_ACCOUNTS + [
IdpProfile.PROVIDER_PASSWORDLESS,
IdpProfile.PROVIDER_GOOGLE,
],
IdpProfile.PROVIDER_GOOGLE: IdpProfile.MFA_ACCOUNTS + [
IdpProfile.PROVIDER_PASSWORDLESS,
IdpProfile.PROVIDER_GOOGLE,
],
IdpProfile.PROVIDER_GITHUB: IdpProfile.MFA_ACCOUNTS,
IdpProfile.PROVIDER_FIREFOX_ACCOUNTS: IdpProfile.MFA_ACCOUNTS,
IdpProfile.PROVIDER_LDAP: [
IdpProfile.PROVIDER_LDAP
]
}
def calculate_username(email):
"""Calculate username from email address."""
email = email.split('@')[0]
username = re.sub(r'[^\w.@+-]', '-', email)
username = username[:settings.USERNAME_MAX_LENGTH]
suggested_username = username
count = 0
while User.objects.filter(username=suggested_username).exists():
count += 1
suggested_username = '%s%d' % (username, count)
if len(suggested_username) > settings.USERNAME_MAX_LENGTH:
# We failed to calculate a name for you, default to a
# email digest.
return base64.urlsafe_b64encode(hashlib.sha1(email).digest()).rstrip('=')
return suggested_username
def create_random_username():
"""This function produces a random username that
will be used as default value when a user has not provided one.
"""
raise NotImplementedError
class MozilliansAuthBackend(OIDCAuthenticationBackend):
"""Override OIDCAuthenticationBackend to provide custom functionality."""
def create_mozillians_profile(self, user_id, idp):
# A new mozillians.org profile will be provisioned if there is not one;
# we need the self-view of the profile, which means a private scope
# TODO: replace username with a random generator.
# Because we are using OIDC proxy, we assume always ldap. This functionality
# will be deprecated with the launch of DinoPark
profile = idp.profile
v2_profile = search_get_profile(self.request, user_id, 'private')
data = json.loads(v2_profile.content)
# Escape the middleware
profile.full_name = (data.get('first_name', {}).get('value') +
data.get('last_name', {}).get('value'))
location = data.get('location_preference', {}).get('value')
# TODO: Update this. It's wrong to create entries like this. We need to populate
# the Country table and match the incoming location. It's only for M1 beta.
if location:
country, _ = Country.objects.get_or_create(name=location)
profile.country = country
profile.timezone = data.get('timezone', {}).get('value')
profile.title = data.get('fun_title', {}).get('value')
worker_type = data.get('worker_type', {}).get('value')
if worker_type:
profile.is_staff = True
profile.auth0_user_id = user_id
profile.save()
if profile.is_staff:
profile.auto_vouch()
# redirect to /beta
self.request.session['oidc_login_next'] = '/beta'
def create_user(self, claims):
user = super(MozilliansAuthBackend, self).create_user(claims)
# Ensure compatibility with OIDC conformant mode
auth0_user_id = claims.get('user_id') or claims.get('sub')
idp = IdpProfile.objects.create(
profile=user.userprofile,
auth0_user_id=auth0_user_id,
email=claims.get('email'),
primary=True
)
# This is temporary for the beta version of DinoPark.
# and will be removed after that.
if switch_is_active('dino-park-automatic-profiles'):
self.create_mozillians_profile(auth0_user_id, idp)
return user
def filter_users_by_claims(self, claims):
"""Override default method to store claims."""
self.claims = claims
users = super(MozilliansAuthBackend, self).filter_users_by_claims(claims)
# Checking the primary email returned 0 users,
# before creating a new user we should check if the identity returned exists
if not users:
# Ensure compatibility with OIDC conformant mode
auth0_user_id = claims.get('user_id') or claims.get('sub')
idps = IdpProfile.objects.filter(auth0_user_id=auth0_user_id)
user_ids = idps.values_list('profile__user__id', flat=True).distinct()
return self.UserModel.objects.filter(id__in=user_ids)
return users
def check_authentication_method(self, user):
"""Check which Identity is used to login.
This method, depending on the current status of the IdpProfile
of a user, enforces MFA logins and creates the IdpProfiles.
Returns the object (user) it was passed unchanged.
"""
if not user:
return None
profile = user.userprofile
# Ensure compatibility with OIDC conformant mode
auth0_user_id = self.claims.get('user_id') or self.claims.get('sub')
email = self.claims.get('email')
# Grant an employee vouch if the user has the 'hris_is_staff' group
groups = self.claims.get('https://sso.mozilla.com/claim/groups')
if groups and 'hris_is_staff' in groups:
profile.auto_vouch()
# Get current_idp
current_idp = get_object_or_none(IdpProfile, profile=profile, primary=True)
# Get or create new `user_id`
obj, _ = IdpProfile.objects.get_or_create(
profile=profile,
email=email,
auth0_user_id=auth0_user_id)
# Update/Save the Github username
if 'github|' in auth0_user_id:
obj.username = self.claims.get('nickname', '')
obj.save()
# Do not allow downgrades.
if current_idp and obj.type < current_idp.type:
msg = u'Please use {0} as the login method to authenticate'
messages.error(self.request, msg.format(current_idp.get_type_display()))
return None
# Mark other `user_id` as `primary=False`
idp_q = IdpProfile.objects.filter(profile=profile)
with transaction.atomic():
idp_q.exclude(auth0_user_id=auth0_user_id, email=email).update(primary=False)
# Mark current `user_id` as `primary=True`
idp_q.filter(auth0_user_id=auth0_user_id, email=email).update(primary=True)
# Update CIS
send_userprofile_to_cis.delay(profile.pk)
return user
def authenticate(self, **kwargs):
"""Override default method to add multiple Identity Profiles in an account."""
user = super(MozilliansAuthBackend, self).authenticate(**kwargs)
return self.check_authentication_method(user)
|
Python
| 0
|
@@ -2873,18 +2873,16 @@
'value')
- +
%0A
@@ -2898,24 +2898,26 @@
+ +
data.get('l
|
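The flake8 fix above only relocates a binary operator: the `+` moves from the end of the first line to the start of the continuation line, the break-before-operator style recommended by current PEP 8 (flake8's W504 when that check is enabled). A tiny illustration:

first, last = 'Jane', 'Doe'

# flagged as W504 when enabled: line break after the binary operator
full_name = (first +
             ' ' + last)

# preferred, as in the diff: break before the operator
full_name = (first
             + ' ' + last)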
3472cad9c116caf9a7638d44c7edf5194ee8b228
|
Allow passing of deployment strategy to disco_deploy.py
|
bin/disco_deploy.py
|
bin/disco_deploy.py
|
#!/usr/bin/env python
"""
Deploys newly baked hostclasses
Usage:
disco_deploy.py [options] test --pipeline PIPELINE
[--environment ENV] [--ami AMI | --hostclass HOSTCLASS] [--allow-any-hostclass]
disco_deploy.py [options] update --pipeline PIPELINE --environment ENV
[--ami AMI | --hostclass HOSTCLASS] [--allow-any-hostclass]
disco_deploy.py [options] list (--tested|--untested|--failed|--failures|--testable)
[--pipeline PIPELINE] [--environment ENV] [--ami AMI | --hostclass HOSTCLASS]
[--allow-any-hostclass]
disco_deploy.py [options] list --updatable --pipeline PIPELINE --environment ENV
[--ami AMI | --hostclass HOSTCLASS] [--allow-any-hostclass]
Commands:
test For CI and Build env only! Provision, Test, and Promote one new untested AMI if one exists
update For Production! Update one hostclass to a new passing AMI if one exists
list Provides information about AMIs in a pipeline
Options:
-h --help Show this screen
--debug Log in debug level
--dry-run Does not make any modifications
--pipeline PIPELINE File name of the pipeline definition
--ami AMI Limit command to a specific AMI
--hostclass HOSTCLASS Limit command to a specific hostclass
--environment ENV Environment to operate in
--allow-any-hostclass Do not limit command to hostclasses defined in pipeline
--tested List of latest tested AMI for each hostclass
--untested List of latest untested AMI for each hostclass
--failed List of latest failed AMI for each hostclass
--failures List of AMIs where the latest AMI for the hostclass has failed testing
--testable List of AMIs where the latest AMI for the hostclass is untested
--updatable List of AMIs where the latest AMI for the hostclass is newer than the
currently running AMI and its stage is either tested or untagged
"""
from __future__ import print_function
import csv
import sys
from docopt import docopt
from disco_aws_automation import DiscoAWS, DiscoAutoscale, DiscoBake, DiscoDeploy, read_config
from disco_aws_automation.disco_aws_util import run_gracefully
from disco_aws_automation.disco_logging import configure_logging
# R0912 Allow more than 12 branches so we can parse a lot of commands..
# pylint: disable=R0912
def run():
"""Parses command line and dispatches the commands"""
config = read_config()
args = docopt(__doc__)
configure_logging(args["--debug"])
env = args["--environment"] or config.get("disco_aws", "default_environment")
pipeline_definition = []
if args["--pipeline"]:
with open(args["--pipeline"], "r") as f:
reader = csv.DictReader(f)
pipeline_definition = [line for line in reader]
aws = DiscoAWS(config, env)
if config.has_option('test', 'env'):
test_env = config.get('test', 'env')
test_aws = DiscoAWS(config, test_env)
else:
test_aws = aws
deploy = DiscoDeploy(
aws, test_aws, DiscoBake(config, aws.connection), DiscoAutoscale(env),
pipeline_definition=pipeline_definition,
ami=args.get("--ami"), hostclass=args.get("--hostclass"),
allow_any_hostclass=args["--allow-any-hostclass"])
if args["test"]:
deploy.test(dry_run=args["--dry-run"])
elif args["update"]:
deploy.update(dry_run=args["--dry-run"])
elif args["list"]:
missing = "-" if len(pipeline_definition) else ""
if args["--tested"]:
for (_hostclass, ami) in deploy.get_latest_tested_amis().iteritems():
print("{} {:40} {}".format(
ami.id, ami.name.split()[0], deploy.get_integration_test(ami.name.split()[0]) or missing))
elif args["--untested"]:
for (_hostclass, ami) in deploy.get_latest_untested_amis().iteritems():
print("{} {:40} {}".format(
ami.id, ami.name.split()[0], deploy.get_integration_test(ami.name.split()[0]) or missing))
elif args["--failed"]:
for (_hostclass, ami) in deploy.get_latest_failed_amis().iteritems():
print("{} {:40} {}".format(
ami.id, ami.name.split()[0], deploy.get_integration_test(ami.name.split()[0]) or missing))
elif args["--testable"]:
for ami in deploy.get_test_amis():
print("{} {:40} {}".format(
ami.id, ami.name.split()[0], deploy.get_integration_test(ami.name.split()[0]) or missing))
elif args["--updatable"]:
for ami in deploy.get_update_amis():
print("{} {:40} {}".format(
ami.id, ami.name.split()[0], deploy.get_integration_test(ami.name.split()[0]) or missing))
elif args["--failures"]:
failures = deploy.get_failed_amis()
for ami in failures:
print("{} {:40} {}".format(
ami.id, ami.name.split()[0], deploy.get_integration_test(ami.name.split()[0]) or missing))
sys.exit(1 if len(failures) else 0)
if __name__ == "__main__":
run_gracefully(run)
|
Python
| 0.000001
|
@@ -206,32 +206,74 @@
-any-hostclass%5D%0A
+ %5B--strategy STRATEGY%5D%0A
disco_deploy
@@ -402,32 +402,54 @@
w-any-hostclass%5D
+ %5B--strategy STRATEGY%5D
%0A disco_deplo
@@ -1599,16 +1599,124 @@
pipeline
+%0A --strategy STRATEGY The deployment strategy to use. Currently supported: 'classic' or 'blue_green'.
%0A%0A -
@@ -3717,32 +3717,72 @@
rgs%5B%22--dry-run%22%5D
+, deployment_strategy=args%5B%22--strategy%22%5D
)%0A elif args%5B
@@ -3839,16 +3839,56 @@
ry-run%22%5D
+, deployment_strategy=args%5B%22--strategy%22%5D
)%0A el
|
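The diff threads the new flag from the usage string straight into the deploy calls. A stripped-down docopt sketch of the same wiring (the function body here is illustrative, not part of disco_aws_automation):

"""Usage: deploy.py (test | update) [--strategy=STRATEGY]"""
from docopt import docopt

def update(dry_run=False, deployment_strategy=None):
    # 'classic' or 'blue_green', per the new option's help text
    print('updating with strategy:', deployment_strategy or 'classic')

if __name__ == '__main__':
    args = docopt(__doc__)
    if args['update']:
        update(deployment_strategy=args['--strategy'])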
6e024501b76beaccc2daa46f045f4e3444aa146b
|
update python client library example
|
examples/send.py
|
examples/send.py
|
#!/usr/bin/env python
from alerta.client import Alert, ApiClient
client = ApiClient()
alert = Alert(resource='res1', event='event1')
print alert
print client.send(alert)
|
Python
| 0
|
@@ -28,21 +28,53 @@
alerta.
-clien
+api import ApiClient%0Afrom alerta.aler
t import
@@ -79,17 +79,23 @@
rt Alert
-,
+%0A%0Aapi =
ApiClie
@@ -100,29 +100,89 @@
ient
-%0A%0Aclient = ApiClient(
+(endpoint='http://localhost:8080', key='tUA6oBX6E5hUUQZ+dyze6vZbOMmiZWA7ke88Nvio'
)%0A%0Aa
@@ -194,16 +194,21 @@
= Alert(
+%0A
resource
@@ -213,65 +213,363 @@
ce='
-res1', event='event1')%0Aprint alert%0A%0Aprint client.send(alert)%0A
+web-server-01',%0A event='HttpError',%0A group='Web',%0A environment='Production',%0A service='theguardian.com',%0A severity='major',%0A value='Bad Gateway (502)',%0A text='Web server error.',%0A tags=%5B'web', 'dc1', 'london'%5D,%0A attributes=%7B'customer': 'The Guardian'%7D%0A)%0Aprint alert%0A%0Atry:%0A print api.send(alert)%0Aexcept Exception as e:%0A print e
|
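Decoded back into plain source for readability, the escaped hunks above rewrite the whole example to roughly the following (Python 2, matching the original print statements; the endpoint and key are the literal values from the diff):

#!/usr/bin/env python

from alerta.api import ApiClient
from alerta.alert import Alert

api = ApiClient(endpoint='http://localhost:8080', key='tUA6oBX6E5hUUQZ+dyze6vZbOMmiZWA7ke88Nvio')

alert = Alert(
    resource='web-server-01',
    event='HttpError',
    group='Web',
    environment='Production',
    service='theguardian.com',
    severity='major',
    value='Bad Gateway (502)',
    text='Web server error.',
    tags=['web', 'dc1', 'london'],
    attributes={'customer': 'The Guardian'}
)
print alert

try:
    print api.send(alert)
except Exception as e:
    print e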
ed31bbdf03f76967616fd272e5835d3ff313d81f
|
Update qlf_pipeline.py
|
bin/qlf_pipeline.py
|
bin/qlf_pipeline.py
|
import os
import sys
import logging
import subprocess
import datetime
import glob
import yaml
import configparser
from multiprocessing import Process, Manager
from qlf_models import QLFModels
# Project main directory
qlf_root = os.getenv('QLF_ROOT')
cfg = configparser.ConfigParser()
try:
cfg.read('%s/qlf/config/qlf.cfg' % qlf_root)
qlconfig = cfg.get('main', 'qlconfig')
desi_spectro_redux = cfg.get('namespace', 'desi_spectro_redux')
except Exception as error:
print(error)
print("Error reading %s/qlf/config/qlf.cfg" % qlf_root)
sys.exit(1)
logger = logging.getLogger(__name__)
class QLFPipeline(object):
""" Class responsible for managing Quick Look pipeline execution """
def __init__(self, data):
self.pipeline_name = 'Quick Look'
self.models = QLFModels()
self.data = data
def start_process(self):
""" Start pipeline """
logger.info('Started %s ...' % self.pipeline_name)
logger.info('Night: %s' % self.data.get('night'))
logger.info('Exposure ID: %s' % str(self.data.get('expid')))
self.data['start'] = datetime.datetime.now().replace(microsecond=0)
# create process in database and obtain the process id
process = self.models.insert_process(
self.data.get('expid'),
self.data.get('night'),
self.data.get('start'),
self.pipeline_name
)
# TODO: ingest configuration file used, this should be done by process
# self.models.insert_config(process.id)
logger.info('Process ID: %ii started ...' % process.id)
output_dir = os.path.join(
'exposures',
self.data.get('night'),
self.data.get('zfill')
)
output_full_dir = os.path.join(desi_spectro_redux, output_dir)
# Make sure output dir is created
if not os.path.isdir(output_full_dir):
os.makedirs(output_full_dir)
logger.info('Output dir: %s' % output_dir)
self.data['output_dir'] = output_dir
procs = list()
return_cameras = Manager().list()
for camera in self.data.get('cameras'):
camera['start'] = datetime.datetime.now().replace(
microsecond=0
)
logname = os.path.join(
self.data.get('output_dir'),
"run-%s.log" % camera.get('name')
)
camera['logname'] = logname
logger.info('Output log for camera %s: %s' % (
camera.get('name'), camera.get('logname')
))
job = self.models.insert_job(
process_id=process.id,
camera=camera.get('name'),
start=camera.get('start'),
logname=camera.get('logname')
)
camera['job_id'] = job.id
proc = Process(target=self.execute, args=(camera, return_cameras,))
procs.append(proc)
proc.start()
for proc in procs:
proc.join()
self.data['end'] = datetime.datetime.now().replace(microsecond=0)
self.data['duration'] = str(
self.data.get('end') - self.data.get('start')
)
logger.info("Process finished in %s." % self.data.get('duration'))
logger.info('Begin ingestion of results...')
start_ingestion = datetime.datetime.now().replace(microsecond=0)
# TODO: refactor?
camera_failed = 0
self.data['cameras'] = return_cameras
for camera in self.data.get('cameras'):
self.update_job(camera)
if not camera.get('status') == 0:
camera_failed += 1
status = 0
if camera_failed > 0:
status = 1
self.models.update_process(
process_id=process.id,
end=self.data.get('end'),
status=status
)
duration_ingestion = str(
datetime.datetime.now().replace(microsecond=0) - start_ingestion
)
logger.info("Ingestion finished in %s." % duration_ingestion)
def execute(self, camera, return_cameras):
""" Execute QL Pipeline by camera """
cmd = (
'desi_quicklook -i {qlconfig} -n {night} -c {camera} -e {exposure} '
'--rawdata_dir {desi_spectro_data} --specprod_dir {desi_spectro_redux} '
).format(**{
'qlconfig': qlconfig,
'night': self.data.get('night'),
'camera': camera.get('name'),
'exposure': str(self.data.get('expid')),
'desi_spectro_data': self.data.get('desi_spectro_data'),
'desi_spectro_redux': desi_spectro_redux
})
logger.info(
"Started job %i on exposure %s and camera %s ... " % (
camera.get('job_id'),
self.data.get('expid'),
camera.get('name')
))
logname = open(os.path.join(
desi_spectro_redux,
camera.get('logname')
), 'wb')
cwd = os.path.join(
desi_spectro_redux,
self.data.get('output_dir')
)
with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True, cwd=cwd) as process:
for line in iter(process.stdout.readline, bytes()):
logname.write(line)
logname.flush()
for line in iter(process.stderr.readline, bytes()):
logname.write(line)
logname.flush()
retcode = process.wait()
logname.close()
camera['end'] = datetime.datetime.now().replace(microsecond=0)
camera['status'] = 0
camera['duration'] = str(
camera.get('end') - camera.get('start')
)
if retcode < 0:
camera['status'] = 1
msg = (
"Job on exposure %s and camera %s "
"finished with code %i in %s"
) % (
camera.get('name'),
self.data.get('expid'),
retcode,
camera.get('duration')
)
logger.error(msg)
return_cameras.append(camera)
logger.info("Finished job %i in %s" % (
camera.get('job_id'),
camera.get('duration')
))
def update_job(self, camera):
""" Update job and ingest QA results """
self.models.update_job(
job_id=camera.get('job_id'),
end=camera.get('end'),
status=camera.get('status')
)
output_path = os.path.join(
desi_spectro_redux,
self.data.get('output_dir'),
'ql-*-%s-%s.yaml' % (
camera.get('name'),
self.data.get('zfill')
)
)
for product in glob.glob(output_path):
try:
qa = yaml.load(open(product, 'r'))
name = os.path.basename(product)
paname = qa['PANAME']
metrics = qa['METRICS']
logger.info("Ingesting %s" % name)
self.models.insert_qa(name, paname, metrics, camera.get('job_id'))
except Exception:
logger.error("Error ingesting %s" % name, exc_info=True)
logger.info("Finished ingestion of pipeline results for camera {}.".format(camera.get('name')))
def was_processed(self):
""" Returns [<Process object>] if expid was processed else returns [] """
expid = self.data.get('expid')
return self.models.get_expid_in_process(expid)
|
Python
| 0
|
@@ -1585,17 +1585,16 @@
s ID: %25i
-i
started
|
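The one-character hunk above drops a stray 'i': in printf-style formatting, '%i' already consumes the integer argument, so the extra letter was printed literally. A quick check:

pid = 42
print('Process ID: %ii started ...' % pid)  # 'Process ID: 42i started ...'
print('Process ID: %i started ...' % pid)   # 'Process ID: 42 started ...'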
cb69b004e46d8d08e6c2419c7e4a83383a14755b
|
remove reference-based infinite loop
|
shadho/backend/json/domain.py
|
shadho/backend/json/domain.py
|
from shadho.backend.base.domain import BaseDomain
from shadho.backend.json.value import Value
import uuid
class Domain(BaseDomain):
def __init__(self, id=None, domain=None, path=None, strategy=None,
scaling=None, model=None, values=None, exhaustive=False,
exhaustive_idx=None):
self.id = id if id is not None else str(uuid.uuid4())
self.exhaustive_idx = exhaustive_idx
if isinstance(domain, dict):
distribution = getattr(scipy.stats, domain['distribution'])
self.domain = distribution(*domain['args'],
**domain['kwargs'])
rng = np.random.RandomState()
if 'rng' in domain:
state = domain['rng']
rng.set_state(tuple([state[0], np.array(state[1]), state[2],
state[3], state[4]]))
self.domain.random_state = rng
elif exhaustive and isinstance(domain, list) and exhaustive_idx is None:
self.domain = domain
self.exhaustive_idx = 0
else:
self.domain = domain
self.exhaustive = exhaustive
self.path = path
self.strategy = strategy if strategy is not None else 'random'
self.scaling = scaling if scaling is not None else 'linear'
self.model = model.id if hasattr(model, 'id') else model
self.values = [] if values is None or len(values) == 0 else values
values = values if values is not None else []
for value in values:
self.add_value(value)
def add_value(self, value):
if isinstance(value, Value):
self.values.append(value.id)
elif isinstance(value, (int, str)):
self.values.add(value)
else:
raise InvalidObjectClassError
def to_json(self):
if hasattr(self.domain, 'dist'):
domain = {
'domain': self.domain.dist.name,
'args': self.domain.args,
'kwargs': self.domain.kwds,
}
else:
domain = self.domain
return {
'domain': domain,
'path': self.path,
'strategy': self.strategy,
'scaling': self.scaling,
'model': self.model,
'values': self.values,
'exhaustive': self.exhaustive,
'exhaustive_idx': self.exhaustive_idx
}
|
Python
| 0.000056
|
@@ -86,16 +86,68 @@
rt Value
+%0Afrom shadho.backend.utils import InvalidObjectError
%0A%0Aimport
@@ -1482,58 +1482,8 @@
= %5B%5D
- if values is None or len(values) == 0 else values
%0A
@@ -1776,17 +1776,20 @@
values.a
-d
+ppen
d(value)
@@ -1838,18 +1838,20 @@
ject
-Class
Error
+(value)
%0A%0A
|
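The "reference-based infinite loop" named in the subject comes from aliasing: self.values was bound to the caller's list whenever it was non-empty, and add_value() then appended to that very list while __init__ was still iterating it, so the loop never terminated. A minimal reproduction of the pitfall and of the fix (always start from a fresh list):

def broken(values):
    out = values              # alias of the argument, not a copy
    for v in values:          # iterates the same list 'out' appends to,
        out.append(v)         # so the sequence grows forever
    return out

def fixed(values):
    out = []                  # fresh list, as in the patched constructor
    for v in values:
        out.append(v)
    return out

print(fixed([1, 2, 3]))       # [1, 2, 3]; broken([1, 2, 3]) would hang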
00d006d280960228f249480e7af990cf7df39b59
|
remove useless function
|
jsonapi_utils/alchemy.py
|
jsonapi_utils/alchemy.py
|
# -*- coding: utf-8 -*-
from sqlalchemy.sql.expression import desc, asc, text
from jsonapi_utils.constants import DEFAULT_PAGE_SIZE
def paginate_query(query, pagination_kwargs):
"""Paginate query result according to jsonapi rfc
:param sqlalchemy.orm.query.Query query: sqlalchemy queryset
:param dict pagination_kwargs: pagination informations
"""
page_size = int(pagination_kwargs.get('size', 0)) or DEFAULT_PAGE_SIZE
query = query.limit(page_size)
if pagination_kwargs.get('number'):
query = query.offset((int(pagination_kwargs['number']) - 1) * page_size)
return query
def sort_query(query, querystring):
"""
:param query: sqlalchemy query to sort
:param JSONAPIQueryString querystring: current querystring
"""
expressions = {'asc': asc, 'desc': desc}
order_items = []
for sort_opt in querystring.sorting:
field = text(sort_opt['field'])
order = expressions.get(sort_opt['order'])
order_items.append(order(field))
return query.order_by(*order_items)
def include_query(query, include_kwargs):
pass
|
Python
| 0.001107
|
@@ -1054,57 +1054,4 @@
ms)%0A
-%0A%0Adef include_query(query, include_kwargs):%0A pass%0A
|
2bf4742656dca9ea3b5ad6fc2c215a09bfb32594
|
Fix the expected "Do not know how to make" error message.
|
test/Win32/bad-drive.py
|
test/Win32/bad-drive.py
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
"""
This test verifies (on Windows systems) that we fail gracefully and
provide informative messages if someone tries to use a path name
with an invalid drive letter.
"""
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os
import string
import sys
import TestSCons
test = TestSCons.TestSCons()
if sys.platform != 'win32':
msg = "Skipping drive-letter test on non-Windows platform '%s'\n" % sys.platform
test.skip_test(msg)
bad_drive = None
for i in range(len(string.uppercase)-1, -1, -1):
d = string.uppercase[i]
if not os.path.isdir(d + ':' + os.sep):
bad_drive = d + ':'
break
if bad_drive is None:
print "All drive letters appear to be in use."
print "Cannot test SCons handling of invalid Windows drive letters."
test.no_result(1);
test.write('SConstruct', """
def cat(env, source, target):
target = str(target[0])
source = map(str, source)
print 'cat(%%s) > %%s' %% (source, target)
f = open(target, "wb")
for src in source:
f.write(open(src, "rb").read())
f.close()
bad_drive = '%s'
env = Environment(BUILDERS={'Build':Builder(action=cat)})
env.Build('aaa.out', 'aaa.in')
env.Build(bad_drive + 'no_target_1', 'bbb.exists')
env.Build(bad_drive + 'no_target_2', 'ccc.does_not_exist')
env.Build('ddd.out', bad_drive + 'no_source')
""" % (bad_drive + '\\' + os.sep))
bad_drive = bad_drive + os.sep
test.write("aaa.in", "aaa.in\n")
test.write("bbb.exists", "bbb.exists\n")
test.write("no_target_1", "no_target_1\n")
test.write("no_target_2", "no_target_2\n")
test.write("no_source", "no_source\n")
test.run(arguments = 'aaa.out')
test.fail_test(test.read('aaa.out') != "aaa.in\n")
# This next test used to provide a slightly different error message:
# "scons: *** Do not know how to make File target `%snot_mentioned'. Stop.\n"
# Right now, it doesn't seem important enough to track down exactly
# why this changed and fix it, but we'll preserve it here in case it
# becomes an issue or some refactoring restores the old behavior.
test.run(arguments = bad_drive + 'not_mentioned',
stderr = "scons: *** Do not know how to make File target `%snot_mentioned'. Stop.\n" % (bad_drive),
status = 2)
expect = "scons: *** [%sno_target_1] No drive `%s' for target `%sno_target_1'.\n" % (bad_drive, bad_drive, bad_drive)
test.run(arguments=bad_drive + 'no_target_1', stderr=expect, status=2)
expect = "scons: *** [%sno_target_2] Source `ccc.does_not_exist' not found, needed by target `%sno_target_2'.\n" % (bad_drive, bad_drive)
test.run(arguments=bad_drive + 'no_target_2', stderr=expect, status=2)
expect = "scons: *** [ddd.out] Source `%sno_source' not found, needed by target `ddd.out'.\n" % bad_drive
test.run(arguments='ddd.out', stderr=expect, status=2)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
Python
| 0.000553
|
@@ -3265,32 +3265,50 @@
%25snot_mentioned'
+ (%25snot_mentioned)
. Stop.%5Cn%22 %25 (b
@@ -3298,32 +3298,43 @@
). Stop.%5Cn%22 %25 (
+bad_drive,
bad_drive),%0A
|
1f31ed22627cb1cf5b4323a18435bd9fb7fc7462
|
add trailing slash
|
web/url.py
|
web/url.py
|
from flask import Flask, render_template, jsonify, redirect, url_for
from web.clasher import Clasher
import os
xml_file = 'https://s3.amazonaws.com/navishack/PC-00-COMP-BBC.xml.gz'
clash_data = None
app = Flask(__name__)
app.debug = 'DEBUG' in os.environ
@app.route('/')
def index():
return redirect('/clash/')
@app.route('/projects/')
def projects():
return 'The project page'
@app.route('/about')
def about():
return 'The about page'
def get_clash_test():
global clash_data
if clash_data is None:
print 'loading'
clash_data = Clasher(xml_file)
return clash_data.data['exchange']['batchtest']['clashtests']['clashtest']
@app.route('/clash/')
def clash_index():
number_clashes = len(get_clash_test())
return render_template('clash_index.html', number_clashes=number_clashes)
@app.route('/clash/<int:number>')
def clash_by_number(number):
clash_info = get_clash_test()[number]
return jsonify(clash_info)
@app.route('/hello/')
@app.route('/hello/<name>')
def hello(name=None):
return render_template('hello.html', name=name)
@app.route('/time')
def time_series():
data_url = '/static/test-data.json'
return render_template('clash-summary-over-time.html', data_url=data_url)
|
Python
| 0.000008
|
@@ -1113,16 +1113,37 @@
/time')%0A
+@app.route('/time/')%0A
def time
|
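Flask registers '/time' and '/time/' as distinct URL rules, so stacking both decorators, as the hunk does, makes the view answer at either form without a redirect. A minimal sketch:

from flask import Flask

app = Flask(__name__)

@app.route('/time')
@app.route('/time/')
def time_series():
    return 'clash summary over time'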
4b3183ac17c9e4316afb535ddc408cd63aa5e2e1
|
Add solver registry metaclass
|
bulls_cows/bulls_cows.py
|
bulls_cows/bulls_cows.py
|
import argparse
import itertools
import logging
import multiprocessing
import random
def get_response(guess, secret):
bulls, cows = 0, 0
for g, s in zip(guess, secret):
if g == s:
bulls += 1
elif g in secret:
cows += 1
return (bulls, cows)
class Solver(object):
def __init__(self, possible_secrets):
self.possible_secrets = possible_secrets
def get_guess(self):
raise NotImplementedError
def update_response(self, guess, response):
"""Filter out all secrets that would not have yielded the given response.
"""
self.possible_secrets = [
s for s in self.possible_secrets if get_response(guess, s) == response]
class MiddleSolver(Solver):
def get_guess(self):
"""Return the middle possible secret.
"""
return self.possible_secrets[len(self.possible_secrets) >> 1]
class RandomSolver(Solver):
def get_guess(self):
"""Return a random possible secret.
"""
return random.choice(self.possible_secrets)
def solve(solver_class, possible_secrets, secret):
solver = solver_class(possible_secrets)
for move_count in itertools.count(1):
guess = solver.get_guess()
if guess == secret:
logging.debug('%s in %s moves', secret, move_count)
return move_count
else:
solver.update_response(guess, get_response(guess, secret))
def batch_solve(solver_class, possible_secrets, secrets):
return [solve(solver_class, possible_secrets, s) for s in secrets]
def main():
valid_solvers = {s.__name__: s for s in (MiddleSolver, RandomSolver)}
parser = argparse.ArgumentParser(description='Bulls and cows solver')
parser.add_argument('-a', '--alen', metavar='len', default=10, type=int,
help='alphabet length (default: %(default)s)')
parser.add_argument('-c', '--class', metavar='class', dest='solver_class',
default='RandomSolver', choices=valid_solvers.keys(),
help='solver class name (default: %(default)s)')
parser.add_argument('-m', '--multiprocess', action='store_true',
help='parallelize computation via multiprocessing')
parser.add_argument('-n', '--num', metavar='num', type=int,
help='number of secrets (default: all possible secrets)')
parser.add_argument('-s', '--slen', metavar='len', default=4, type=int,
help='secret length (default: %(default)s)')
parser.add_argument('-v', '--verbose', action='store_true',
help='verbose output')
args = parser.parse_args()
alphabet = tuple(range(args.alen))
secret_length = args.slen
num_secrets = args.num
solver_class = valid_solvers[args.solver_class]
multiprocess = args.multiprocess
verbose = args.verbose
logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',
level=logging.DEBUG if verbose else logging.INFO)
possible_secrets = tuple(itertools.permutations(alphabet, secret_length))
shuffled_secrets = list(possible_secrets)
random.shuffle(shuffled_secrets)
secrets_cycle = itertools.cycle(shuffled_secrets)
if num_secrets is None:
num_secrets = len(possible_secrets)
logging.debug('alphabet length: %s', len(alphabet))
logging.debug('secret length: %s', secret_length)
logging.debug('possible secrets: %s', len(possible_secrets))
logging.debug('secrets to solve: %s', num_secrets)
logging.debug('solver class: %s', solver_class.__name__)
if multiprocess:
num_threads = multiprocessing.cpu_count()
batch_size = (num_secrets + num_threads - 1) // num_threads
pool = multiprocessing.Pool()
results = []
for i in range(num_threads):
batch = list(itertools.islice(secrets_cycle, batch_size))
results.append(pool.apply_async(batch_solve, args=(
solver_class, possible_secrets, batch)))
pool.close()
pool.join()
move_counts = [x for result in results for x in result.get()]
else:
batch = list(itertools.islice(secrets_cycle, num_secrets))
move_counts = batch_solve(solver_class, possible_secrets, batch)
mean = 1.0 * sum(move_counts) / len(move_counts)
logging.info('mean: %.6f', mean)
stdev = (sum((c - mean) ** 2 for c in move_counts) / len(move_counts)) ** 0.5
logging.info('stdev: %.6f', stdev)
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -267,29 +267,303 @@
)%0A%0A%0A
-class Solver(object):
+_solver_classes = set()%0A%0A%0Aclass SolverRegistry(type):%0A%0A def __init__(cls, name, bases, namespace):%0A super(SolverRegistry, cls).__init__(name, bases, namespace)%0A _solver_classes.add(cls)%0A _solver_classes.difference_update(bases)%0A%0A%0Aclass Solver(object):%0A%0A __metaclass__ = SolverRegistry
%0A%0A
@@ -1754,22 +1754,16 @@
in():%0A
-valid_
solvers
@@ -1792,36 +1792,23 @@
in
-(MiddleSolver, RandomSolver)
+_solver_classes
%7D%0A%0A
@@ -2153,22 +2153,16 @@
choices=
-valid_
solvers.
@@ -2912,14 +2912,8 @@
s =
-valid_
solv
|
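The metaclass added above keeps a module-level registry current automatically: each class definition triggers SolverRegistry.__init__, which adds the new class and discards its bases, leaving only the leaf solvers for main() to expose. A readable sketch of the same pattern, using Python 3 metaclass syntax in place of the original __metaclass__ attribute:

_solver_classes = set()

class SolverRegistry(type):
    def __init__(cls, name, bases, namespace):
        super().__init__(name, bases, namespace)
        _solver_classes.add(cls)
        _solver_classes.difference_update(bases)  # keep only leaf classes

class Solver(metaclass=SolverRegistry):
    pass

class MiddleSolver(Solver):
    pass

class RandomSolver(Solver):
    pass

print(sorted(s.__name__ for s in _solver_classes))
# ['MiddleSolver', 'RandomSolver']; Solver itself was pruned as a base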
7c0d8823c849033f5177da92ac42cdea542bd425
|
allow passing custom application options to HomogenizationEngine
|
sfepy/homogenization/engine.py
|
sfepy/homogenization/engine.py
|
from copy import copy, deepcopy
from sfepy.base.base import output, Struct
from sfepy.applications import SimpleApp, Application
from sfepy.fem.region import sort_by_dependency
from coefs_base import MiniAppBase
def insert_sub_reqs( reqs, levels, req_info ):
"""Recursively build all requirements in correct order."""
all_reqs = []
## print '>', levels, reqs
for ii, req in enumerate( reqs ):
try:
rargs = req_info[req]
except KeyError:
raise ValueError('requirement "%s" is not defined!' % req)
sub_reqs = rargs.get( 'requires', [] )
## print '*', ii, req, sub_reqs
if req in levels:
raise ValueError('circular requirement "%s"!' % (req))
if sub_reqs:
levels.append( req )
all_reqs.extend( insert_sub_reqs( sub_reqs, levels, req_info ) )
levels.pop()
if req in all_reqs:
raise ValueError('circular requirement "%s"!' % (req))
else:
all_reqs.append( req )
## print all_reqs
## pause()
return all_reqs
class HomogenizationEngine( SimpleApp ):
@staticmethod
def process_options(options):
get = options.get_default_attr
return Struct(coefs=get('coefs', None,
'missing "coefs" in options!'),
requirements=get('requirements', None,
'missing "requirements" in options!'),
save_format=get('save_format', 'vtk'),
dump_format=get('dump_format', 'h5'))
def __init__( self, problem, options,
volume = None, output_prefix = 'he:', **kwargs ):
"""Bypasses SimpleApp.__init__()!"""
Application.__init__( self, problem.conf, options, output_prefix,
**kwargs )
self.problem = problem
self.setup_options()
self.setup_output_info( self.problem, self.options )
if volume is None:
self.volume = self.problem.evaluate(self.app_options.total_volume)
else:
self.volume = volume
def setup_options( self ):
SimpleApp.setup_options( self )
po = HomogenizationEngine.process_options
self.app_options += po( self.conf.options )
def compute_requirements( self, requirements, dependencies, store ):
problem = self.problem
opts = self.app_options
req_info = getattr( self.conf, opts.requirements )
requires = insert_sub_reqs( copy( requirements ), [], req_info )
for req in requires:
if req in dependencies and (dependencies[req] is not None):
continue
output( 'computing dependency %s...' % req )
rargs = req_info[req]
mini_app = MiniAppBase.any_from_conf( req, problem, rargs )
mini_app.setup_output( save_format = opts.save_format,
dump_format = opts.dump_format,
post_process_hook = self.post_process_hook,
file_per_var = opts.file_per_var )
store( mini_app )
problem.clear_equations()
# Pass only the direct dependencies, not the indirect ones.
dep_requires = rargs.get('requires', [])
data = {}
for key in dep_requires:
data[key] = dependencies[key]
dep = mini_app(data=data)
dependencies[req] = dep
output( '...done' )
return dependencies
def call( self, ret_all = False ):
problem = self.problem
opts = self.app_options
coef_info = getattr( self.conf, opts.coefs )
is_store_filenames = coef_info.pop('filenames', None) is not None
dependencies = {}
save_names = {}
dump_names = {}
def store_filenames( app ):
if not '(not_set)' in app.get_save_name_base():
save_names[app.name] = app.get_save_name_base()
if not '(not_set)' in app.get_dump_name_base():
dump_names[app.name] = app.get_dump_name_base()
def _get_parents(req_list):
out = []
for req_name in req_list:
aux = req_name.split('.')
if len(aux) == 2:
out.append(aux[1])
return out
# Some coefficients can require other coefficients - resolve their order
# here.
graph = {}
for coef_name, cargs in coef_info.iteritems():
if not coef_name in graph:
graph[coef_name] = [0]
requires = cargs.get('requires', [])
for parent in _get_parents(requires):
graph[coef_name].append(parent)
requires.remove('c.' + parent)
sorted_coef_names = sort_by_dependency(deepcopy(graph))
## print graph
## print sorted_coef_names
coefs = Struct()
for coef_name in sorted_coef_names:
cargs = coef_info[coef_name]
output( 'computing %s...' % coef_name )
requires = cargs.get( 'requires', [] )
self.compute_requirements( requires, dependencies, store_filenames )
mini_app = MiniAppBase.any_from_conf( coef_name, problem, cargs )
if len(graph[coef_name]) > 1:
for name in graph[coef_name][1:]:
key = 'c.' + name
requires.append(key)
dependencies[key] = getattr(coefs, name)
problem.clear_equations()
# Pass only the direct dependencies, not the indirect ones.
data = {}
for key in requires:
data[key] = dependencies[key]
val = mini_app(self.volume, data=data)
setattr( coefs, coef_name, val )
output( '...done' )
# remove "auxiliary" coefs
for coef_name in sorted_coef_names:
cstat = coef_info[coef_name].get('status', 'main')
if cstat == 'auxiliary':
delattr(coefs, coef_name)
# Store filenames of all requirements as a "coefficient".
if is_store_filenames:
coefs.save_names = save_names
coefs.dump_names = dump_names
if ret_all:
return coefs, dependencies
else:
return coefs
|
Python
| 0.000001
|
@@ -61,16 +61,29 @@
output,
+ get_default,
Struct%0A
@@ -1634,33 +1634,32 @@
def __init__(
-
self, problem, o
@@ -1665,16 +1665,34 @@
options,
+ app_options=None,
%0A
@@ -1705,18 +1705,15 @@
-
volume
- =
+=
None
@@ -1727,19 +1727,17 @@
t_prefix
- =
+=
'he:', *
@@ -1743,17 +1743,16 @@
**kwargs
-
):%0A
@@ -1964,16 +1964,39 @@
options(
+app_options=app_options
)%0A
@@ -2228,22 +2228,38 @@
options(
-
self
-
+, app_options=None
):%0A
@@ -2289,15 +2289,80 @@
ons(
- self )
+self)%0A app_options = get_default(app_options, self.conf.options)%0A
%0A
@@ -2443,27 +2443,19 @@
po(
- self.conf.
+app_
options
-
)%0A%0A
|
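The refactor above lets callers inject options while falling back to the parsed configuration, via sfepy's get_default helper. Assuming the usual None-fallback semantics, the pattern reduces to:

def get_default(value, default):
    """Return value unless it is None, otherwise the fallback."""
    return default if value is None else value

class Engine(object):
    def __init__(self, conf_options, app_options=None):
        # explicit app_options wins; otherwise use the configuration file
        self.app_options = get_default(app_options, conf_options)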
61a582e968bec7344255d68d143551e1e6c7b278
|
Use wraps to preserve name, fixes #55
|
huey/djhuey/__init__.py
|
huey/djhuey/__init__.py
|
"""
This module contains a lot of cruft to handle instantiating a "Huey" object
using Django settings. Unlike more flexible python apps, the huey django
integration consists of a single global Huey instance configured via the
settings module.
"""
import sys
from django.conf import settings
from django.db import close_connection
from huey import crontab
from huey import Huey
from huey.utils import load_class
configuration_message = """
Configuring Huey for use with Django
====================================
Huey was designed to be simple to configure in the general case. For that
reason, huey will "just work" with no configuration at all provided you have
Redis installed and running locally.
On the other hand, you can configure huey manually using the following
setting structure. The following example uses Redis on localhost:
Simply point to a backend:
HUEY = {
'backend': 'huey.backends.redis_backend',
'name': 'unique name',
'connection': {'host': 'localhost', 'port': 6379}
'consumer_options': {'workers': 4},
}
If you would like to configure Huey's logger using Django's integrated logging
settings, the logger used by consumer is named "huey.consumer".
For more granular control, you can assign HUEY programmatically:
HUEY = Huey(RedisBlockingQueue('my-queue'))
"""
def default_queue_name():
try:
return settings.DATABASE_NAME
except AttributeError:
return settings.DATABASES['default']['NAME']
except KeyError:
return 'huey'
def config_error(msg):
print(configuration_message)
print('\n\n')
print(msg)
sys.exit(1)
def dynamic_import(obj, key, required=False):
try:
path = obj[key]
except KeyError:
if required:
config_error('Missing required configuration: "%s"' % key)
return None
try:
return load_class(path + '.Components')
except ImportError:
config_error('Unable to import %s: "%s"' % (key, path))
try:
HUEY = getattr(settings, 'HUEY', None)
except:
config_error('Error encountered reading settings.HUEY')
if HUEY is None:
try:
from huey import RedisHuey
except ImportError:
config_error('Error: Huey could not import the redis backend. '
'Install `redis-py`.')
HUEY = RedisHuey(default_queue_name())
if not isinstance(HUEY, Huey):
Queue, DataStore, Schedule, Events = dynamic_import(HUEY, 'backend')
name = HUEY.get('name') or default_queue_name()
conn = HUEY.get('connection', {})
always_eager = HUEY.get('always_eager', False)
HUEY = Huey(
Queue(name, **conn),
DataStore(name, **conn),
Schedule(name, **conn),
Events(name, **conn),
always_eager=always_eager)
task = HUEY.task
periodic_task = HUEY.periodic_task
def close_db(fn):
"""Decorator to be used with tasks that may operate on the database."""
def inner(*args, **kwargs):
try:
return fn(*args, **kwargs)
finally:
close_connection()
return inner
def db_task(*args, **kwargs):
def decorator(fn):
return task(*args, **kwargs)(close_db(fn))
return decorator
def db_periodic_task(*args, **kwargs):
def decorator(fn):
return periodic_task(*args, **kwargs)(close_db(fn))
return decorator
|
Python
| 0
|
@@ -241,16 +241,44 @@
le.%0A%22%22%22%0A
+from functools import wraps%0A
import s
@@ -2928,16 +2928,31 @@
ase.%22%22%22%0A
+ @wraps(fn)%0A
def
|
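Without functools.wraps, every task pushed through close_db() would report __name__ == 'inner', which confuses anything that identifies callables by name (the symptom behind the issue #55 referenced in the subject). A small demonstration of what @wraps preserves:

from functools import wraps

def close_db(fn):
    @wraps(fn)  # copy __name__, __doc__ and __module__ from fn onto inner
    def inner(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        finally:
            pass  # close_connection() in the real decorator
    return inner

@close_db
def send_welcome_email():
    """Deliver the signup email."""

print(send_welcome_email.__name__)  # 'send_welcome_email', not 'inner'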
3dd84fc4dc6cff921329286485e287c13ebebdec
|
Update version.py
|
mirdata/version.py
|
mirdata/version.py
|
#!/usr/bin/env python
"""Version info"""
short_version = "0.3"
version = "0.3.4b0"
|
Python
| 0.000001
|
@@ -74,11 +74,11 @@
%220.3.4b
-0
+1
%22%0A
|
32b287b9d22b22262d291fb7e352a3502fe2e68f
|
Fix Docker test to wait fixed time (#5858)
|
test/assembly/docker.py
|
test/assembly/docker.py
|
#!/usr/bin/env python
import os
import socket
import subprocess as sp
import sys
import time
def wait_for_port(port, host='localhost', timeout=30.0):
start_time = time.time()
while True:
try:
socket.create_connection((host, port), timeout=timeout)
return
except OSError as ex:
time.sleep(0.01)
if time.time() - start_time >= timeout:
raise TimeoutError('Waited too long for the port {} on host {} to start accepting '
'connections.'.format(port, host))
print('Building the image...')
sp.check_call(['bazel', 'run', '//:assemble-docker'])
print('Starting the image...')
sp.check_call(['docker', 'run', '-v', '{}:/grakn-core-all-linux/logs/'.format(os.getcwd()), '--name', 'grakn','-d', '--rm', '-ti', '-p', '127.0.0.1:48555:48555/tcp', 'bazel:assemble-docker'])
print('Docker status:')
sp.check_call(['docker', 'ps'])
sys.stdout.write('Waiting for the instance to be ready')
sys.stdout.flush()
timeout = 0 # TODO: add timeout
# TODO: fail if the docker image is dead
wait_for_port(48555)
print('Running the test...')
sp.check_call(['bazel', 'test', '//test/common:grakn-application-test', '--test_output=streamed',
'--spawn_strategy=standalone', '--cache_test_results=no'])
print('Stopping the container...')
sp.check_call(['docker', 'kill', 'grakn'])
print('Done!')
|
Python
| 0.000715
|
@@ -92,494 +92,8 @@
me%0A%0A
-%0Adef wait_for_port(port, host='localhost', timeout=30.0):%0A start_time = time.time()%0A while True:%0A try:%0A socket.create_connection((host, port), timeout=timeout)%0A return%0A except OSError as ex:%0A time.sleep(0.01)%0A if time.time() - start_time %3E= timeout:%0A raise TimeoutError('Waited too long for the port %7B%7D on host %7B%7D to start accepting '%0A 'connections.'.format(port, host))%0A%0A%0A
prin
@@ -458,24 +458,13 @@
%5D)%0A%0A
-sys.stdout.write
+print
('Wa
@@ -469,16 +469,20 @@
Waiting
+30s
for the
@@ -508,119 +508,21 @@
y')%0A
-sys.stdout.flush()%0Atimeout = 0 # TODO: add timeout%0A# TODO: fail if the docker image is dead%0Await_for_port(48555
+time.sleep(30
)%0A%0Ap
|
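The fix above trades the socket-polling loop for a flat 30-second pause: simpler and immune to the connect/retry edge cases in the removed helper, at the price of always waiting the full window even when the instance is ready sooner. The wait reduces to:

import time

print('Waiting 30s for the instance to be ready')
time.sleep(30)  # fixed grace period instead of polling port 48555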
d8a27a94d90e5611b24c26c331a0016bbbb87af0
|
update debug: set default to false
|
web/web.py
|
web/web.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# web
#
import os
import flask
import models
app = flask.Flask(__name__)
app.secret_key = 'some_secret'
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024 # max 16M
#app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def register_routers():
from routers import home
app.register_blueprint(home.bp, url_prefix='')
for blueprint in 'home', 'repo', 'donate':
exec 'from routers import %s' %(blueprint)
bp = eval(blueprint+'.bp')
app.register_blueprint(bp, url_prefix='/'+blueprint)
#@app.route('/login/', methods=['GET', 'POST'])
#def login():
# app.logger.debug("login")
# error = None
# if request.method == 'POST':
# if request.form['username'] != 'admin' or \
# request.form['password'] != 'secret':
# error = 'Invalid credentials'
# else:
# flash('You are successfully logged in')
# return redirect(url_for('index'))
# return render_template('login.html', error=error)
port = os.getenv('PORT') or '5000'
if __name__ == '__main__':
register_routers()
app.run(debug=True, host='0.0.0.0', port=int(port))
|
Python
| 0.000046
|
@@ -1059,16 +1059,69 @@
'5000'%0A
+debug = os.getenv('DEBUG') in ('true', '1') or False%0A
if __nam
@@ -1184,12 +1184,13 @@
bug=
-True
+debug
, ho
|
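The hunk above derives the Flask debug flag from a DEBUG environment variable, treating 'true' or '1' as on. A slightly more forgiving sketch of the same parsing (case-insensitive, with a few extra spellings; an embellishment, not what the diff itself does):

import os

def env_flag(name, default=False):
    value = os.getenv(name)
    if value is None:
        return default
    return value.strip().lower() in ('1', 'true', 'yes', 'on')

# app.run(debug=env_flag('DEBUG'), host='0.0.0.0', port=int(port))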
e46d60d1dbcff1d6801a452f56eb638312bd408c
|
Allow empty-named keys on nix
|
keyboard/nixkeyboard.py
|
keyboard/nixkeyboard.py
|
# -*- coding: utf-8 -*-
import struct
import traceback
from time import time as now
from collections import namedtuple
from .keyboard_event import KeyboardEvent, KEY_DOWN, KEY_UP, normalize_name
from .nixcommon import EventDevice, EV_KEY
import os
if os.geteuid() != 0:
raise ImportError('You must be root to use this library on linux.')
# TODO: start by reading the current keyboard state, so as not to miss any already pressed keys.
# See: http://stackoverflow.com/questions/3649874/how-to-get-keyboard-state-in-linux
def cleanup_key(name):
""" Formats a dumpkeys format to our standard. """
name = name.lstrip('+')
is_keypad = name.startswith('KP_')
for mod in ('Meta_', 'Control_', 'dead_', 'KP_'):
if name.startswith(mod):
name = name[len(mod):]
# Dumpkeys is weird like that.
if name == 'Remove':
name = 'Delete'
elif name == 'Delete':
name = 'Backspace'
return normalize_name(name), is_keypad
"""
Use `dumpkeys --keys-only` to list all scan codes and their names. We
then parse the output and build a table. For each scan code we have
a list of names, and if each name is in the keypad or not.
"""
from subprocess import check_output
import re
from_scan_code = {}
to_scan_code = {}
keycode_template = r'\nkeycode\s+(\d+) = (\S+)(?: {2,}(\S+))?'
dump = check_output(['dumpkeys', '--keys-only'], universal_newlines=True)
for str_scan_code, str_regular_name, str_shifted_name in re.findall(keycode_template, dump):
scan_code = int(str_scan_code)
regular_name, is_keypad_regular = cleanup_key(str_regular_name)
shifted_name, is_keypad_shifted = cleanup_key(str_shifted_name)
assert is_keypad_regular == is_keypad_shifted
from_scan_code[scan_code] = ([regular_name, shifted_name], is_keypad_regular)
# Non-keypad keys are preferred.
if not is_keypad_regular or regular_name not in to_scan_code:
to_scan_code[regular_name] = (scan_code, False)
# Capitalize letters correctly to help reverse mapping.
if len(shifted_name) == 1:
shifted_name = shifted_name.upper()
if not is_keypad_regular or shifted_name not in to_scan_code:
to_scan_code[shifted_name] = (scan_code, True)
from glob import glob
paths = glob('/dev/input/by-path/*-event-kbd')
if paths:
device = EventDevice(paths[0])
else:
raise ImportError('No keyboard files found (/dev/input/by-path/*-event-kbd).')
shift_is_pressed = False
def listen(callback):
while True:
time, type, code, value = device.read_event()
if type != EV_KEY:
continue
scan_code = code
event_type = KEY_DOWN if value else KEY_UP # 0 = UP, 1 = DOWN, 2 = HOLD
names, is_keypad = from_scan_code[scan_code]
global shift_is_pressed
name = names[shift_is_pressed]
if event_type == KEY_DOWN and name == 'shift':
shift_is_pressed = True
elif event_type == KEY_UP and name == 'shift':
shift_is_pressed = False
event = KeyboardEvent(event_type, scan_code, is_keypad, name, time)
callback(event)
def write_event(scan_code, is_down):
device.write_event(EV_KEY, scan_code, int(is_down))
def map_char(character):
try:
return to_scan_code[character]
except KeyError:
raise ValueError('Character {} is not mapped to any known key.'.format(repr(character)))
def press(scan_code):
write_event(scan_code, True)
def release(scan_code):
write_event(scan_code, False)
def type_unicode(character):
codepoint = ord(character)
hexadecimal = hex(codepoint)[len('0x'):]
for key in ['ctrl', 'shift', 'u']:
scan_code, _ = map_char(key)
press(scan_code)
for key in hexadecimal:
scan_code, _ = map_char(key)
press(scan_code)
release(scan_code)
for key in ['ctrl', 'shift', 'u']:
scan_code, _ = map_char(key)
release(scan_code)
if __name__ == '__main__':
def p(e):
print(e)
listen(p)
|
Python
| 0.000183
|
@@ -1587,24 +1587,53 @@
gular_name)%0A
+ if str_shifted_name:%0A
shifted_
@@ -1684,24 +1684,108 @@
ifted_name)%0A
+ else:%0A shifted_name, is_keypad_shifted = regular_name, is_keypad_regular%0A
assert i
|
776671187a31c860afa1c065549596716e7c6ce9
|
Work around Python 3.5 limitation
|
shipyard/templates/volumes.py
|
shipyard/templates/volumes.py
|
"""Build rule helpers for pod data volumes.
Although strictly speaking volumes are a part of pods, their build
process is quite different from pod's; so we put it in this separate
module.
"""
__all__ = [
'fill_tarball',
]
import grp
import io
import pwd
import tarfile
from pathlib import Path
from garage import asserts
from garage import datetimes
def fill_tarball(parameters, spec, tarball):
"""Fill tarball content from the spec.
The spec object is usually loaded from a JSON or YAML file.
"""
for member_spec in spec.get('members', ()):
_add_member(parameters, member_spec, tarball)
# Only two kinds are supported at the moment.
MEMBER_KINDS = {
'file': (Path.is_file, tarfile.REGTYPE),
'dir': (Path.is_dir, tarfile.DIRTYPE),
}
def _add_member(parameters, member_spec, tarball):
"""Add a member to tarball from the spec."""
# Read member metadata.
path = member_spec['path']
asserts.precond(
not path.startswith('/'),
'expect relative path: %s', path,
)
mode = member_spec.get('mode') # This is the permission bits.
mtime = member_spec.get('mtime', int(datetimes.utcnow().timestamp()))
kind = member_spec.get('kind')
if kind is not None:
asserts.in_(kind, MEMBER_KINDS)
owner = member_spec.get('owner')
uid = member_spec.get('uid')
group = member_spec.get('group')
gid = member_spec.get('gid')
if owner is None and uid is not None:
owner = pwd.getpwuid(uid).pw_name
if uid is None and owner is not None:
uid = pwd.getpwnam(owner).pw_uid
asserts.precond(
(owner is None) == (uid is None),
'expect both or neither of owner and uid: %s, %s', owner, uid,
)
if group is None and gid is not None:
group = grp.getgrgid(gid).gr_name
if gid is None and group is not None:
gid = grp.getgrnam(group).gr_gid
asserts.precond(
(group is None) == (gid is None),
'expect both or neither of group and gid: %s, %s', group, gid,
)
#
# Read member content.
#
# We support two ways to specify member content at the moment:
# * Define in-place: `content`.
# * Read from a path parameter: `content_path_parameter`.
#
content = member_spec.get('content')
content_encoding = member_spec.get('content_encoding', 'utf-8')
content_path_parameter = member_spec.get('content_path_parameter')
asserts.precond(
content is None or content_path_parameter is None,
'expect at most one of content and content_path_parameter',
)
# Create TarInfo object.
if content is not None:
asserts.not_none(mode)
asserts.not_none(kind)
asserts.not_none(owner)
asserts.not_none(group)
content_bytes = content.encode(content_encoding)
content_path = None
tarinfo = tarfile.TarInfo(path)
fileobj = io.BytesIO(content_bytes)
tarinfo.size = len(content_bytes)
elif content_path_parameter is not None:
content_path = parameters[content_path_parameter]
tarinfo = tarball.gettarinfo(name=content_path, arcname=path)
fileobj = None
else:
content_path = None
tarinfo = tarfile.TarInfo(path)
fileobj = None
if mode is not None:
tarinfo.mode = mode
if not tarinfo.mtime:
tarinfo.mtime = mtime
if kind is not None:
predicate, member_type = MEMBER_KINDS[kind]
asserts.precond(
content_path is None or predicate(content_path),
'expect %s-kind: %s', kind, content_path,
)
tarinfo.type = member_type
if owner is not None:
asserts.not_none(uid)
tarinfo.uname = owner
tarinfo.uid = uid
if group is not None:
asserts.not_none(gid)
tarinfo.gname = group
tarinfo.gid = gid
# Finally, add TarInfo object to the tarball.
# Skip adding '.', which seems to be a nice thing to do.
if path != '.':
_add_tarinfo(tarinfo, content_path, fileobj, tarball)
if content_path and content_path.is_dir():
for child_path in content_path.rglob('*'):
child_tarinfo = tarball.gettarinfo(
name=child_path,
arcname=str(path / child_path.relative_to(content_path)),
)
if owner is not None:
asserts.not_none(uid)
child_tarinfo.uname = owner
child_tarinfo.uid = uid
if group is not None:
asserts.not_none(gid)
child_tarinfo.gname = group
child_tarinfo.gid = gid
_add_tarinfo(child_tarinfo, child_path, None, tarball)
def _add_tarinfo(tarinfo, path, fileobj, tarball):
asserts.precond(
path is None or fileobj is None,
'expect at most one of path and fileobj',
)
if path is not None and path.is_file():
with path.open('rb') as fileobj:
tarball.addfile(tarinfo, fileobj=fileobj)
else:
tarball.addfile(tarinfo, fileobj=fileobj)
|
Python
| 0.000402
|
@@ -3098,16 +3098,83 @@
ameter%5D%0A
+ # XXX %60gettarinfo%60 of Python 3.5 doesn't accept path-like.%0A
@@ -3207,16 +3207,20 @@
fo(name=
+str(
content_
@@ -3223,16 +3223,17 @@
ent_path
+)
, arcnam
@@ -4260,16 +4260,87 @@
('*'):%0A%0A
+ # XXX %60gettarinfo%60 of Python 3.5 doesn't accept path-like.%0A
@@ -4400,16 +4400,20 @@
name=
+str(
child_pa
@@ -4410,24 +4410,25 @@
r(child_path
+)
,%0A
|
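The XXX comments added above exist because TarFile.gettarinfo() only accepts path-like objects from Python 3.6 onward; under 3.5 a pathlib.Path must be stringified first. A minimal compatibility wrapper:

import tarfile
from pathlib import Path

def gettarinfo_compat(tarball, path, arcname):
    # str() keeps Python 3.5 happy and is harmless on 3.6+
    return tarball.gettarinfo(name=str(path), arcname=str(arcname))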
2383648a0f107de92ec47dd0977661514f8024dc
|
Version bump
|
exfi/__init__.py
|
exfi/__init__.py
|
"""
Compute the splice graph from a transcriptome and raw genomic reads using bloom
filters.
"""
__version__ = '1.5.0'
|
Python
| 0.000001
|
@@ -112,9 +112,10 @@
'1.
-5.0
+4.13
'%0A
|
38f09decaeef91e52e765a3a1b0cf5cb03ee4747
|
Use a 10s timeout for requests to discourse
|
c2corg_api/views/user.py
|
c2corg_api/views/user.py
|
from functools import partial
from pyramid.httpexceptions import HTTPInternalServerError
from cornice.resource import resource
from c2corg_api.models.user import User, schema_user, schema_create_user
from c2corg_api.views import (
cors_policy, json_view, restricted_view, restricted_json_view,
to_json_dict)
from c2corg_api.views.validation import validate_id
from c2corg_api.models import DBSession
from c2corg_api.security.roles import (
try_login, remove_token, extract_token, renew_token)
from c2corg_api.security.discourse_sso_provider import (
discourse_redirect, discourse_redirect_without_nonce)
from pydiscourse.client import DiscourseClient
import colander
import datetime
import logging
log = logging.getLogger(__name__)
ENCODING = 'UTF-8'
# 1 second timeout for requests to discourse API
CLIENT_TIMEOUT = 1
def validate_json_password(request):
"""Checks if the password was given and encodes it.
This is done here as the password is not an SQLAlchemy field.
In addition, we can ensure the password is not leaked in the
validation error messages.
"""
if 'password' not in request.json:
request.errors.add('body', 'password', 'Required')
try:
# We receive a unicode string. The hashing function used
# later on requires plain string otherwise it raises
# the "Unicode-objects must be encoded before hashing" error.
password = request.json['password']
request.validated['password'] = password.encode(ENCODING)
except:
request.errors.add('body', 'password', 'Invalid')
def validate_unique_attribute(attrname, request):
"""Checks if the given attribute is unique.
"""
if attrname in request.json:
value = request.json[attrname]
attr = getattr(User, attrname)
count = DBSession.query(User).filter(attr == value).count()
if count == 0:
request.validated[attrname] = value
else:
request.errors.add('body', attrname, 'already used ' + attrname)
@resource(path='/users/{id}', cors_policy=cors_policy)
class UserRest(object):
def __init__(self, request):
self.request = request
@restricted_view(validators=validate_id)
def get(self):
id = self.request.validated['id']
user = DBSession. \
query(User). \
filter(User.id == id). \
first()
return to_json_dict(user, schema_user)
@resource(path='/users/register', cors_policy=cors_policy)
class UserRegistrationRest(object):
def __init__(self, request):
self.request = request
@json_view(schema=schema_create_user, validators=[
validate_json_password,
partial(validate_unique_attribute, "email"),
partial(validate_unique_attribute, "username")])
def post(self):
user = schema_create_user.objectify(self.request.validated)
user.password = self.request.validated['password']
DBSession.add(user)
try:
DBSession.flush()
except:
# TODO: log the error for debugging
raise HTTPInternalServerError('Error persisting user')
return to_json_dict(user, schema_user)
class LoginSchema(colander.MappingSchema):
username = colander.SchemaNode(colander.String())
password = colander.SchemaNode(colander.String())
login_schema = LoginSchema()
def token_to_response(user, token, request):
assert token is not None
expire_time = token.expire - datetime.datetime(1970, 1, 1)
roles = ['moderator'] if user.moderator else []
return {
'token': token.value,
'username': user.username,
'expire': int(expire_time.total_seconds()),
'roles': roles
}
@resource(path='/users/login', cors_policy=cors_policy)
class UserLoginRest(object):
def __init__(self, request):
self.request = request
@json_view(schema=login_schema, validators=[validate_json_password])
def post(self):
request = self.request
username = request.validated['username']
password = request.validated['password']
user = DBSession.query(User). \
filter(User.username == username).first()
token = try_login(user, password, request) if user else None
if token:
response = token_to_response(user, token, request)
if 'discourse' in request.json:
settings = request.registry.settings
if 'sso' in request.json and 'sig' in request.json:
sso = request.json['sso']
sig = request.json['sig']
redirect = discourse_redirect(user, sso, sig, settings)
response['redirect'] = redirect
else:
try:
r = discourse_redirect_without_nonce(user, settings)
response['redirect_internal'] = r
except:
# Any error with discourse should not prevent login
log.warning(
'Error logging into discourse for %d', user.id,
exc_info=True)
return response
else:
request.errors.status = 403
request.errors.add('body', 'user', 'Login failed')
return None
@resource(path='/users/renew', cors_policy=cors_policy)
class UserRenewRest(object):
def __init__(self, request):
self.request = request
@restricted_view(renderer='json')
def post(self):
request = self.request
userid = request.authenticated_userid
user = DBSession.query(User).filter(User.id == userid).first()
token = renew_token(user, request)
if token:
return token_to_response(user, token, request)
else:
raise HTTPInternalServerError('Error renewing token')
def get_discourse_client(settings):
api_key = settings['discourse.api_key']
url = settings['discourse.url']
# system is a built-in user available in all discourse instances.
return DiscourseClient(
url, api_username='system', api_key=api_key, timeout=CLIENT_TIMEOUT)
@resource(path='/users/logout', cors_policy=cors_policy)
class UserLogoutRest(object):
def __init__(self, request):
self.request = request
@restricted_json_view(renderer='json')
def post(self):
request = self.request
userid = request.authenticated_userid
result = {'user': userid}
remove_token(extract_token(request))
if 'discourse' in request.json:
try:
client = get_discourse_client(request.registry.settings)
client.log_out(userid)
except:
# Any error with discourse should not prevent logout
log.warning(
'Error logging out of discourse for %d', userid,
exc_info=True)
return result
|
Python
| 0
|
@@ -785,15 +785,17 @@
%0A# 1
+0
second
+s
tim
@@ -829,16 +829,109 @@
rse API%0A
+# Using a large value to take into account a possible slow restart (caching)%0A# of discourse.%0A
CLIENT_T
@@ -940,16 +940,17 @@
EOUT = 1
+0
%0A%0A%0Adef v
|
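Note on the diff column: the hunks in these records store their bodies percent-encoded (%0A for newlines, %22 for double quotes, %7B/%7D for braces), with a leading +/- marking added or removed text. A minimal decoding sketch using the standard library, assuming plain percent-encoding throughout; the `encoded` value is copied from the second hunk of the record above:

from urllib.parse import unquote

# One added span from the hunk above, as stored in the diff column.
encoded = "+# Using a large value to take into account a possible slow restart (caching)%0A# of discourse.%0A"

sign, body = encoded[0], encoded[1:]
print(sign)           # '+' -> this span is added by the patch
print(unquote(body))  # '%0A' decodes to real newlines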
6029a6144d7f3c19a3ed556f7429dac24e8d1a4e
|
Version update -> 1.4.4
|
exfi/__init__.py
|
exfi/__init__.py
|
"""
Compute the splice graph from a transcriptome and raw genomic reads using bloom
filters.
"""
__version__ = '1.4.3'
|
Python
| 0
|
@@ -114,7 +114,7 @@
1.4.
-3
+4
'%0A
|
fe2718af4ba5f5a47b2b2fc59962025466e3caf2
|
fix some coding errors
|
mrp_supplier_price/models/mrp.py
|
mrp_supplier_price/models/mrp.py
|
# -*- coding: utf-8 -*-
# © 2016 Mikel Arregi Etxaniz - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import models, fields, api
from openerp.addons import decimal_precision as dp
class MrpProductionProductLine(models.Model):
_inherit = 'mrp.production.product.line'
@api.depends('product_id.uop_coeff', 'product_qty')
def _compute_uop_qty(self):
for line in self.filtered('product_id'):
line.uop_qty = line.product_qty * line.product_id.uop_coeff
@api.depends('cost', 'product_id.uop_coeff')
def _compute_uop_price(self):
for line in self.filtered('product_id'):
line.uop_price = line.cost / line.product_id.uop_coeff
@api.depends('product_id.uop_id', 'product_id.uom_po_id')
def _compute_product_uop(self):
for line in self.filtered('product_id'):
line.uop_id = line.product_id.uop_id or line.product_id.uom_po_id
@api.depends('product_id.seller_ids')
def _compute_variant_suppliers(self):
self.supplier_id_domain = self.product_id.seller_ids.mapped('name')
supplier_id = fields.Many2one(
comodel_name='res.partner', string='Supplier')
supplier_id_domain = fields.Many2many(
comodel_name='res.partner', compute='_compute_variant_suppliers')
cost = fields.Float(
string='Cost', digits=dp.get_precision('Product Price'))
subtotal = fields.Float(
string='Subtotal', compute='_compute_subtotal',
digits=dp.get_precision('Product Price'))
uop_id = fields.Many2one(comodel_name='product.uom',
compute='_compute_product_uop')
uop_qty = fields.Float(compute='_compute_uop_qty')
uop_price = fields.Float(compute='_compute_uop_price',
digits=dp.get_precision('Product Price'))
def _select_best_cost_price(self, supplier_id=None):
best_price = {}
if supplier_id:
supplier_ids = self.product_id.seller_ids.filtered(
lambda x: x.name == supplier_id)
else:
supplier_ids = self.product_id.seller_ids
for line in supplier_ids.mapped('pricelist_ids').filtered(
lambda l: l.min_quantity <= self.product_qty):
if not best_price or line.min_quantity <= \
self.product_qty and \
best_price['cost'] > line.price:
best_price = {'supplier_id': line.suppinfo_id.name,
'cost': line.price}
return best_price
@api.depends('cost', 'product_qty')
def _compute_subtotal(self):
for line in self:
line.subtotal = line.product_qty * line.cost
@api.onchange('product_tmpl_id', 'product_id', 'product_qty')
def onchange_product_product_qty(self):
for line in self:
best_supplier = line._select_best_cost_price()
if best_supplier:
line.supplier_id = best_supplier['supplier_id']
line.cost = best_supplier['cost']
else:
line.cost = line.product_id.standard_price
@api.onchange('supplier_id')
def onchange_supplier_id(self):
for line in self:
best_price = line._select_best_cost_price(
supplier_id=line.supplier_id)
if best_price:
line.supplier_id = best_price['supplier_id']
line.cost = best_price['cost']
class MrpProduction(models.Model):
_inherit = 'mrp.production'
scheduled_total = fields.Float(
string='Scheduled Total', compute='_compute_scheduled_total',
digits=dp.get_precision('Product Price'))
profit_percent = fields.Float(string='Profit percentage')
commercial_percent = fields.Float(string='Commercial percentage')
profit = fields.Float(
string='Profit', compute='_compute_cost_total',
digits=dp.get_precision('Product Price'))
commercial = fields.Float(
string='Commercial', compute='_compute_cost_total',
digits=dp.get_precision('Product Price'))
cost_total = fields.Float(
string='Total', compute='_compute_cost_total',
digits=dp.get_precision('Product Price'))
production_total = fields.Float(
string='Production Total', compute='_compute_production_total')
@api.depends('product_lines', 'product_lines.subtotal')
def _compute_scheduled_total(self):
for mrp in self:
subtotal = mrp.mapped('product_lines.subtotal')
mrp.scheduled_total = subtotal and sum(subtotal) or 0
@api.depends('profit_percent', 'scheduled_total', 'commercial_percent')
def _compute_cost_total(self):
for mrp in self:
mrp.profit = mrp.scheduled_total * (mrp.profit_percent / 100)
mrp.cost_total =\
mrp.scheduled_total * ((100 + mrp.profit_percent) / 100)
mrp.commercial =\
mrp.cost_total * (mrp.commercial_percent / 100)
@api.depends('cost_total')
def _compute_production_total(self):
for prod in self:
total = prod.cost_total
try:
total += prod.routing_total
except:
pass
prod.production_total = total
@api.multi
def button_recompute_total(self):
fields_list = ['production_total']
for field in fields_list:
self.env.add_todo(self._fields[field], self)
self.recompute()
@api.multi
def action_compute(self):
res = super(MrpProduction, self).action_compute()
for line in self.product_lines:
line.onchange_product_product_qty()
return res
|
Python
| 0.000839
|
@@ -1036,20 +1036,73 @@
-self
+for line in self.filtered('product_id'):%0A line
.supplie
@@ -1115,20 +1115,20 @@
omain =
-self
+line
.product
@@ -5580,24 +5580,41 @@
compute(self
+, properties=None
):%0A r
@@ -5660,16 +5660,37 @@
compute(
+properties=properties
)%0A
|
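For reference, applying the hunks above: `_compute_variant_suppliers` goes from assigning on the bare recordset to iterating per record, and `action_compute` gains a `properties` argument forwarded to super(). A sketch of the patched methods reconstructed from the hunks (the surrounding model classes are assumed to be the ones shown in the record):

@api.depends('product_id.seller_ids')
def _compute_variant_suppliers(self):
    # Compute per record instead of once on the whole recordset.
    for line in self.filtered('product_id'):
        line.supplier_id_domain = line.product_id.seller_ids.mapped('name')

@api.multi
def action_compute(self, properties=None):
    # Forward the new keyword so callers that pass properties keep working.
    res = super(MrpProduction, self).action_compute(properties=properties)
    for line in self.product_lines:
        line.onchange_product_product_qty()
    return res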
aea0d98fd7fb5eaa0fd547b5442af6984f8d78e0
|
fix bugs
|
exp/exp/tests.py
|
exp/exp/tests.py
|
from django.test import TestCase, RequestFactory
import json
from . import views_model, views_auth
class SearchTestCase(TestCase):
def setUp(self):
self.factory = RequestFactory()
def test_general_search(self):
post_data = {
'search_query': 'tp3ks',
'query_specifier': 'general',
}
request = self.factory.post('/search/', data=post_data)
response = views_model.search(request)
resp_data = json.loads(response.context, safe=False)
self.assertEqual(resp_data['status_code'], 200)
|
Python
| 0.000001
|
@@ -495,22 +495,26 @@
onte
-xt, safe=False
+nt.decode('utf-8')
)%0A%0A
|
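The hunk above replaces a call that cannot work: `json.loads` has no `safe` keyword (that belongs to Django's JsonResponse), and the test response carries its body in `content`, not `context`. Reconstructed from the hunk, the parsing line becomes:

resp_data = json.loads(response.content.decode('utf-8'))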
b738dc7b888aa356e1b7a3dfeee9ff2ca4b14b9e
|
Add style option to mark_entities
|
metal/contrib/entity_classification/utils.py
|
metal/contrib/entity_classification/utils.py
|
def mark_entities(tokens, positions, markers=[]):
"""Adds special markers around tokens at specific positions (e.g., entities)
Args:
tokens: A list of tokens (the sentence)
positions:
1) A list of inclusive ranges (tuples) corresponding to the
token ranges of the entities in order. (Assumes each entity
has only one corresponding mention.)
OR
2) A dict of lists with keys corresponding to mention indices and
                values corresponding to one or more inclusive ranges for that
                mention. (Allows entities to potentially have multiple
mentions)
Returns:
toks: An extended list of tokens with markers around the mentions
WARNING: if the marked token set will be used with pretrained embeddings,
provide markers that will not result in UNK embeddings!
Example:
Input: (['The', 'cat', 'sat'], [(1,1)])
Output: ['The', '[[BEGIN0]]', 'cat', '[[END0]]', 'sat']
"""
if markers and len(markers) != 2 * len(positions):
msg = (
f"Expected len(markers) == 2 * len(positions), "
f"but {len(markers)} != {2 * len(positions)}."
)
raise ValueError(msg)
toks = list(tokens)
# markings will be of the form:
# [(position, entity_idx), (position, entity_idx), ...]
if isinstance(positions, list):
markings = [(position, idx) for idx, position in enumerate(positions)]
elif isinstance(positions, dict):
markings = []
for idx, v in positions.items():
for position in v:
markings.append((position, idx))
else:
msg = (
f"Argument _positions_ must be a list or dict. "
f"Instead, got {type(positions)}"
)
raise ValueError(msg)
markings = sorted(markings)
for i, ((si, ei), idx) in enumerate(markings):
if markers:
start_marker = markers[2 * idx]
end_marker = markers[2 * idx + 1]
else:
start_marker = f"[[BEGIN{idx}]]"
end_marker = f"[[END{idx}]]"
toks.insert(si + 2 * i, start_marker)
toks.insert(ei + 2 * (i + 1), end_marker)
return toks
|
Python
| 0.000001
|
@@ -40,16 +40,32 @@
rkers=%5B%5D
+, style=%22insert%22
):%0A %22
@@ -680,16 +680,547 @@
ntions)%0A
+ markers: A list of strings (length of 2 * the number of entities) to%0A use as markers of the entities.%0A style: Where to apply the markers:%0A 'insert': Insert the markers as new tokens before/after each entity%0A 'concatenate': Prepend/append the markers to the first/last token%0A of each entity%0A If the tokens are going to be input to an LSTM, then it is usually%0A best to use the 'insert' option; 'concatenate' may be better for%0A viewing.%0A%0A
Retu
@@ -2684,32 +2684,66 @@
f%22%5B%5BEND%7Bidx%7D%5D%5D%22%0A
+ if style == %22insert%22:%0A
toks.ins
@@ -2772,32 +2772,36 @@
marker)%0A
+
+
toks.insert(ei +
@@ -2822,24 +2822,205 @@
end_marker)%0A
+ elif style == %22concatenate%22:%0A toks%5Bsi%5D = start_marker + toks%5Bsi%5D%0A toks%5Bei%5D = toks%5Bei%5D + end_marker%0A else:%0A raise NotImplementedError%0A
return t
|
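A usage sketch of `mark_entities` after the patch above adds the `style` keyword, reusing the example from the docstring (assumes the function is importable from the module shown):

tokens = ['The', 'cat', 'sat']
positions = [(1, 1)]

# 'insert' (the default) adds the markers as separate tokens:
# ['The', '[[BEGIN0]]', 'cat', '[[END0]]', 'sat']
print(mark_entities(tokens, positions, style='insert'))

# 'concatenate' glues the markers onto the entity's boundary tokens:
# ['The', '[[BEGIN0]]cat[[END0]]', 'sat']
print(mark_entities(tokens, positions, style='concatenate'))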
95a3734b244a09837e58a5c649cda80e6b242cda
|
call super() with proper args in UpdateActView class
|
calebasse/actes/views.py
|
calebasse/actes/views.py
|
# -*- coding: utf-8 -*-
import datetime
from django.http import HttpResponseRedirect, HttpResponse
from django.db.models import Q
from django.shortcuts import redirect
from calebasse.cbv import ListView, UpdateView, DeleteView
from calebasse.agenda.views import NewAppointmentView
from calebasse.agenda.models import EventWithAct
from calebasse.agenda.forms import UpdateAppointmentForm, NewAppointmentForm
import copy
import models
import forms
def redirect_today(request, service):
    '''If no date is given we redirect to the agenda for today'''
return redirect(act_listing, date=datetime.date.today().strftime('%Y-%m-%d'),
service=service)
class ActListingView(ListView):
model = models.Act
template_name = 'actes/act_listing.html'
def get_queryset(self):
qs = super(ActListingView, self).get_queryset()
qs = qs.filter(patient__service=self.service)
qs = qs.filter(date=self.date)
self.search_form = forms.ActSearchForm(data=self.request.GET or None)
last_name = self.request.GET.get('last_name')
group = self.request.GET.get('group')
patient_record_id = self.request.GET.get('patient_record_id')
social_security_number = self.request.GET.get('social_security_number')
doctor_name = self.request.GET.get('doctor_name')
filters = self.request.GET.getlist('filters')
if last_name:
qs = qs.filter(patient__last_name__istartswith=last_name)
if patient_record_id:
qs = qs.filter(patient__id=int(patient_record_id))
if doctor_name:
qs = qs.filter(doctors__last_name__icontains=doctor_name)
if 'valide' in filters:
qs = qs.exclude(last_validation_state__state_name__exact='VALIDE')
if 'group' in filters:
qs = qs.filter(act_type__group=True)
if 'pointe' in filters:
qs = qs.filter(last_validation_state__isnull=False). \
exclude(last_validation_state__state_name__exact='NON_VALIDE')
if 'non-pointe' in filters:
qs = qs.filter(Q(last_validation_state__isnull=True) | \
Q(last_validation_state__state_name__exact='NON_VALIDE'))
if 'absent-or-canceled' in filters:
qs = qs.filter(last_validation_state__state_name__in=('ABS_NON_EXC',
'ABS_EXC', 'ABS_INTER', 'ANNUL_NOUS',
'ANNUL_FAMILLE', 'REPORTE', 'ABS_ESS_PPS', 'ENF_HOSP'))
if 'is-billable' in filters:
qs = qs.filter(
(Q(act_type__billable=True) & Q(switch_billable=False)) | \
(Q(act_type__billable=False) & Q(switch_billable=True))
)
if 'switch-billable' in filters:
qs = qs.filter(switch_billable=True)
if 'lost' in filters:
qs = qs.filter(is_lost=True)
if 'pause-invoicing' in filters:
qs = qs.filter(pause=True)
if 'invoiced' in filters:
qs = qs.filter(is_billed=True)
return qs.select_related()
def get_context_data(self, **kwargs):
ctx = super(ActListingView, self).get_context_data(**kwargs)
ctx['search_form'] = self.search_form
self.request.record('acts-view', 'act listing by {user} from {ip}')
return ctx
class NewAct(NewAppointmentView):
success_url = '.'
success_msg = u'Acte enregistré avec succès.'
model = EventWithAct
form_class = UpdateAppointmentForm
def form_valid(self, form):
result = super(NewAct, self).form_valid(form)
self.object.act.save()
return result
act_listing = ActListingView.as_view()
act_new = NewAct.as_view()
class DeleteActView(DeleteView):
model = models.Act
template_name = 'actes/confirm_delete.html'
success_url = '..'
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
if self.object.event:
self.object.event.delete()
if not self.object.is_billed:
self.object.delete()
return HttpResponse(status=204)
delete_act = DeleteActView.as_view()
class UpdateActView(UpdateView):
model = models.Act
form_class = forms.ActUpdate
template_name = 'actes/act_update.html'
success_url = '..'
def form_valid(self, form):
result = super(UpdateView, self).form_valid(form)
if self.object.event:
doctors = copy.copy(self.object.doctors.all())
self.object.event.participants = doctors
self.object.event.act_type = self.object.act_type
self.object.event.save()
self.request.record('act-update',
'{obj_id} updated by {user} from {ip} with: {changes}',
obj_id=self.object.id,
changes={'participants': doctors, 'act_type': self.object.act_type})
return result
update_act = UpdateActView.as_view()
class RebillActView(UpdateView):
model = models.Act
template_name = 'actes/act_rebill.html'
success_url = '..'
def post(self, request, *args, **kwarg):
act = models.Act.objects.get(pk=kwarg['pk'])
act.is_billed = False
act.healthcare = None
act.save()
self.request.record('rebill-act', '{obj_id} rebilled by {user} from {ip}', obj_id=act.id)
return super(RebillActView, self).post(request, *args, **kwarg)
rebill_act = RebillActView.as_view()
|
Python
| 0.000001
|
@@ -4344,24 +4344,27 @@
super(Update
+Act
View, self).
|
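The three-character hunk above matters because Python 2's `super()` takes the class it is called *from*: `super(UpdateView, self)` starts the MRO lookup after `UpdateView`, so `UpdateView.form_valid` itself was being skipped. Reconstructed from the hunk:

class UpdateActView(UpdateView):
    def form_valid(self, form):
        # Name the current class so UpdateView.form_valid runs next in the MRO.
        result = super(UpdateActView, self).form_valid(form)
        return result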
ebfac180c04d24ea8ff93583eac52e6c0bc8d553
|
Add contract test for 'v2' email notification
|
tests/app/public_contracts/test_GET_notification.py
|
tests/app/public_contracts/test_GET_notification.py
|
from . import return_json_from_response, validate_v0, validate
from app.models import ApiKey, KEY_TYPE_NORMAL
from app.dao.api_key_dao import save_model_api_key
from app.v2.notifications.notification_schemas import get_notification_response
from tests import create_authorization_header
def _get_notification(client, notification, url):
save_model_api_key(ApiKey(
service=notification.service,
name='api_key',
created_by=notification.service.created_by,
key_type=KEY_TYPE_NORMAL
))
auth_header = create_authorization_header(service_id=notification.service_id)
return client.get(url, headers=[auth_header])
def test_get_v2_notification(client, sample_notification):
response_json = return_json_from_response(_get_notification(
client, sample_notification, '/v2/notifications/{}'.format(sample_notification.id)
))
validate(response_json, get_notification_response)
def test_get_api_sms_contract(client, sample_notification):
response_json = return_json_from_response(_get_notification(
client, sample_notification, '/notifications/{}'.format(sample_notification.id)
))
validate_v0(response_json, 'GET_notification_return_sms.json')
def test_get_api_email_contract(client, sample_email_notification):
response_json = return_json_from_response(_get_notification(
client, sample_email_notification, '/notifications/{}'.format(sample_email_notification.id)
))
validate_v0(response_json, 'GET_notification_return_email.json')
def test_get_job_sms_contract(client, sample_notification):
response_json = return_json_from_response(_get_notification(
client, sample_notification, '/notifications/{}'.format(sample_notification.id)
))
validate_v0(response_json, 'GET_notification_return_sms.json')
def test_get_job_email_contract(client, sample_email_notification):
response_json = return_json_from_response(_get_notification(
client, sample_email_notification, '/notifications/{}'.format(sample_email_notification.id)
))
validate_v0(response_json, 'GET_notification_return_email.json')
def test_get_notifications_contract(client, sample_notification, sample_email_notification):
response_json = return_json_from_response(_get_notification(
client, sample_notification, '/notifications'
))
validate_v0(response_json, 'GET_notifications_return.json')
|
Python
| 0
|
@@ -671,35 +671,322 @@
_v2_
-notification(client, sample
+sms_contract(client, sample_notification):%0A response_json = return_json_from_response(_get_notification(%0A client, sample_notification, '/v2/notifications/%7B%7D'.format(sample_notification.id)%0A ))%0A validate(response_json, get_notification_response)%0A%0A%0Adef test_get_v2_email_contract(client, sample_email
_not
@@ -1077,32 +1077,38 @@
client, sample_
+email_
notification, '/
@@ -1134,32 +1134,38 @@
'.format(sample_
+email_
notification.id)
|
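Decoded, the first hunk above renames the existing v2 test to `test_get_v2_sms_contract` and appends a matching email test; a reconstruction of the added function from the hunks (the fixtures are assumed to be defined elsewhere in the test suite):

def test_get_v2_email_contract(client, sample_email_notification):
    response_json = return_json_from_response(_get_notification(
        client, sample_email_notification,
        '/v2/notifications/{}'.format(sample_email_notification.id)
    ))
    validate(response_json, get_notification_response)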
fc6ad89460dca9e1e5b1a4effe14c868cafc0e54
|
add looping around math
|
test.py
|
test.py
|
__author__ = 'mike.lyons'
print "Hello World!"
x = raw_input("Enter any number: ")
y = raw_input("Enter another number: ")
try:
x = float(x)
y = float(y)
except ValueError:
x = 0.0
y = 0.0
print x+y
print x/2
print y**2
|
Python
| 0.000027
|
@@ -41,16 +41,33 @@
World!%22%0A
+%0Awhile True:%0A
x = raw_
@@ -94,16 +94,20 @@
ber: %22)%0A
+
y = raw_
@@ -143,13 +143,21 @@
%22)%0A%0A
+
try:%0A
+
@@ -169,16 +169,20 @@
loat(x)%0A
+
y =
@@ -190,16 +190,20 @@
loat(y)%0A
+
except V
@@ -213,16 +213,20 @@
eError:%0A
+
x =
@@ -229,16 +229,20 @@
x = 0.0%0A
+
y =
@@ -246,16 +246,20 @@
= 0.0%0A%0A
+
print x+
@@ -260,16 +260,20 @@
int x+y%0A
+
print x/
@@ -274,16 +274,20 @@
int x/2%0A
+
print y*
@@ -289,8 +289,134 @@
nt y**2%0A
+%0A user_exit = raw_input(%22Exit? (y/n): %22)%0A if user_exit == 'y' or user_exit == 'Y':%0A break%0Aprint %22Goodbye World!%22%0A
|
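Replaying every hunk above yields the looped version of the script; a reconstruction (Python 2 print statements and raw_input, matching the record):

__author__ = 'mike.lyons'

print "Hello World!"

while True:
    x = raw_input("Enter any number: ")
    y = raw_input("Enter another number: ")

    try:
        x = float(x)
        y = float(y)
    except ValueError:
        x = 0.0
        y = 0.0

    print x+y
    print x/2
    print y**2

    user_exit = raw_input("Exit? (y/n): ")
    if user_exit == 'y' or user_exit == 'Y':
        break
print "Goodbye World!"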
e33bbf8cfb7a6aa1a32ff99185f97582cfa60a78
|
support scans without scan parameter
|
silab_online_monitor/producer_sim/pybar_fei4.py
|
silab_online_monitor/producer_sim/pybar_fei4.py
|
''' This is a producer faking data coming from pyBAR by taking real data and sending these in chunks'''
import time
import numpy as np
import tables as tb
import zmq
import logging
from online_monitor.utils.producer_sim import ProducerSim
class pyBarFEI4Sim(ProducerSim):
def setup_producer_device(self):
ProducerSim.setup_producer_device(self)
with tb.openFile(self.config['data_file'], mode="r") as in_file_h5:
self.meta_data = in_file_h5.root.meta_data[:]
self.raw_data = in_file_h5.root.raw_data[:]
self.scan_parameter_name = in_file_h5.root.scan_parameters.dtype.names
self.scan_parameters = in_file_h5.root.scan_parameters[:]
self.readout_word_indeces = np.column_stack((self.meta_data['index_start'], self.meta_data['index_stop']))
self.actual_readout = 0
self.last_readout_time = None
def get_data(self): # Return the data of one readout
if self.actual_readout < self.scan_parameters.shape[0]:
index_start, index_stop = self.readout_word_indeces[self.actual_readout]
data = []
data.append(self.raw_data[index_start:index_stop])
data.extend((float(self.meta_data[self.actual_readout]['timestamp_start']), float(self.meta_data[self.actual_readout]['timestamp_stop']), int(self.meta_data[self.actual_readout]['error'])))
            # FIXME: Simple synchronization to have replay with similar timing, does not really work
now = time.time()
if self.last_readout_time is not None:
delay = now - self.last_readout_time
additional_delay = self.meta_data[self.actual_readout]['timestamp_stop'] - self.meta_data[self.actual_readout]['timestamp_start'] - delay
if additional_delay > 0:
time.sleep(additional_delay)
self.last_readout_time = now
return data, {str(self.scan_parameter_name): int(self.scan_parameters[self.actual_readout][0])}
def send_data(self):
'''Sends the data of every read out (raw data and meta data) via ZeroMQ to a specified socket
'''
time.sleep(float(self.config['delay'])) # Delay is given in seconds
try:
data, scan_parameters = self.get_data() # Get data of actual readout
        except TypeError:  # Data is fully replayed
logging.warning('%s producer: No data to replay anymore!' % self.name)
time.sleep(10)
return
self.actual_readout += 1
data_meta_data = dict(
name='ReadoutData',
dtype=str(data[0].dtype),
shape=data[0].shape,
timestamp_start=data[1], # float
timestamp_stop=data[2], # float
readout_error=data[3], # int
scan_parameters=scan_parameters # dict
)
try:
self.sender.send_json(data_meta_data, flags=zmq.SNDMORE | zmq.NOBLOCK)
self.sender.send(data[0], flags=zmq.NOBLOCK) # PyZMQ supports sending numpy arrays without copying any data
except zmq.Again:
pass
|
Python
| 0
|
@@ -537,32 +537,108 @@
oot.raw_data%5B:%5D%0A
+ self.n_readouts = self.meta_data.shape%5B0%5D%0A%0A try:%0A
self
@@ -708,32 +708,36 @@
mes%0A
+
self.scan_parame
@@ -774,24 +774,166 @@
rameters%5B:%5D%0A
+ except tb.NoSuchNodeError:%0A self.scan_parameter_name = 'No parameter'%0A self.scan_parameters = None%0A%0A
@@ -1218,32 +1218,18 @@
elf.
-scan_parameters.shape%5B0%5D
+n_readouts
:%0A
@@ -1648,13 +1648,8 @@
to
-have
repl
@@ -1723,16 +1723,17 @@
.time()%0A
+%0A
@@ -2113,16 +2113,70 @@
e = now%0A
+%0A if self.scan_parameters is not None:%0A
@@ -2274,16 +2274,83 @@
ut%5D%5B0%5D)%7D
+%0A else:%0A return data, %7B'No parameter': 0%7D
%0A%0A de
@@ -3443,16 +3443,16 @@
.Again:%0A
-
@@ -3459,8 +3459,9 @@
pass
+%0A
|
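Reconstructed from the hunks, the producer now records the readout count from the meta data and tolerates files without a `scan_parameters` node:

self.n_readouts = self.meta_data.shape[0]

try:
    self.scan_parameter_name = in_file_h5.root.scan_parameters.dtype.names
    self.scan_parameters = in_file_h5.root.scan_parameters[:]
except tb.NoSuchNodeError:  # scan without a scan parameter
    self.scan_parameter_name = 'No parameter'
    self.scan_parameters = None

and get_data() loops up to self.n_readouts, falling back to a dummy parameter:

if self.scan_parameters is not None:
    return data, {str(self.scan_parameter_name): int(self.scan_parameters[self.actual_readout][0])}
else:
    return data, {'No parameter': 0}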