| commit (string, fixed length 40) | subject (string, 1–3.25k chars) | old_file (string, 4–311 chars) | new_file (string, 4–311 chars) | old_contents (string, 0–26.3k chars) | lang (string, 3 classes) | proba (float64, 0–1) | diff (string, 0–7.82k chars) |
|---|---|---|---|---|---|---|---|
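Each row below follows this schema: a commit hash, the commit subject, the file path before and after the change, the full pre-commit file contents, a language tag, a probability score, and a unified diff whose payload is percent-encoded (`%0A` for newlines, `%22` for quotes, `%7B` for braces, and so on). A minimal sketch for decoding a hunk back to readable text, using only the Python 3 standard library:

from urllib.parse import unquote

# payload fragment taken from the first row's diff below
hunk = "+%ED%95%98%EB%A0%A4%EA%B3%A0 %ED%95%9C%EB%8B%A4.%0A"
print(unquote(hunk))  # UTF-8 percent-escapes decode back to the original text: '+ν•˜λ €κ³  ν•œλ‹€.\n'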
0a725db8e8d7f1e73a84fb0d0acc181603e706cb
|
Refactor to readability test
|
kboard/functional_test/test_post_delete.py
|
kboard/functional_test/test_post_delete.py
|
from .base import FunctionalTest, login_test_user_with_browser, NotFoundPostError
class DeletePostTest(FunctionalTest):
@login_test_user_with_browser
def test_delete_post(self):
# The user moves to the default board to check that the post-delete feature works properly.
self.move_to_default_board()
# Write a post about 'django' and a post about 'spring'.
self.add_post(title='django', content='Hello django')
self.add_post(title='spring', content='Hello spring')
# Later, deciding the 'spring' post is not to their liking, the user wants to delete it.
# Click the 'spring' post to move to its page, then press the 'Delete' button.
try:
self.open_post(title='spring')
except NotFoundPostError as notFoundPostError:
self.fail(notFoundPostError.message)
delete_post_button = self.browser.find_element_by_id('id_delete_post_button')
delete_post_button.click()
# The 'spring' post is deleted and no longer appears in the list.
self.assertPostNotIn('spring')
# The 'django' post is not deleted and remains in place.
self.assertPostIn('django')
|
Python
| 0.000004
|
@@ -503,16 +503,20 @@
%EC%95%8A%EC%95%84%EC%84%9C %EC%82%AD%EC%A0%9C%EB%A5%BC
+%ED%95%98%EB%A0%A4%EA%B3%A0
%ED%95%9C%EB%8B%A4.%0A
@@ -557,22 +557,8 @@
%EB%8F%99%ED%95%9C %ED%9B%84
- '%EC%82%AD%EC%A0%9C' %EB%B2%84%ED%8A%BC%EC%9D%84 %EB%88%84%EB%A5%B8%EB%8B%A4.
%0A
@@ -727,28 +727,31 @@
-delete_post_button =
+# '%EC%82%AD%EC%A0%9C' %EB%B2%84%ED%8A%BC%EC%9D%84 %EB%88%84%EB%A5%B8%EB%8B%A4.%0A
sel
@@ -807,35 +807,8 @@
on')
-%0A delete_post_button
.cli
|
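An aside on the test above: `find_element_by_id` is the legacy Selenium API and was removed in Selenium 4. A hedged sketch of the modern spelling, reusing the element id from the row above inside the same test method:

from selenium.webdriver.common.by import By

# equivalent to the deprecated self.browser.find_element_by_id('id_delete_post_button')
delete_post_button = self.browser.find_element(By.ID, 'id_delete_post_button')
delete_post_button.click()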
4f0657272afd693549d65b23ef9badbb59ae4395
|
use File to record in db
|
plugins/feeds/public/hybdrid_analysis.py
|
plugins/feeds/public/hybdrid_analysis.py
|
import logging
from datetime import timedelta
from core.errors import ObservableValidationError
from core.feed import Feed
from core.observables import Hash, Hostname
class Hybrid_Analysis(Feed):
default_values = {
"frequency":
timedelta(minutes=5),
"name":
"Hybdrid-Analysis",
"source":
"https://www.hybrid-analysis.com/feed?json",
"description":
"Hybrid Analysis Public Feeds",
}
def update(self):
for item in self.update_json(headers={'User-agent': 'VxApi Connector'})['data']:
self.analyze(item)
def analyze(self, item):
sha256 = Hash.get_or_create(value=item['sha256'])
tags = []
context = {'source': self.name}
if 'vxfamily' in item:
tags.append(' '.join(item['vxfamily'].split('.')))
if 'tags' in item:
tags.extend(item['tags'])
if 'threatlevel_human' in item:
context['threatlevel_human'] = item['threatlevel_human']
if 'threatlevel' in item:
context['threatlevel'] = item['threatlevel']
if 'type' in item:
context['type'] = item['type']
if 'size' in item:
context['size'] = item['size']
if 'vt_detect' in item:
context['virustotal_score'] = item['vt_detect']
if 'et_alerts_total' in item:
context['et_alerts_total'] = item['et_alerts_total']
if 'process_list' in item:
context['count process spawn'] = len(item['process_list'])
context['url'] = 'https://www.hybrid-analysis.com' + item['reporturl']
sha256.add_context(context)
sha256.tag(tags)
md5 = Hash.get_or_create(value=item['md5'])
md5.tag(tags)
md5.add_context(context)
sha1 = Hash.get_or_create(value=item['sha1'])
sha1.tag(tags)
sha1.add_context(context)
sha256.active_link_to(md5, 'md5', self.name)
sha256.active_link_to(sha1, 'sha1', self.name)
if 'domains' in item:
for domain in item['domains']:
try:
new_host = Hostname.get_or_create(value=domain)
sha256.active_link_to(new_host, 'C2', self.name)
sha1.active_link_to(new_host, 'C2', self.name)
md5.active_link_to(new_host, 'C2', self.name)
new_host.add_context({'source': self.name, 'contacted by': sha256})
except ObservableValidationError as e:
logging.error(e)
if 'extracted_files' in item:
for extracted_file in item['extracted_files']:
context_file_dropped = {'source': self.name}
if not 'sha256' in extracted_file:
logging.error(extracted_file)
continue
new_file = Hash.get_or_create(value=extracted_file['sha256'])
context_file_dropped['virustotal_score'] = 0
context_file_dropped['size'] = extracted_file['file_size']
if 'av_matched' in extracted_file:
context_file_dropped['virustotal_score'] = extracted_file['av_matched']
if 'threatlevel_readable' in extracted_file:
context_file_dropped['threatlevel'] = extracted_file['threatlevel_readable']
if 'av_label' in extracted_file:
new_file.tag(extracted_file['av_label'])
if 'type_tags' in extracted_file:
new_file.tag(extracted_file['type_tags'])
new_file.add_context(context_file_dropped)
new_file.active_link_to(sha256, 'drop', self.name)
new_file.active_link_to(md5, 'drop', self.name)
new_file.active_link_to(sha1, 'drop', self.name)
|
Python
| 0
|
@@ -160,16 +160,22 @@
Hostname
+, File
%0A%0A%0Aclass
@@ -646,16 +646,93 @@
item):%0A
+ f_hyb = File.get_or_create(value='FILE: %7B%7D'.format(item%5B'sha256'%5D))%0A%0A
@@ -1737,22 +1737,21 @@
-sha256
+f_hyb
.add_con
@@ -1772,22 +1772,21 @@
-sha256
+f_hyb
.tag(tag
@@ -1845,61 +1845,59 @@
'%5D)%0A
- md5.tag(tags)%0A md5.add_context(context
+%0A f_hyb.active_link_to(md5, 'md5', self.name
)%0A%0A
@@ -1953,133 +1953,22 @@
'%5D)%0A
- sha1.tag(tags)%0A sha1.add_context(context)%0A%0A sha256.active_link_to(md5, 'md5', self.name)%0A sha256
+%0A f_hyb
.act
@@ -2187,22 +2187,21 @@
-sha256
+f_hyb
.active_
@@ -2259,119 +2259,28 @@
-sha1.active_link_to(new_host, 'C2', self.name)%0A md5.active_link_to(new_host, 'C2', self.name
+logging.debug(domain
)%0A%0A
@@ -2357,22 +2357,21 @@
ed by':
-sha256
+f_hyb
%7D)%0A
@@ -2779,58 +2779,241 @@
e =
-Hash.get_or_create(value=extracted_file%5B'sha256'%5D)
+File.get_or_create(value='FILE: %7B%7D'.format(extracted_file%5B'sha256'%5D))%0A sha256_new_file = Hash.get_or_create(value=extracted_file%5B'sha256'%5D)%0A new_file.active_link_to(sha256_new_file, 'sha256', self.name)%0A
%0A
@@ -3753,167 +3753,37 @@
-new_file.active_link_to(sha256, 'drop', self.name)%0A new_file.active_link_to(md5, 'drop', self.name)%0A new_file.active_link_to(sha1
+f_hyb.active_link_to(new_file
, 'd
|
fcf10e2a5a29bbe268c1405e1755b1e6aed1f417
|
test rm unused imports for one file
|
rj_gameplay/stp/utils/fsm.py
|
rj_gameplay/stp/utils/fsm.py
|
import logging
from enum import Enum
from typing import (
Union,
Callable,
Dict,
Optional,
Iterable,
List,
Any,
TypedDict,
)
State = Enum
TransitionFunction = Callable[[], bool] # Takes no args, returns a bool
class Event(TypedDict):
condition: TransitionFunction
name: str
TransitionTable = Dict[State, Dict[State, Event]] # [from][to] = Event
StateMethod = Callable[[], None] # Takes nothing, returns nothing
OnEnterMethod = Callable[[], None] # Takes nothing, returns nothing
OnExitMethod = Callable[[], None] # Takes nothing, returns nothing
## @brief generic hierarchical state machine class.
#
# states can have substates. If the machine is in a state, then it is also implicitly in that state's parent state
# this basically provides for polymorphism/subclassing of state machines
#
# There are three methods corresponding to each state:
# * on_enter_STATE
# * execute_STATE
# * on_exit_STATE
#
# Subclasses of StateMachine can optionally implement them and they will automatically be called at the appropriate times.
class StateMachine:
def __init__(self, start_state: State):
self._transitions: TransitionTable = {}
self._start_state: State = start_state
self._state: Optional[State] = None
@property
def start_state(self) -> State:
return self._start_state
## Resets the FSM back into the start state
def restart(self) -> None:
self.transition(self.start_state)
## Runs the FSM
# checks transition conditions for all edges leading away from the current state
# if one evaluates to true, we transition to it
# if more than one evaluates to true, we throw a RuntimeError
def tick(self) -> None:
# call execute_STATENAME
if self.state is not None:
method_name = "execute_" + self.state.name
exec_callback: Optional[StateMethod] = None
try:
exec_callback = getattr(self, method_name)
except AttributeError:
pass
if exec_callback is not None:
exec_callback()
if self.state is None:
self.transition(self.start_state)
else:
# transition if an 'event' fires
next_states = []
if self.state in self._transitions:
for next_state, transition in self._transitions[self.state].items():
if transition["condition"]():
next_states += [next_state]
if len(next_states) > 1:
logging.warning(
"Ambiguous fsm transitions from state'"
+ str(self.state)
+ "'. The following states are reachable now: "
+ str(next_states)
+ "; Proceeding by taking the first option."
)
if len(next_states) > 0:
self.transition(next_states[0])
# if you add a transition that already exists, the old one will be overwritten
def add_transition(
self,
from_state: State,
to_state: State,
condition: Union[bool, TransitionFunction],
event_name: str,
) -> None:
if isinstance(condition, bool):
condition_fn = lambda: condition
else:
condition_fn = condition
if from_state not in self._transitions:
self._transitions[from_state] = {}
self._transitions[from_state][to_state] = {
"condition": condition_fn,
"name": event_name,
}
# sets @state to the new_state given
# calls 'on_exit_STATENAME()' if it exists
# calls 'on_enter_STATENAME()' if it exists
def transition(self, new_state: State) -> None:
if self.state is not None:
method_name = "on_exit_" + self.state.name
exit_callback: Optional[OnExitMethod] = None
try:
exit_callback = getattr(
self, method_name
) # call the transition FROM method if it exists
except AttributeError:
pass
if exit_callback is not None:
exit_callback()
method_name = "on_enter_" + new_state.name # pylint: disable=no-member
enter_callback: Optional[OnEnterMethod] = None
try:
enter_callback = getattr(
self, method_name
) # call the transition TO method if it exists
except AttributeError:
pass
if enter_callback is not None:
enter_callback()
self._state = new_state
@property
def state(self) -> Optional[State]:
return self._state
|
Python
| 0
|
@@ -104,41 +104,8 @@
al,%0A
- Iterable,%0A List,%0A Any,%0A
|
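Since the comment block above documents the on_enter_STATE / execute_STATE / on_exit_STATE convention, here is a minimal usage sketch, assuming the StateMachine class from this row is in scope (the Light states and TrafficFsm are hypothetical):

from enum import Enum

class Light(Enum):
    RED = 0
    GREEN = 1

class TrafficFsm(StateMachine):
    def __init__(self):
        super().__init__(start_state=Light.RED)
        # unconditional edge RED -> GREEN; bool conditions are wrapped in a lambda internally
        self.add_transition(Light.RED, Light.GREEN, True, "go")

    def on_enter_GREEN(self) -> None:
        # looked up by name ("on_enter_" + state.name) and called automatically on transition
        print("entered GREEN")

fsm = TrafficFsm()
fsm.tick()  # first tick enters the start state (RED)
fsm.tick()  # the "go" condition fires and on_enter_GREEN runs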
bbd7266a9e228ac111497b12d00ea71b3e0f4f5a
|
fix imports
|
edx_proctoring/management/commands/set_attempt_status.py
|
edx_proctoring/management/commands/set_attempt_status.py
|
"""
Django management command to manually set the attempt status for a user in a proctored exam
"""
from optparse import make_option
from django.core.management.base import BaseCommand
from edx_proctoring.api import (
update_attempt_status,
get_exam_by_id
)
from edx_proctoring.models import ProctoredExamStudentAttemptStatus
class Command(BaseCommand):
"""
Django Management command to force a background check of all possible notifications
"""
option_list = BaseCommand.option_list + (
make_option('-e', '--exam',
metavar='EXAM_ID',
dest='exam_id',
help='exam_id to change'),
make_option('-u', '--user',
metavar='USER',
dest='user',
help="user_id of user to affect"),
make_option('-t', '--to',
metavar='TO_STATUS',
dest='to_status',
help='the status to set'),
)
def handle(self, *args, **options):
"""
Management command entry point, simply call into the signal firing
"""
exam_id = options['exam_id']
user_id = options['user_id']
to_status = options['to_status']
msg = (
'Running management command to update user {user_id} '
'attempt status on exam_id {exam_id} to {to_status}'.format(
user_id=user_id,
exam_id=exam_id,
to_status=to_status
)
)
print msg
if not ProctoredExamStudentAttemptStatus.is_valid_status(to_status):
raise Exception('{to_status} is not a valid attempt status!'.format(to_status=to_status))
# get exam, this will throw exception if does not exist, so let it bomb out
get_exam_by_id(exam_id)
update_attempt_status(exam_id, user_id, to_status)
print 'Completed!'
|
Python
| 0.000002
|
@@ -185,90 +185,8 @@
nd%0A%0A
-from edx_proctoring.api import (%0A update_attempt_status,%0A get_exam_by_id%0A)%0A%0A
from
@@ -686,24 +686,27 @@
dest='user
+_id
',%0A
@@ -1055,24 +1055,138 @@
%22%22%22%0A%0A
+ from edx_proctoring.api import (%0A update_attempt_status,%0A get_exam_by_id%0A )%0A%0A
exam
|
2c23fa4cf151964d7a170430efdd16a7471ac3bd
|
Version bump
|
mixins/__init__.py
|
mixins/__init__.py
|
__version__ = "0.0.2"
|
Python
| 0.000001
|
@@ -16,7 +16,7 @@
0.0.
-2
+3
%22%0A
|
9c0d62d7b08d63b7daf338a16fc34896856aefb2
|
Test code for encoding password in postgresql uri
|
controllers/lms.py
|
controllers/lms.py
|
import sys
import os
import subprocess
from gluon import current
import paramiko
from ednet.ad import AD
from ednet.canvas import Canvas
from ednet.appsettings import AppSettings
# Needed for remote connection?
auth.settings.allow_basic_login = True
#auth.settings.actions_disabled.append('login')
#auth.settings.allow_basic_login_only = True
#auth.settings.actions.login_url=URL('your_own_error_page')
@auth.requires_membership("Administrators")
def credential_student():
response.view = 'generic.json'
db = current.db
key = ""
msg = ""
hash = ""
user_name = ""
full_name = ""
# Get the user in question
if len(request.args) > 0:
user_name = request.args[0]
if user_name is not None:
# First - does the user exist?
user_exists = False
rows = db(db.auth_user.username == user_name).select(db.auth_user.id)
for row in rows:
user_exists = True
if user_exists is True:
key, msg, hash, full_name = Canvas.EnsureStudentAccessToken(user_name)
else:
# User doesn't exist!
msg = "Invalid User!"
return dict(key=key, msg=msg, hash=hash, full_name=full_name)
def get_firewall_list():
response.view = 'default/index.json'
db = current.db
rs = db(db.ope_laptop_firewall_rules).select(db.ope_laptop_firewall_rules.ALL).as_list()
return response.json(rs)
|
Python
| 0
|
@@ -32,16 +32,29 @@
process%0A
+import urllib
%0Afrom gl
@@ -415,16 +415,605 @@
page')%0A%0A
+@auth.requires_membership(%22Administrators%22)%0Adef test():%0A try:%0A canvas_db_pw = str(os.environ%5B%22IT_PW%22%5D) + %22%22%0A except KeyError as ex:%0A # IT_PW not set?%0A canvas_db_pw = %22%3CIT_PW_NOT_SET%3E%22%0A db_canvas = None%0A err = None%0A try:%0A db_canvas = DAL('postgres://postgres:' + urllib.quote_plus(canvas_db_pw) + '@postgresql/canvas_production', decode_credentials=True, migrate=False)%0A except RuntimeError as ex:%0A # Error connecting, move on and return None%0A db_canvas = None%0A err = str(ex)%0A return dict(db_canvas=db_canvas, err=err)%0A%0A
@auth.re
|
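The `test()` function this diff adds runs the database password through `urllib.quote_plus` before splicing it into the connection URI, so reserved characters cannot break the URI. The same idea in Python 3, where the function lives in `urllib.parse` (the password value is hypothetical):

from urllib.parse import quote_plus

canvas_db_pw = "p@ss:w/rd"  # hypothetical password with URI-reserved characters
uri = 'postgres://postgres:' + quote_plus(canvas_db_pw) + '@postgresql/canvas_production'
print(uri)  # postgres://postgres:p%40ss%3Aw%2Frd@postgresql/canvas_production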
30eaa94cb788bc8eeb85aab7edc4bd544ce3e811
|
Fix Chrome 46 window management
|
lg_common/src/lg_common/managed_browser.py
|
lg_common/src/lg_common/managed_browser.py
|
import sys
import rospy
import socket
import shutil
from lg_common import ManagedApplication, ManagedWindow
from tornado.websocket import websocket_connect
DEFAULT_BINARY = '/usr/bin/google-chrome'
DEFAULT_ARGS = [
'--no-first-run',
'--allow-file-access-from-files',
'--disable-default-apps',
'--disable-java',
'--disable-session-storage',
'--disable-translate',
'--touch-events=enabled',
'--disable-pinch',
'--overscroll-history-navigation=0',
'--disable-touch-editing',
'--log-level=0',
'--enable-logging',
'--v=1',
]
class ManagedBrowser(ManagedApplication):
def __init__(self, url=None, slug=None, kiosk=True, geometry=None,
binary=DEFAULT_BINARY, remote_debugging_port=None, app=False,
shell=True, **kwargs):
cmd = [binary]
# If no debug port provided, pick one.
if remote_debugging_port is None:
remote_debugging_port = ManagedBrowser.get_os_port()
self.debug_port = remote_debugging_port
cmd.append('--remote-debugging-port={}'.format(self.debug_port))
# If no slug provided, attempt to use the node name.
if slug is None:
try:
slug = rospy.get_name().lstrip('/')
except Exception as e:
sys.stderr.write('Could not resolve slug for this browser!')
sys.stderr.write(' * Has your node been initialized?')
raise e
tmp_dir = '/tmp/lg_browser-{}'.format(slug)
try:
rospy.loginfo("Purging ManagedBrowser directory: %s" % tmp_dir)
shutil.rmtree(tmp_dir)
except OSError, e:
rospy.logerr("Could not purge the %s directory because %s" % (tmp_dir, e))
cmd.append('--user-data-dir={}'.format(tmp_dir))
cmd.append('--disk-cache-dir={}'.format(tmp_dir))
cmd.append('--crash-dumps-dir={}/crashes'.format(tmp_dir))
cmd.extend(DEFAULT_ARGS)
# All remaining kwargs are mapped to command line args.
# _ is replaced with -.
def consume_kwarg(item):
key, value = item
arg = '--{}'.format(key.replace('_', '-'))
if value is None:
return arg
if isinstance(value, bool):
arg += '=' + str(value).lower()
else:
arg += '=' + str(value)
return arg
args = map(consume_kwarg, kwargs.iteritems())
cmd.extend(args)
if app:
cmd.append('--app={}'.format(url))
else:
if kiosk:
cmd.append('--kiosk')
if url is not None:
cmd.append(url)
cmd.append('&')
w_instance = 'Google-chrome \\({}\\)'.format(tmp_dir)
window = ManagedWindow(w_instance=w_instance, geometry=geometry)
rospy.loginfo("Command {}".format(cmd))
super(ManagedBrowser, self).__init__(cmd=cmd, window=window)
@staticmethod
def get_os_port():
"""
Lets the OS assign a port number.
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('', 0))
port = sock.getsockname()[1]
sock.close()
return port
def send_debug_sock_msg(self, msg):
"""
Writes a string to the browser's debug web socket.
"""
rospy.warn(
'ManagedBrowser.send_debug_sock_msg() probably not yet working'
)
ws_url = 'ws://localhost:{}'.format(self.debug_port)
conn = yield websocket_connect(ws_url, connect_timeout=1)
conn.write_message(msg)
conn.close()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
Python
| 0
|
@@ -2722,16 +2722,227 @@
nd('&')%0A
+ # In Chrome 46 the instance changes from%0A # Google-chrome (...) to%0A # google-chrome (...)%0A # Since all regex is escaped further down,%0A # just don't match the 'g' for now.%0A
@@ -2955,17 +2955,16 @@
ance = '
-G
oogle-ch
|
a619f703b2d259877e30d3e1ede11813c014f3ad
|
Fix the AvailableActionsPrinter to support the new multiplayer action spec.
|
pysc2/env/available_actions_printer.py
|
pysc2/env/available_actions_printer.py
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An env wrapper to print the available actions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from pysc2.env import base_env_wrapper
class AvailableActionsPrinter(base_env_wrapper.BaseEnvWrapper):
"""An env wrapper to print the available actions."""
def __init__(self, env):
super(AvailableActionsPrinter, self).__init__(env)
self._seen = set()
self._action_spec = self.action_spec()
def step(self, *args, **kwargs):
all_obs = super(AvailableActionsPrinter, self).step(*args, **kwargs)
for obs in all_obs:
for avail in obs.observation["available_actions"]:
if avail not in self._seen:
self._seen.add(avail)
self._print(self._action_spec.functions[avail].str(True))
return all_obs
def _print(self, s):
print(s)
|
Python
| 0
|
@@ -1062,16 +1062,19 @@
n_spec()
+%5B0%5D
%0A%0A def
|
e67dc6d0cb46bcce0750246f99c6f99cd3e1ccbc
|
fix tempita.py for distutils build
|
scipy/_build_utils/tempita.py
|
scipy/_build_utils/tempita.py
|
import sys
import os
import argparse
from Cython import Tempita as tempita
# XXX: If this import ever fails (does it really?), vendor either
# cython.tempita or numpy/npy_tempita.
def process_tempita(fromfile, outfile=None):
"""Process tempita templated file and write out the result.
The template file is expected to end in `.c.in` or `.pyx.in`:
E.g. processing `template.c.in` generates `template.c`.
"""
if outfile is None:
# We're dealing with a distitutils build here, write in-place
outfile = os.path.join(os.path.dirname(__file__),
os.path.splitext(os.path.split(fromfile)[1])[0])
from_filename = tempita.Template.from_filename
template = from_filename(fromfile,
encoding=sys.getdefaultencoding())
content = template.substitute()
with open(outfile, 'w') as f:
f.write(content)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("infile", type=str,
help="Path to the input file")
parser.add_argument("-o", "--outdir", type=str,
help="Path to the output directory")
parser.add_argument("-i", "--ignore", type=str,
help="An ignored input - may be useful to add a "
"dependency between custom targets")
args = parser.parse_args()
if not args.infile.endswith('.in'):
raise ValueError(f"Unexpected extension: {args.infile}")
outdir_abs = os.path.join(os.getcwd(), args.outdir)
outfile = os.path.join(outdir_abs,
os.path.splitext(os.path.split(args.infile)[1])[0])
process_tempita(args.infile, outfile)
if __name__ == "__main__":
main()
|
Python
| 0
|
@@ -480,18 +480,16 @@
h a dist
-it
utils bu
@@ -543,100 +543,15 @@
ath.
-join(os.path.dirname(__file__),%0A os.path.splitext(os.path.
spli
+tex
t(fr
@@ -558,24 +558,18 @@
omfile)%5B
-1%5D)%5B0%5D)%0A
+0%5D
%0A%0A fr
|
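The fix in this diff boils the in-place output path down to `os.path.splitext(fromfile)[0]`, which strips only the final `.in` suffix and writes next to the template. A quick check with a hypothetical path:

import os

fromfile = 'scipy/linalg/template.pyx.in'  # hypothetical template path
print(os.path.splitext(fromfile)[0])       # scipy/linalg/template.pyx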
c489157661b5738f37c5213f64736d7e88a0700b
|
Update application_filter.py
|
Data-Science-Tools/application_filter.py
|
Data-Science-Tools/application_filter.py
|
'''
Decision Tree V.1
Simple tool for a company to screen applications in the hiring process
Coded By: Tyler Linne
Date: 4/27/16
'''
#Import required packages
import numpy as np
import pandas as pd
import pydot
from sklearn import tree
from IPython.display import Image
from sklearn.externals.six import StringIO
from sklearn.ensemble import RandomForestClassifier
#Import a csv file holding the data the tree needs
input_file = "c:/UserName/user/documents/hire_data.csv"
df = pd.read_csv(input_file, header = 0)
#Set text values in csv to numerical values
d = {'Y': 1, 'N': 0}
df['Hired'] = df['Hired'].map(d)
df['Currently Employed?'] = df['Currently Employed?'].map(d)
df['Private School'] = df['Private School'].map(d)
df['State School'] = df['State School'].map(d)
df['Top-tier school'] = df['Top-tier school'].map(d)
df['Internship'] = df['Internship'].map(d)
d = { 'AS': 0, 'BS': 1, 'MS': 2, 'PhD': 3}
df['Level of Education'] = df['Level of Education'].map(d)
df.head()
#Filter out headers that hold various candidate data
features = list(df.columns[:7])
#Create the tree using the desired header and the separated headers list
y = df["Hired"]
X = df[features]
clf = tree.DecisionTreeClassifier()
clf = clf.fit(X,y)
#Displaying the tree in a readable format
dot_data = StringIO()
tree.export_graphviz(clf, out_file=dot_data,
feature_names=features)
graph = pydot.graph_from_dot_data(dot_data.getvalue())
Image(graph.create_png())
#The code below this point would be run in a separate file, with this one imported to it. It is placed in this file for display
#Test the validity of the tree using the "Random Forest" method with a factor of 25 separate tests
clf = RandomForestClassifier(n_estimators=25)
clf = clf.fit(X, y)
#Predict employment of an employed 10-year veteran with an AS Degree
print clf.predict([[10, 1, 4, 0, 0, 0, 0]])
#...and an unemployed 10-year veteran with an AS Degree
print clf.predict([[10, 0, 4, 0, 0, 0, 0]])
|
Python
| 0.000001
|
@@ -1067,17 +1067,17 @@
olumns%5B:
-7
+6
%5D)%0A%0A#Cre
|
8f1a446f3ddd00dd68206e3f66f7e4355aa56da1
|
test cmd scan
|
bmd_test.py
|
bmd_test.py
|
import os
import bottle
from os import path
from unittest import TestCase
import bmd
import utils
class TestBMD(TestCase):
def setUp(self):
utils._G.scan_mode = False
@bmd.tpl_utils
def fakeview(self):
return dict()
def dirpath(self, dname):
return path.join(path.dirname(__file__), 'testdata', dname)
def chdir(self, dname):
os.chdir(self.dirpath(dname))
def test_tpl_utils(self):
d = self.fakeview()
self.assertIsInstance(d, dict)
def test_static(self):
f = bmd.static('bmd.css')
self.assertIsInstance(f, bottle.HTTPResponse)
self.assertEqual(f.status, '200 OK')
self.assertEqual(f.content_type, 'text/css; charset=UTF-8')
self.assertEqual(f.charset, 'UTF-8')
self.assertEqual(len(f.body.read()), 561)
f.body.close()
def test_static404(self):
with self.assertRaises(bottle.HTTPError) as cm:
bmd.static('nonexistent.css')
self.assertIsInstance(cm.exception, bottle.HTTPResponse)
self.assertEqual(cm.exception.status, '404 Not Found')
self.assertEqual(cm.exception.charset, 'UTF-8')
def test_gendoc(self):
self.chdir('gendoc')
d = bmd.gendoc('index.md')
self.assertIsInstance(d, str)
self.assertEqual(len(d), 373)
def test_gendoc404(self):
self.chdir('gendoc')
with self.assertRaises(bottle.HTTPError) as cm:
bmd.gendoc('nonexistent.md')
r = cm.exception
self.assertIsInstance(r, bottle.HTTPResponse)
self.assertEqual(r.status, '404 Not Found')
def test_index(self):
self.chdir('gendoc')
d = bmd.index()
self.assertIsInstance(d, str)
self.assertEqual(len(d), 373)
def test_index_readme(self):
self.chdir('gendoc-readme')
d = bmd.index()
self.assertIsInstance(d, str)
self.assertEqual(len(d), 375)
def test_noindex(self):
self.chdir('gendoc-noindex')
with self.assertRaises(bottle.HTTPError) as cm:
bmd.index()
r = cm.exception
self.assertIsInstance(r, bottle.HTTPResponse)
self.assertEqual(r.status, '404 Not Found')
def test_sync_static(self):
srcdir = self.dirpath('sync-static')
src_f = path.join(srcdir, 'file.txt')
dstdir = self.dirpath('sync-static.out')
dst_f = path.join(dstdir, 'file.txt')
bmd.sync_static(srcdir, dstdir)
src_fh = open(src_f, 'r')
dst_fh = open(dst_f, 'r')
self.assertEqual(src_fh.read(), dst_fh.read())
src_fh.close()
dst_fh.close()
os.unlink(dst_f)
os.rmdir(dstdir)
def test_sync_static_nosrc(self):
with self.assertRaises(FileNotFoundError):
bmd.sync_static(self.dirpath('sync-static.nonexistent'),
self.dirpath('sync-static.out'))
def test_sync_static_dsterror(self):
srcdir = self.dirpath('sync-static')
dstdir = '/nonexistent'
with self.assertRaises(PermissionError):
bmd.sync_static(srcdir, dstdir)
def rmdir_scanout(self):
dstdir = self.dirpath('scan.out')
for n in ('static/bmd.css', 'index.html'):
f = path.join(dstdir, n)
os.unlink(f)
for n in ('static', ''):
d = path.join(dstdir, n)
os.rmdir(d)
def test_scan(self):
srcdir = self.dirpath('gendoc')
dstdir = self.dirpath('scan.out')
r = bmd.scan(srcdir, dstdir)
self.assertEqual(r, 0)
self.rmdir_scanout()
def test_scan_out_exists(self):
srcdir = self.dirpath('gendoc')
dstdir = self.dirpath('scan.out')
os.makedirs(dstdir)
r = bmd.scan(srcdir, dstdir)
self.assertEqual(r, 0)
self.rmdir_scanout()
def test_cmd_noargs(self):
devnull = open(os.devnull, 'w')
s = bmd.cmd(argv = [], outs = devnull)
self.assertEqual(s, 1)
devnull.close()
#~ def test_cmd_scan(self):
#~ argv = ['scan', '-i', self.dirpath('gendoc'), '-o', self.dirpath('scan.out')]
#~ bmd.cmd(argv)
#~ self.rmdir_scanout()
|
Python
| 0.000001
|
@@ -4052,19 +4052,16 @@
e()%0A%0A
- #~
def tes
@@ -4085,19 +4085,16 @@
%0A
- #~
argv =
@@ -4172,18 +4172,19 @@
-#~
+r =
bmd.cmd
@@ -4202,10 +4202,38 @@
-#~
+self.assertEqual(r, 0)%0A
sel
|
78856b3752dc1db113074c848a0c3f664c55fcd9
|
fix rebooting after a partially interactive install
|
subiquity/server/controllers/reboot.py
|
subiquity/server/controllers/reboot.py
|
# Copyright 2020 Canonical, Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import asyncio
import logging
import os
import platform
import subprocess
from subiquitycore.context import with_context
from subiquitycore.utils import arun_command, run_command
from subiquity.common.apidef import API
from subiquity.server.controller import SubiquityController
from subiquity.server.controllers.install import ApplicationState
log = logging.getLogger("subiquity.controllers.restart")
class RebootController(SubiquityController):
endpoint = API.reboot
def __init__(self, app):
super().__init__(app)
self.user_reboot_event = asyncio.Event()
self.rebooting_event = asyncio.Event()
async def POST(self):
self.app.controllers.Install.stop_uu()
self.user_reboot_event.set()
await self.rebooting_event.wait()
def start(self):
self.app.aio_loop.create_task(self._run())
async def _run(self):
Install = self.app.controllers.Install
await Install.install_task
await self.app.controllers.Late.run_event.wait()
await self.copy_logs_to_target()
if self.app.interactive:
await self.user_reboot_event.wait()
self.reboot()
elif self.app.state == ApplicationState.DONE:
self.reboot()
@with_context()
async def copy_logs_to_target(self, context):
if self.opts.dry_run and 'copy-logs-fail' in self.app.debug_flags:
raise PermissionError()
target_logs = os.path.join(
self.app.base_model.target, 'var/log/installer')
if self.opts.dry_run:
os.makedirs(target_logs, exist_ok=True)
else:
await arun_command(
['cp', '-aT', '/var/log/installer', target_logs])
journal_txt = os.path.join(target_logs, 'installer-journal.txt')
try:
with open(journal_txt, 'w') as output:
await arun_command(
['journalctl', '-b'],
stdout=output, stderr=subprocess.STDOUT)
except Exception:
log.exception("saving journal failed")
@with_context()
def reboot(self, context):
self.rebooting_event.set()
if self.opts.dry_run:
self.app.exit()
else:
if platform.machine() == 's390x':
run_command(["chreipl", "/target/boot"])
run_command(["/sbin/reboot"])
|
Python
| 0.000001
|
@@ -1469,24 +1469,88 @@
ent.wait()%0A%0A
+ def interactive(self):%0A return self.app.interactive%0A%0A
def star
|
bfa1a36d1c731f730cabbc914d52c51d5ddf61dc
|
Add token auth and session
|
api/evas_api/settings.py
|
api/evas_api/settings.py
|
import os
import dj_database_url
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
SECRET_KEY = '-@y4mtyhs=i-uc7q9b==ur(zhs%gu628wtku%q8d8$-vp1qbza'
DEBUG = bool(os.environ.get('DEBUG', False))
TEMPLATE_DEBUG = bool(os.environ.get('TEMPLATE_DEBUG', False))
DATABASES = {'default': dj_database_url.config(default='postgres://localhost/evas_api')}
AUTH_USER_MODEL = 'users.User'
ALLOWED_HOSTS = []
STATIC_URL = '/static/'
ROOT_URLCONF = 'evas_api.urls'
WSGI_APPLICATION = 'evas_api.wsgi.application'
INSTALLED_APPS = (
# local
'users',
# third party apps
'rest_framework',
'south',
'user_management.api',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
|
Python
| 0
|
@@ -1191,16 +1191,208 @@
re',%0A)%0A%0A
+REST_FRAMEWORK = %7B%0A 'DEFAULT_AUTHENTICATION_CLASSES': (%0A 'rest_framework.authentication.BasicAuthentication',%0A 'rest_framework.authentication.SessionAuthentication',%0A )%0A%7D%0A%0A
LANGUAGE
|
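The subject line says token auth, but the hunk registers Basic and Session authentication. For contrast, a token-based setup would look like the sketch below; this is stock Django REST Framework configuration, not something taken from the diff:

REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.TokenAuthentication',
        'rest_framework.authentication.SessionAuthentication',
    )
}
# TokenAuthentication also needs 'rest_framework.authtoken' in INSTALLED_APPS
# (it stores tokens in its own table), followed by a migrate.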
d38a1fd25c9ffbfb8599c81e69617a1cb271c068
|
Add timeout
|
certserver.py
|
certserver.py
|
#!/usr/bin/python3
import argparse
import json
from http import server
import socket
import ssl
import subprocess
parser = argparse.ArgumentParser(description='certserver')
parser.add_argument(
'--ca-cert',
dest='ca_cert',
action='store',
required=True)
parser.add_argument(
'--listen-host',
dest='listen_host',
action='store',
default='::')
parser.add_argument(
'--listen-port',
dest='listen_port',
type=int,
action='store',
default=443)
parser.add_argument(
'--server-key',
dest='server_key',
action='store',
required=True)
parser.add_argument(
'--server-cert',
dest='server_cert',
action='store',
required=True)
parser.add_argument(
'--sign-command',
dest='sign_command',
action='store',
required=True)
FLAGS = parser.parse_args()
class HTTPServer6(server.HTTPServer):
address_family = socket.AF_INET6
class CertServer(object):
def __init__(self, listen_host, listen_port, server_key, server_cert, ca_cert, sign_command):
class RequestHandler(server.BaseHTTPRequestHandler):
def do_POST(self):
print('Request from: [%s]:%d' % (self.client_address[0], self.client_address[1]))
peer_cert = json.dumps(dict(x[0] for x in self.request.getpeercert()['subject']), sort_keys=True)
print('Client cert:\n\t%s' % peer_cert.replace('\n', '\n\t'))
assert self.headers['Content-Type'] == 'application/x-pem-file'
size = int(self.headers['Content-Length'])
cert = self.rfile.read(size)
with subprocess.Popen(sign_command, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) as proc:
proc.stdin.write(cert)
proc.stdin.close()
signed = proc.stdout.read()
stderr = proc.stderr.read().decode('ascii')
print('OpenSSL output:\n\t%s' % stderr.replace('\n', '\n\t').strip())
if proc.wait() == 0:
self.send_response(200)
self.send_header('Content-Type', 'application/x-pem-file')
self.end_headers()
self.wfile.write(signed)
else:
self.send_response(500)
self.end_headers()
self._httpd = HTTPServer6((listen_host, listen_port), RequestHandler)
self._httpd.socket = ssl.wrap_socket(
self._httpd.socket,
keyfile=server_key,
certfile=server_cert,
ca_certs=ca_cert,
server_side=True,
cert_reqs=ssl.CERT_REQUIRED,
ssl_version=ssl.PROTOCOL_TLSv1_2,
ciphers='ECDHE-ECDSA-AES256-GCM-SHA384')
def Serve(self):
self._httpd.serve_forever()
def main():
server = CertServer(
FLAGS.listen_host,
FLAGS.listen_port,
FLAGS.server_key,
FLAGS.server_cert,
FLAGS.ca_cert,
FLAGS.sign_command)
server.Serve()
if __name__ == '__main__':
main()
|
Python
| 0.000015
|
@@ -2581,16 +2581,48 @@
SHA384')
+%0A self._httpd.settimeout(5.0)
%0A%0A def
|
130ce365a0d7c709004da5dedf0e0e35ff6ad3d0
|
Fix reference error and add script for packing pypi package (#1172)
|
python/dllib/src/bigdl/utils/engine.py
|
python/dllib/src/bigdl/utils/engine.py
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
import glob
def __prepare_spark_env():
modules = sys.modules
if "pyspark" not in modules or "py4j" not in modules:
spark_home = os.environ.get('SPARK_HOME', None)
if not spark_home:
raise ValueError(
"""Could not find Spark. Pls make sure SPARK_HOME env is set:
export SPARK_HOME=path to your spark home directory""")
py4j = glob.glob(os.path.join(spark_home, 'python/lib', 'py4j-*.zip'))[0]
pyspark = glob.glob(os.path.join(spark_home, 'python/lib', 'pyspark*.zip'))[0]
sys.path.insert(0, py4j)
sys.path.insert(0, pyspark)
def __prepare_bigdl_env():
import bigdl.nn.layer
jar_dir = os.path.abspath(bigdl.nn.layer.__file__ + "/../../")
jar_paths = glob.glob(os.path.join(jar_dir, "share/lib/*.jar"))
conf_paths = glob.glob(os.path.join(jar_dir, "share/conf/*.conf"))
def append_path(env_var_name, path):
try:
os.environ[env_var_name] = path + ":" + os.environ[
env_var_name] # noqa
except KeyError:
os.environ[env_var_name] = path
if jar_paths and conf_paths:
assert len(jar_paths) == 1, "Expecting one jar: %s" % len(jar_paths)
assert len(conf_paths) == 1, "Expecting one conf: %s" % len(conf_paths)
print("Adding %s to spark.driver.extraClassPath" % jar_paths[0])
print("Adding %s to spark.executor.extraClassPath" % jar_paths[0])
append_path("spark.driver.extraClassPath", jar_paths[0])
append_path("spark.executor.extraClassPath", jar_paths[0])
append_path("SPARK_CLASSPATH", jar_paths[0])
print("Prepending %s to sys.path" % conf_paths[0])
sys.path.insert(0, conf_paths[0])
def prepare_env():
__prepare_spark_env()
__prepare_bigdl_env()
|
Python
| 0
|
@@ -991,16 +991,55 @@
ory%22%22%22)%0A
+ print(%22Using %25s%22 %25 spark_home)%0A
@@ -1104,32 +1104,32 @@
y4j-*.zip'))%5B0%5D%0A
-
pyspark
@@ -1301,34 +1301,8 @@
():%0A
- import bigdl.nn.layer%0A
@@ -1331,23 +1331,8 @@
ath(
-bigdl.nn.layer.
__fi
@@ -1543,16 +1543,84 @@
try:%0A
+ print(%22Adding %25s to %25s%22 %25 (jar_paths%5B0%5D, env_var_name))%0A
@@ -1887,32 +1887,32 @@
len(jar_paths)%0A
+
assert l
@@ -1979,288 +1979,8 @@
hs)%0A
- print(%22Adding %25s to spark.driver.extraClassPath%22 %25 jar_paths%5B0%5D)%0A print(%22Adding %25s to spark.executor.extraClassPath%22 %25 jar_paths%5B0%5D)%0A append_path(%22spark.driver.extraClassPath%22, jar_paths%5B0%5D)%0A append_path(%22spark.executor.extraClassPath%22, jar_paths%5B0%5D)%0A
|
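The nested `append_path` helper prepends entries with `:` separators, the same convention as PATH. A quick standalone check (the variable name DEMO_CLASSPATH is hypothetical):

import os

def append_path(env_var_name, path):
    try:
        # new path goes in front, existing value keeps its place
        os.environ[env_var_name] = path + ":" + os.environ[env_var_name]
    except KeyError:
        os.environ[env_var_name] = path

append_path("DEMO_CLASSPATH", "/opt/a.jar")
append_path("DEMO_CLASSPATH", "/opt/b.jar")
print(os.environ["DEMO_CLASSPATH"])  # /opt/b.jar:/opt/a.jar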
b0a948e2036ff232c2026e742a42ab9e9c4fbc07
|
Drop unused IndexTypeVar
|
cfgrib/abc.py
|
cfgrib/abc.py
|
"""Abstract Base Classes for GRIB messsages and containers"""
import abc
import typing as T
MessageIdTypeVar = T.TypeVar("MessageIdTypeVar")
MessageTypeVar = T.TypeVar("MessageTypeVar", bound="Message")
IndexTypeVar = T.TypeVar("IndexTypeVar", bound="Index") # type: ignore
Message = T.Mapping[str, T.Any]
MutableMessage = T.MutableMapping[str, T.Any]
Container = T.Mapping[MessageIdTypeVar, MessageTypeVar]
class Index(T.Mapping[str, T.List[T.Any]], T.Generic[MessageIdTypeVar, MessageTypeVar]):
container: Container[MessageIdTypeVar, MessageTypeVar]
index_keys: T.List[str]
message_id_index: T.List[T.Tuple[T.Tuple[T.Any, ...], T.List[MessageIdTypeVar]]]
filter_by_keys: T.Dict[str, T.Any] = {}
@abc.abstractmethod
def subindex(
self, filter_by_keys: T.Mapping[str, T.Any] = {}, **query: T.Any
) -> "Index[MessageIdTypeVar, MessageTypeVar]":
pass
@abc.abstractmethod
def getone(self, item: str) -> T.Any:
pass
@abc.abstractmethod
def first(self) -> MessageTypeVar:
pass
|
Python
| 0
|
@@ -200,80 +200,8 @@
ge%22)
-%0AIndexTypeVar = T.TypeVar(%22IndexTypeVar%22, bound=%22Index%22) # type: ignore
%0A%0AMe
|
54f978c3ed960997b8a5fa9f0e443b17e702ec85
|
Update build_flags to point to proper libdir
|
python/scannerpy/stdlib/build_flags.py
|
python/scannerpy/stdlib/build_flags.py
|
import os.path
import sys
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
def get_include():
return os.path.abspath(os.path.join(SCRIPT_DIR, '..', 'include'))
def print_include():
sys.stdout.write(get_include())
def get_lib():
return os.path.abspath(os.path.join(SCRIPT_DIR, '..'))
def print_lib():
sys.stdout.write(get_lib())
def get_cmake():
return os.path.abspath(os.path.join(SCRIPT_DIR, '..', 'cmake', 'Op.cmake'))
def print_cmake():
sys.stdout.write(get_cmake())
def get_flags():
return (
'-std=c++11 -I{include} -L{libdir} -lscanner'.format(
include=get_include(),
libdir=get_lib()))
def print_flags():
sys.stdout.write(get_flags())
|
Python
| 0
|
@@ -297,16 +297,23 @@
IR, '..'
+, 'lib'
))%0A%0Adef
|
acf64bd5b91822e449427d3bab24995d1187526c
|
add fx stubs to account CLI
|
quantrocket/cli/subcommands/account.py
|
quantrocket/cli/subcommands/account.py
|
# Copyright 2017 QuantRocket - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..utils.parse import parse_date
from quantrocket.constants.account import ACCOUNT_FIELDS
def add_subparser(subparsers):
_parser = subparsers.add_parser("account", description="QuantRocket account CLI", help="quantrocket account -h")
_subparsers = _parser.add_subparsers(title="subcommands", dest="subcommand")
_subparsers.required = True
parser = _subparsers.add_parser("balance", help="get a snapshot of current account balance info")
group = parser.add_mutually_exclusive_group()
group.add_argument("-n", "--nlv", action="store_true", help="only show NLV")
group.add_argument("-c", "--cushion", action="store_true", help="only show margin cushion")
group.add_argument("-f", "--fields", metavar="FIELD", nargs="*", choices=ACCOUNT_FIELDS, help="only display these fields")
parser.add_argument("-a", "--accounts", nargs="*", metavar="ACCOUNT", help="only show these accounts")
group = parser.add_mutually_exclusive_group()
group.add_argument("-b", "--below-cushion", type=float, metavar="CUSHION", help="only show accounts where the cushion is below this level (e.g. 0.05)")
group.add_argument("-s", "--save", action="store_true", help="save a snapshot of account balance info to the account database")
parser.set_defaults(func="quantrocket.account.get_balance")
parser = _subparsers.add_parser("history", help="get historical account balance snapshots from the account database")
parser.add_argument("-s", "--start-date", metavar="YYYY-MM-DD", help="limit to history on or after this date")
parser.add_argument("-e", "--end-date", metavar="YYYY-MM-DD", help="limit to history on or before this date")
group = parser.add_mutually_exclusive_group()
group.add_argument("-n", "--nlv", action="store_true", help="only show NLV")
group.add_argument("-f", "--fields", metavar="FIELD", nargs="*", choices=ACCOUNT_FIELDS, help="only display these fields")
parser.add_argument("-a", "--accounts", nargs="*", metavar="ACCOUNT", help="only show these accounts")
parser.add_argument("-l", "--latest", action="store_true", help="only show the latest historical snapshot in the date range")
parser.set_defaults(func="quantrocket.account.get_balance_history")
parser = _subparsers.add_parser("portfolio", help="get current account portfolio")
parser.add_argument("-a", "--accounts", nargs="*", metavar="ACCOUNT", help="limit to these accounts")
parser.set_defaults(func="quantrocket.account.get_portfolio")
parser = _subparsers.add_parser("set-nickname", help="Set account nickname")
parser.add_argument("account", help="the account ID, e.g. U123456")
parser.add_argument("nickname", help="the nickname (letters, numbers, hyphens, and underscores only)")
parser.set_defaults(func="quantrocket.account.set_account_nickname")
|
Python
| 0
|
@@ -3134,228 +3134,1368 @@
er(%22
-set-nickname%22, help=%22Set account nickname%22)%0A parser.add_argument(%22account%22, help=%22the account ID, e.g. U123456%22)%0A parser.add_argument(%22nickname%22, help=%22the nickname (letters, numbers, hyphens, and underscores only)
+default%22, help=%22view or set the default account%22)%0A parser.add_argument(%22account%22, nargs=%22?%22, metavar=%22ACCOUNT%22, help=%22set this account as the default (omit to view current default account)%22)%0A parser.set_defaults(func=%22quantrocket.account._get_or_set_default_account%22)%0A%0A parser = _subparsers.add_parser(%22fx%22, help=%22fetch base currency exchange rates from Google and optionally save to account database%22)%0A parser.add_argument(%22-c%22, %22--currencies%22, nargs=%22*%22, metavar=%22CURRENCY%22, help=%22limit to these currencies (default all IB-tradeable currencies)%22)%0A parser.add_argument(%22-s%22, %22--save%22, action=%22store_true%22, help=%22save exhange rates to the account database%22)%0A parser.set_defaults(func=%22quantrocket.account.get_exchange_rates%22)%0A%0A parser = _subparsers.add_parser(%22fxhistory%22, help=%22return historical exchange rates from the account database%22)%0A parser.add_argument(%22-c%22, %22--currencies%22, nargs=%22*%22, metavar=%22CURRENCY%22, help=%22limit to these currencies (default all IB-tradeable currencies)%22)%0A parser.add_argument(%22-s%22, %22--start-date%22, metavar=%22YYYY-MM-DD%22, help=%22limit to history on or after this date%22)%0A parser.add_argument(%22-e%22, %22--end-date%22, metavar=%22YYYY-MM-DD%22, help=%22limit to history on or before this date%22)%0A parser.add_argument(%22-l%22, %22--latest%22, action=%22store_true%22, help=%22only show the latest exchange rate in the date range
%22)%0A
@@ -4547,27 +4547,32 @@
unt.
-set_account_nickname
+get_exchange_rate_history
%22)%0A
|
1bf4ae32aa5f423b4124a89108df27f04dca8f71
|
Include environment at lambda function qualifier
|
kappa/event_source/s3.py
|
kappa/event_source/s3.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2014, 2015 Mitch Garnaat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import kappa.event_source.base
import logging
LOG = logging.getLogger(__name__)
class S3EventSource(kappa.event_source.base.EventSource):
def __init__(self, context, config):
super(S3EventSource, self).__init__(context, config)
self._s3 = kappa.awsclient.create_client('s3', context.session)
def _make_notification_id(self, function_name):
return 'Kappa-%s-notification' % function_name
def _get_bucket_name(self):
return self.arn.split(':')[-1]
def add(self, function):
notification_spec = {
'LambdaFunctionConfigurations': [
{
'Id': self._make_notification_id(function.name),
'Events': [e for e in self._config['events']],
'LambdaFunctionArn': function.arn,
}
]
}
try:
response = self._s3.call(
'put_bucket_notification_configuration',
Bucket=self._get_bucket_name(),
NotificationConfiguration=notification_spec)
LOG.debug(response)
except Exception as exc:
LOG.debug(exc.response)
LOG.exception('Unable to add S3 event source')
enable = add
def update(self, function):
self.add(function)
def remove(self, function):
LOG.debug('removing s3 notification')
response = self._s3.call(
'get_bucket_notification',
Bucket=self._get_bucket_name())
LOG.debug(response)
if 'CloudFunctionConfiguration' in response:
fn_arn = response['CloudFunctionConfiguration']['CloudFunction']
if fn_arn == function.arn:
del response['CloudFunctionConfiguration']
del response['ResponseMetadata']
response = self._s3.call(
'put_bucket_notification',
Bucket=self._get_bucket_name(),
NotificationConfiguration=response)
LOG.debug(response)
disable = remove
def status(self, function):
LOG.debug('status for s3 notification for %s', function.name)
response = self._s3.call(
'get_bucket_notification',
Bucket=self._get_bucket_name())
LOG.debug(response)
if 'CloudFunctionConfiguration' not in response:
response = None
return response
|
Python
| 0
|
@@ -1402,16 +1402,27 @@
onArn':
+'%25s:%25s' %25 (
function
@@ -1426,16 +1426,48 @@
ion.arn,
+ function._context.environment),
%0A
|
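The hunk changes `LambdaFunctionArn` from the bare function ARN to `'%s:%s' % (function.arn, function._context.environment)`, i.e. an alias-qualified ARN. Schematically, with hypothetical values:

function_arn = 'arn:aws:lambda:us-east-1:123456789012:function:myfunc'  # hypothetical
environment = 'dev'                                                     # hypothetical alias
qualified_arn = '%s:%s' % (function_arn, environment)
print(qualified_arn)  # arn:aws:lambda:us-east-1:123456789012:function:myfunc:dev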
2027d3347a839ca30abbcb778b34b3f261b266ef
|
Update Naive Bayes classifier
|
mla/naive_bayes.py
|
mla/naive_bayes.py
|
import numpy as np
from mla.base import BaseEstimator
from mla.neuralnet.activations import softmax
class NaiveBayesClassifier(BaseEstimator):
"""Gaussian Naive Bayes."""
# Binary problem.
n_classes = 2
def fit(self, X, y=None):
self._setup_input(X, y)
# Check target labels
assert list(np.unique(y)) == [0, 1]
self._mean = np.zeros((self.n_classes, self.n_features), dtype=np.float64)
self._var = np.zeros((self.n_classes, self.n_features), dtype=np.float64)
self._priors = np.zeros(self.n_classes, dtype=np.float64)
for c in range(self.n_classes):
# Filter features by class
X_c = X[y == c]
# Calculate mean, variance, prior for each class
self._mean[c, :] = X_c.mean(axis=0)
self._var[c, :] = X_c.var(axis=0)
self._priors[c] = X_c.shape[0] / float(X.shape[0])
def _predict(self, X=None):
# Apply _predict_proba for each row
predictions = np.apply_along_axis(self._predict_proba, 1, X)
# Normalize probabilities
return softmax(predictions)
def _predict_proba(self, x):
"""Predict log likelihood for given row."""
output = []
for y in range(self.n_classes):
prior = np.log(self._priors[y])
posterior = np.sum([self._pdf(y, d, x) for d in range(self.n_features)])
prediction = prior + posterior
output.append(prediction)
return output
def _pdf(self, n_class, n_feature, x):
"""Calculate probability density function for normal distribution."""
# Take specific values
mean = self._mean[n_class, n_feature]
var = self._var[n_class, n_feature]
x = x[n_feature]
# Avoid division by zero
if var < 1e-15:
return 0.0
numerator = np.exp(-(float(x) - float(mean)) ** 2 / (2 * var))
denominator = np.sqrt(2 * np.pi * var)
return numerator / denominator
|
Python
| 0.000006
|
@@ -1340,16 +1340,8 @@
r =
-np.sum(%5B
self
@@ -1353,46 +1353,15 @@
(y,
-d, x) for d in range(self.n_features)%5D
+x).sum(
)%0A
@@ -1494,19 +1494,8 @@
ass,
- n_feature,
x):
@@ -1520,65 +1520,43 @@
ate
-probability density function for normal distribution
+Gaussian PDF for each feature
.%22%22%22%0A
+%0A
@@ -1611,35 +1611,24 @@
mean%5Bn_class
-, n_feature
%5D%0A va
@@ -1652,126 +1652,9 @@
lass
-, n_feature%5D%0A x = x%5Bn_feature%5D%0A%0A # Avoid division by zero%0A if var %3C 1e-15:%0A return 0.0
+%5D
%0A%0A
@@ -1684,31 +1684,17 @@
p(-(
-float(x) - float(
+x -
mean)
-)
**
|
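The `_pdf` method evaluates the normal density the rewritten docstring calls the Gaussian PDF: for class c with per-feature mean and variance accumulated in `fit()`, it computes

$$ p(x \mid y = c) = \frac{1}{\sqrt{2\pi\sigma_c^2}} \exp\!\left( -\frac{(x - \mu_c)^2}{2\sigma_c^2} \right) $$

`_predict_proba` then adds the log prior \(\log P(y=c)\) to the summed per-feature densities, and `_predict` normalizes the class scores with a softmax.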
3fb860aa233f46046ef826360e10a467d3c9a866
|
Version 0.0.2
|
covimerage/__version__.py
|
covimerage/__version__.py
|
__version__ = '0.0.2.dev0'
|
Python
| 0.000001
|
@@ -17,11 +17,6 @@
.0.2
-.dev0
'%0A
|
fd5c84a1272ded54afaa6323fc10c637474096a3
|
Increment version to 0.2.5
|
approvaltests/version.py
|
approvaltests/version.py
|
version_number = "0.2.4"
|
Python
| 0.998918
|
@@ -19,7 +19,7 @@
0.2.
-4
+5
%22%0A
|
1c756d13efb874f075b9413722f042f0dbcc3721
|
Use default settings
|
readthedocs/settings/docker_compose.py
|
readthedocs/settings/docker_compose.py
|
import os
from .dev import CommunityDevSettings
class DockerBaseSettings(CommunityDevSettings):
"""Settings for local development with Docker"""
DOCKER_ENABLE = True
RTD_DOCKER_COMPOSE = True
RTD_DOCKER_COMPOSE_VOLUME = 'community_build-user-builds'
RTD_DOCKER_USER = f'{os.geteuid()}:{os.getegid()}'
DOCKER_LIMITS = {'memory': '1g', 'time': 900}
USE_SUBDOMAIN = True
STATIC_URL = 'http://community.dev.readthedocs.io/devstoreaccount1/static/'
PRODUCTION_DOMAIN = 'community.dev.readthedocs.io'
PUBLIC_DOMAIN = 'community.dev.readthedocs.io'
PUBLIC_API_URL = 'http://community.dev.readthedocs.io'
RTD_PROXIED_API_URL = PUBLIC_API_URL
SLUMBER_API_HOST = 'http://web:8000'
RTD_EXTERNAL_VERSION_DOMAIN = 'org.dev.readthedocs.build'
# In the local docker environment, nginx should be trusted to set the host correctly
USE_X_FORWARDED_HOST = True
MULTIPLE_APP_SERVERS = ['web']
MULTIPLE_BUILD_SERVERS = ['build']
# https://docs.docker.com/engine/reference/commandline/run/#add-entries-to-container-hosts-file---add-host
# export HOSTIP=`ip -4 addr show scope global dev wlp4s0 | grep inet | awk '{print \$2}' | cut -d / -f 1`
HOSTIP = os.environ.get('HOSTIP')
import platform
if platform.system() == 'Darwin':
# On Mac, host.docker.internal always point to the host's IP
HOSTIP = 'host.docker.internal'
ADSERVER_API_BASE = f'http://{HOSTIP}:5000'
# Create a Token for an admin User and set it here.
ADSERVER_API_KEY = None
# Enable auto syncing elasticsearch documents
ELASTICSEARCH_DSL_AUTOSYNC = True if 'SEARCH' in os.environ else False
ELASTICSEARCH_DSL = {
'default': {
'hosts': 'search:9200',
},
}
RTD_CLEAN_AFTER_BUILD = True
@property
def LOGGING(self):
logging = super().LOGGING
logging['loggers'].update({
# Disable azurite logging
'azure.storage.common': {
'handlers': ['null'],
'propagate': False,
},
# Disable gitpython logging
'git.cmd': {
'handlers': ['null'],
'propagate': False,
},
})
return logging
@property
def DATABASES(self): # noqa
return {
"default": {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"NAME": "docs_db",
"USER": os.environ.get("DB_USER", "docs_user"),
"PASSWORD": os.environ.get("DB_PWD", "docs_pwd"),
"HOST": os.environ.get("DB_HOST", "database"),
"PORT": "",
}
}
DEBUG_TOOLBAR_CONFIG = {
'SHOW_TOOLBAR_CALLBACK': lambda request: True,
}
ACCOUNT_EMAIL_VERIFICATION = "none"
SESSION_COOKIE_DOMAIN = None
CACHES = {
'default': {
'BACKEND': 'redis_cache.RedisCache',
'LOCATION': 'cache:6379',
}
}
BROKER_URL = "redis://cache:6379/0"
CELERY_RESULT_BACKEND = "redis://cache:6379/0"
CELERY_RESULT_SERIALIZER = "json"
CELERY_ALWAYS_EAGER = False
CELERY_TASK_IGNORE_RESULT = False
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
# Avoid syncing to the web servers
FILE_SYNCER = "readthedocs.builds.syncers.NullSyncer"
# https://github.com/Azure/Azurite/blob/master/README.md#default-storage-account
AZURE_ACCOUNT_NAME = 'devstoreaccount1'
AZURE_ACCOUNT_KEY = 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=='
AZURE_CONTAINER = 'static'
AZURE_STATIC_STORAGE_CONTAINER = AZURE_CONTAINER
AZURE_MEDIA_STORAGE_HOSTNAME = 'community.dev.readthedocs.io'
# We want to replace files for the same version built
AZURE_OVERWRITE_FILES = True
# Storage backend for build media artifacts (PDF, HTML, ePub, etc.)
RTD_BUILD_MEDIA_STORAGE = 'readthedocs.storage.azure_storage.AzureBuildMediaStorage'
AZURE_STATIC_STORAGE_HOSTNAME = 'community.dev.readthedocs.io'
# Storage for static files (those collected with `collectstatic`)
STATICFILES_STORAGE = 'readthedocs.storage.azure_storage.AzureStaticStorage'
STATICFILES_DIRS = [
os.path.join(CommunityDevSettings.SITE_ROOT, 'readthedocs', 'static'),
os.path.join(CommunityDevSettings.SITE_ROOT, 'media'),
]
AZURE_BUILD_STORAGE_CONTAINER = 'builds'
BUILD_COLD_STORAGE_URL = 'http://storage:10000/builds'
AZURE_EMULATED_MODE = True
AZURE_CUSTOM_DOMAIN = 'storage:10000'
AZURE_SSL = False
# Remove the checks on the number of fields being submitted
# This limit is mostly hit on large forms in the Django admin
DATA_UPLOAD_MAX_NUMBER_FIELDS = None
|
Python
| 0.000002
|
@@ -1671,108 +1671,8 @@
alse
-%0A ELASTICSEARCH_DSL = %7B%0A 'default': %7B%0A 'hosts': 'search:9200',%0A %7D,%0A %7D
%0A%0A
|
aa1772c013cad2fe423613cb88e37fbf1958bd82
|
Update retrosign.py
|
retrosign/retrosign.py
|
retrosign/retrosign.py
|
import discord, aiohttp, re
from bs4 import BeautifulSoup as b_s
from io import BytesIO
from discord.ext import commands
import os
import random
from random import choice
import lxml
class retrosign:
def __init__(self, bot):
self.bot = bot
@commands.group()
async def retrosign(self, content : str):
"""Make a Retrosign"""
texts = [t.strip() for t in content.split('|')]
if len(texts) < 3:
lenstr = len(texts[0])
await self.bot.say(lenstr)
if lenstr <= 12:
global data
data = dict(
bcg=choice([1, 2, 3, 4, 5]),
txt=choice([1, 2, 3, 4]),
text1="",
text2=texts[0],
text3=""
)
do_it()
else:
await self.bot.say("\N{CROSS MARK} too many Characters for one Line")
return
elif len(texts) != 3:
await self.bot.say("\N{CROSS MARK} please provide three strings seperated by `|`")
return
else:
global data
data = dict(
bcg=choice([1, 2, 3, 4, 5]),
txt=choice([1, 2, 3, 4]),
text1=texts[0],
text2=texts[1],
text3=texts[2]
)
return(data)
@retrosign.command()
async def top(self, content : str):
"""Make a Retrosign with top and middle Text"""
texts = [t.strip() for t in content.split('|')]
if len(texts) != 2:
await self.bot.say("\N{CROSS MARK} please provide two strings seperated by `|`")
return
else:
global data
data = dict(
bcg=choice([1, 2, 3, 4, 5]),
txt=choice([1, 2, 3, 4]),
text1=texts[0],
text2=texts[1],
text3=""
)
do_it(data)
@retrosign.command()
async def bottom(self, content : str):
"""Make a Retrosign with middle and bottom Text"""
texts = [t.strip() for t in content.split('|')]
if len(texts) != 2:
await self.bot.say("\N{CROSS MARK} please provide two strings seperated by `|`")
return
else:
global data
data = dict(
bcg=choice([1, 2, 3, 4, 5]),
txt=choice([1, 2, 3, 4]),
text1="",
text2=texts[0],
text3=[1]
)
do_it(data)
def do_it(self, data):
await self.bot.type()
with aiohttp.ClientSession() as session:
async with session.post("http://photofunia.com/effects/retro-wave", data=data) as response:
if response.status == 200:
soup = b_s(await response.text(), "lxml")
download_url = soup.find("div", class_="downloads-container").ul.li.a["href"]
async with session.get(download_url) as image_response:
if image_response.status == 200:
image_data = await image_response.read()
with BytesIO(image_data) as temp_image:
await self.bot.upload(temp_image, filename="retro.jpg")
def setup(bot):
n = retrosign(bot)
bot.add_cog(n)
|
Python
| 0
|
@@ -2626,24 +2626,30 @@
a)%0A %0A
+async
def do_it(se
|
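Decoded, the single hunk turns do_it into a coroutine so its internal awaits become legal; after applying it the method begins:

    async def do_it(self, data):
        await self.bot.type()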
4569821be22e8b0818f6817fa908fb11c5158e85
|
Improve cafferesnet compatibility to API
|
pretrainedmodels/models/cafferesnet.py
|
pretrainedmodels/models/cafferesnet.py
|
from __future__ import print_function, division, absolute_import
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
pretrained_settings = {
'cafferesnet101': {
'imagenet': {
'url': 'http://data.lip6.fr/cadene/pretrainedmodels/cafferesnet101-9d633cc0.pth',
'input_space': 'BGR',
'input_size': [3, 224, 224],
'input_range': [0, 255],
'mean': [102.9801, 115.9465, 122.7717],
'std': [1, 1, 1],
'num_classes': 1000
}
}
}
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False) # change
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, # change
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0, ceil_mode=True) # change
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
# it is slightly better whereas slower to set stride = 1
# self.layer4 = self._make_layer(block, 512, layers[3], stride=1)
self.avgpool = nn.AvgPool2d(7)
self.last_linear = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.last_linear(x)
return x
def cafferesnet101(num_classes=1000, pretrained='imagenet'):
"""Constructs a ResNet-101 model.
Args:
        pretrained (str or None): If 'imagenet', returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_classes)
if pretrained is not None:
settings = pretrained_settings['cafferesnet101'][pretrained]
assert num_classes == settings['num_classes'], \
"num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
model.load_state_dict(model_zoo.load_url(settings['url']))
model.input_space = settings['input_space']
model.input_size = settings['input_size']
model.input_range = settings['input_range']
model.mean = settings['mean']
model.std = settings['std']
return model
|
Python
| 0
|
@@ -4312,38 +4312,39 @@
layers)%0A%0A def f
-orward
+eatures
(self, x):%0A x
@@ -4514,24 +4514,60 @@
f.layer4(x)%0A
+ return x%0A%0A def logits(self, x):
%0A x = sel
@@ -4633,24 +4633,109 @@
t_linear(x)%0A
+ return x%0A%0A def forward(self, x):%0A x = self.features(x)%0A x = self.logits(x)
%0A return
|
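Reconstructed from old_contents plus the three hunks above, the rewritten methods split the monolithic forward pass into a features/logits pair (a sketch; only the changed region is shown):

    def features(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x

    def logits(self, x):
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.last_linear(x)
        return x

    def forward(self, x):
        x = self.features(x)
        x = self.logits(x)
        return x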
959dfe1dafb81523365fc30bfa4cd7e6c2b9eecb
|
CHANGE GcodeFile() function names
|
GcodeParser.py
|
GcodeParser.py
|
#!/usr/bin/env python
# coding=UTF-8
"""Module containing Gcode parsing functions"""
__author__ = "Dylan Armitage"
__email__ = "d.armitage89@gmail.com"
####---- Imports ----####
import logging

from pygcode import Line, GCodeLinearMove

logger = logging.getLogger(__name__)
class GcodeFile(object):
"""A file of gcode"""
def __init__(self, gcode_file=[]):
self.file = gcode_file
self.gcode = []
self.extrema = dict(X=(None, None), Y=(None, None),
UL=(None, None), DR=(None, None),
)
self.mids = dict(X=None, Y=None)
if self.file and not self.gcode:
self.__convert_gcode_internal()
def add_file(self, gcode_file):
"""Read in a file of Gcode"""
self.file = gcode_file
self.__convert_gcode_internal()
def __convert_gcode_internal(self):
"""Convert gcode into format that can be easily manipulated"""
for line in self.file:
self.gcode.append(Line(line))
def bounding_box(self):
"""Take in file of gcode, return tuples of min/max bounding values"""
        if not (self.file or self.gcode):
logger.error("Load file first")
return None
if (None, None) in self.extrema.values():
params = [p.get_param_dict()
for p in self.gcode
if p.word == "G01"]
x_pos = [p["X"] for p in params]
y_pos = [p["Y"] for p in params]
self.extrema["X"] = (min(x_pos), max(x_pos))
self.extrema["Y"] = (min(y_pos), max(y_pos))
self.extrema["UL"] = (self.extrema["X"][0],
self.extrema["Y"][0])
self.extrema["DR"] = (self.extrema["X"][1],
self.extrema["Y"][1])
return (self.extrema["UL"], self.extrema["DR"])
def box_gcode(self):
"""Return G0 commands to bound gcode file"""
gcode = []
gcode.append(GCodeLinearMove(X=self.extrema["X"][0],
Y=self.extrema["Y"][0])) #UL
gcode.append(GCodeLinearMove(X=self.extrema["X"][0],
Y=self.extrema["Y"][1])) #UR
gcode.append(GCodeLinearMove(X=self.extrema["X"][1],
Y=self.extrema["Y"][1])) #DR
gcode.append(GCodeLinearMove(X=self.extrema["X"][1],
Y=self.extrema["Y"][0])) #DL
gcode.append(GCodeLinearMove(X=self.extrema["X"][0],
Y=self.extrema["Y"][0])) #UL cycle
# Convert from GCodeLinearMove class to string
gcode = [str(line) for line in gcode]
return gcode
def mid_gcode(min_xy, max_xy):
        raise NotImplementedError
|
Python
| 0.000001
|
@@ -991,16 +991,23 @@
ding_box
+_coords
(self):%0A
@@ -2721,21 +2721,22 @@
def mid_
-g
co
-de
+ords
(min_xy,
|
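Decoded, the two hunks only rename the public API; after applying them the signatures read (bodies unchanged):

    def bounding_box_coords(self):
        ...

    def mid_coords(min_xy, max_xy):
        ...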
ec2d66e85275313c206f760364724108f9508204
|
Add execute permission
|
scripts/contrib/model_info.py
|
scripts/contrib/model_info.py
|
#!/usr/bin/env python3
import sys
import argparse
import numpy as np
import yaml
DESC = "Prints keys and values from model.npz file."
S2S_SPECIAL_NODE = "special:model.yml"
def main():
args = parse_args()
model = np.load(args.model)
if args.special:
if S2S_SPECIAL_NODE not in model:
print("No special Marian YAML node found in the model")
exit(1)
yaml_text = bytes(model[S2S_SPECIAL_NODE]).decode('ascii')
if not args.key:
print(yaml_text)
exit(0)
# fix the invalid trailing unicode character '#x0000' added to the YAML
# string by the C++ cnpy library
try:
yaml_node = yaml.load(yaml_text)
except yaml.reader.ReaderError:
yaml_node = yaml.load(yaml_text[:-1])
print(yaml_node[args.key])
else:
if args.key:
if args.key not in model:
print("Key not found")
exit(1)
if args.full_matrix:
for (x, y), val in np.ndenumerate(model[args.key]):
print(val)
else:
print(model[args.key])
else:
for key in model:
print(key)
def parse_args():
parser = argparse.ArgumentParser(description=DESC)
parser.add_argument("-m", "--model", help="model file", required=True)
parser.add_argument("-k", "--key", help="print value for specific key")
parser.add_argument("-s", "--special", action="store_true",
help="print values from special:model.yml node")
parser.add_argument("-f", "--full-matrix", action="store_true",
help="force numpy to print full arrays")
return parser.parse_args()
if __name__ == "__main__":
main()
|
Python
| 0.000001
| |
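The empty diff field reflects a mode-only change: the commit adds the executable bit without touching file contents. A hypothetical Python equivalent of chmod +x for this path:

    import os
    import stat

    path = 'scripts/contrib/model_info.py'
    mode = os.stat(path).st_mode
    # set the execute bit for user, group and others
    os.chmod(path, mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)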
fee8e6371c2884ba8cdc587ecae06093e5f6e4de
|
restructure python 2 to use a generator
|
01-50/02/2.py
|
01-50/02/2.py
|
a, b = 0, 1
total = 0
while b < 4000000:
a, b = b, a + b
if b % 2 == 0:
total += b
print total
|
Python
| 0.000486
|
@@ -1,72 +1,162 @@
-a, b = 0, 1%0Atotal = 0%0Awhile b %3C 4000000:%0A a, b = b, a + b
+def fib():%0A a, b = 0, 1%0A while True:%0A yield b%0A a, b = b, a + b%0A%0Atotal = 0%0Ai = 0%0Afor i in fib():%0A if i %3E= 4000000:%0A break
%0A if
b %25
@@ -151,17 +151,17 @@
%0A if
-b
+i
%25 2 ==
@@ -180,17 +180,17 @@
otal +=
-b
+i
%0A%0Aprint
|
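Applying the hunks to old_contents yields roughly the following file (a reconstruction; the trailing Python 2 print statement is kept by the diff):

def fib():
    a, b = 0, 1
    while True:
        yield b
        a, b = b, a + b

total = 0
i = 0
for i in fib():
    if i >= 4000000:
        break
    if i % 2 == 0:
        total += i

print total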
0f7b1a82cc74b5b7238753a3e8d6ac53c5d64c9c
|
Create wallpost on new donation
|
apps/wallposts/models.py
|
apps/wallposts/models.py
|
from django.db import models
from django.utils.text import Truncator
from django.utils.translation import ugettext_lazy as _
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django_extensions.db.fields import ModificationDateTimeField, CreationDateTimeField
from django.conf import settings
from polymorphic import PolymorphicModel
from .managers import ReactionManager, WallPostManager
WALLPOST_TEXT_MAX_LENGTH = getattr(settings, 'WALLPOST_TEXT_MAX_LENGTH', 300)
WALLPOST_REACTION_MAX_LENGTH = getattr(settings, 'WALLPOST_REACTION_MAX_LENGTH', 300)
class WallPost(PolymorphicModel):
"""
The WallPost base class. This class will never be used directly because the content of a WallPost is always defined
in the child classes.
Implementation Note: Normally this would be an abstract class but it's not possible to make this an abstract class
and have the polymorphic behaviour of sorting on the common fields.
"""
# The user who wrote the wall post. This can be empty to support wall posts without users (e.g. anonymous
# TextWallPosts, system WallPosts for donations etc.)
author = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('author'), related_name="%(class)s_wallpost", blank=True, null=True)
editor = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('editor'), blank=True, null=True, help_text=_("The last user to edit this wallpost."))
# The metadata for the wall post.
created = CreationDateTimeField(_('created'))
updated = ModificationDateTimeField(_('updated'))
deleted = models.DateTimeField(_('deleted'), blank=True, null=True)
ip_address = models.IPAddressField(_('IP address'), blank=True, null=True, default=None)
# Generic foreign key so we can connect it to any object.
content_type = models.ForeignKey(ContentType, verbose_name=_('content type'), related_name="content_type_set_for_%(class)s")
object_id = models.PositiveIntegerField(_('object ID'))
content_object = generic.GenericForeignKey('content_type', 'object_id')
# Manager
objects = WallPostManager()
class Meta:
ordering = ('created',)
def __unicode__(self):
return str(self.id)
class MediaWallPost(WallPost):
# The content of the wall post.
# TODO: When this is set to 'deleted' set connected MediaWallPostPhotos to deleted too.
title = models.CharField(max_length=60)
text = models.TextField(max_length=WALLPOST_REACTION_MAX_LENGTH, blank=True, default='')
video_url = models.URLField(max_length=100, blank=True, default='')
def __unicode__(self):
return Truncator(self.text).words(10)
class MediaWallPostPhoto(models.Model):
mediawallpost = models.ForeignKey(MediaWallPost, related_name='photos', null=True, blank=True)
photo = models.ImageField(upload_to='mediawallpostphotos')
deleted = models.DateTimeField(_('deleted'), blank=True, null=True)
ip_address = models.IPAddressField(_('IP address'), blank=True, null=True, default=None)
author = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('author'), related_name="%(class)s_wallpost_photo", blank=True, null=True)
editor = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('editor'), blank=True, null=True, help_text=_("The last user to edit this wallpost photo."))
class TextWallPost(WallPost):
# The content of the wall post.
text = models.TextField(max_length=WALLPOST_REACTION_MAX_LENGTH)
def __unicode__(self):
return Truncator(self.text).words(10)
class SystemWallPost(WallPost):
# The content of the wall post.
text = models.TextField(max_length=WALLPOST_REACTION_MAX_LENGTH, blank=True)
# Generic foreign key so we can connect any object to it.
related_type = models.ForeignKey(ContentType, verbose_name=_('related type'))
related_id = models.PositiveIntegerField(_('related ID'))
related_object = generic.GenericForeignKey('related_type', 'related_id')
def __unicode__(self):
return Truncator(self.text).words(10)
class Reaction(models.Model):
"""
A user reaction or comment to a WallPost. This model is based on the Comments model from django.contrib.comments.
"""
# Who posted this reaction. User will need to be logged in to make a reaction.
author = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('author'), related_name='wallpost_reactions')
editor = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('editor'), blank=True, null=True, related_name='+', help_text=_("The last user to edit this reaction."))
# The reaction text and the wallpost it's a reaction to.
text = models.TextField(_('reaction text'), max_length=WALLPOST_REACTION_MAX_LENGTH)
wallpost = models.ForeignKey(WallPost, related_name='reactions')
# Metadata for the reaction.
created = CreationDateTimeField(_('created'))
updated = ModificationDateTimeField(_('updated'))
deleted = models.DateTimeField(_('deleted'), blank=True, null=True)
ip_address = models.IPAddressField(_('IP address'), blank=True, null=True, default=None)
# Manager
objects = ReactionManager()
objects_with_deleted = models.Manager()
class Meta:
ordering = ('created',)
verbose_name = _('Reaction')
verbose_name_plural = _('Reactions')
def __unicode__(self):
s = "{0}: {1}".format(self.author.get_full_name(), self.text)
return Truncator(s).words(10)
|
Python
| 0
|
@@ -22,16 +22,111 @@
models%0A
+from django.db.models.signals import post_save%0Afrom django.dispatch.dispatcher import receiver%0A
from dja
@@ -442,16 +442,72 @@
ettings%0A
+from apps.fund.models import Donation, DonationStatuses%0A
from pol
@@ -5667,20 +5667,795 @@
cator(s).words(10)%0A%0A
+%0A@receiver(post_save, weak=False, sender=Donation)%0Adef create_donation_post(sender, instance, **kwargs):%0A donation = instance%0A if donation.status in %5BDonationStatuses.paid, DonationStatuses.pending%5D:%0A try:%0A donation_type = ContentType.objects.get_for_model(donation)%0A post = SystemWallPost.objects.filter(related_id=donation.id).filter(related_type=donation_type).get()%0A except SystemWallPost.DoesNotExist:%0A if donation.donation_type == Donation.DonationTypes.one_off:%0A post = SystemWallPost()%0A post.content_object = donation.project%0A post.related_object = donation%0A post.author = donation.user%0A post.ip = '127.0.0.1'%0A post.save()%0A%0A
|
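Decoded from the final hunk, the new post_save receiver that creates a wallpost for each paid or pending donation reads:

@receiver(post_save, weak=False, sender=Donation)
def create_donation_post(sender, instance, **kwargs):
    donation = instance
    if donation.status in [DonationStatuses.paid, DonationStatuses.pending]:
        try:
            donation_type = ContentType.objects.get_for_model(donation)
            post = SystemWallPost.objects.filter(related_id=donation.id).filter(related_type=donation_type).get()
        except SystemWallPost.DoesNotExist:
            if donation.donation_type == Donation.DonationTypes.one_off:
                post = SystemWallPost()
                post.content_object = donation.project
                post.related_object = donation
                post.author = donation.user
                post.ip = '127.0.0.1'
                post.save()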
51fa99659a0b975175a0a33ace21021de7de4b45
|
Add load_json
|
saau/sections/image_provider.py
|
saau/sections/image_provider.py
|
import json
import inspect
from os.path import join, exists
def not_implemented():
frame_info = inspect.currentframe().f_back
msg = ''
if 'self' in frame_info.f_locals:
self = frame_info.f_locals['self']
try:
msg += self.__name__ + '#' # for static/class methods
except AttributeError:
msg += self.__class__.__name__ + '.'
msg += frame_info.f_code.co_name + '()'
return NotImplementedError(msg)
class RequiresData:
def __init__(self, data_dir):
self.data_dir = data_dir
def has_required_data(self):
raise not_implemented()
def obtain_data(self):
raise not_implemented()
def data_dir_exists(self, name):
return exists(self.data_dir_join(name))
def data_dir_join(self, name):
return join(self.data_dir, name)
def save_json(self, name, data):
with open(self.data_dir_join(name), 'w') as fh:
json.dump(data, fh, indent=4)
return True
class ImageProvider(RequiresData):
def build_image(self, output_filename):
raise not_implemented()
|
Python
| 0.00001
|
@@ -997,16 +997,132 @@
n True%0A%0A
+ def load_json(self, name):%0A with open(self.data_dir_join(name)) as fh:%0A return json.load(fh)%0A%0A
%0Aclass I
|
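Decoded, the added method is the read-side counterpart of save_json:

    def load_json(self, name):
        with open(self.data_dir_join(name)) as fh:
            return json.load(fh)

A typical round trip (hypothetical usage): self.save_json('sites.json', data) followed by self.load_json('sites.json').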
7907edc07b7607e1892a35a4ad49e8eb7340ed4e
|
Update __openerp__.py
|
sale_order_types/__openerp__.py
|
sale_order_types/__openerp__.py
|
# -*- encoding: utf-8 -*-
##############################################################################
# #
# OpenERP, Open Source Management Solution. #
# #
# @author Carlos SΓ‘nchez Cifuentes <csanchez@grupovermon.com> #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as #
# published by the Free Software Foundation, either version 3 of the #
# License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
##############################################################################
{
'name': 'Sale Order Types',
'version': '1.0',
'category': 'Sales',
'description': """This module adds a typology for the sale orders.""",
'author': 'OdooMRP team',
'website': 'www.odoomrp.com',
'license': 'AGPL-3',
'depends': ['sale','stock'],
'data': ['views/sale_order_view.xml', 'views/sale_order_type_view.xml'],
'installable': True,
}
|
Python
| 0.000024
|
@@ -1868,16 +1868,17 @@
%5B'sale',
+
'stock'%5D
|
ee50bea2810676ef655e7ea57565070f7e715741
|
Validate quotation is non-empty before allowing conversion to template.
|
sale_template_quotation/sale.py
|
sale_template_quotation/sale.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Template Quotations
# Copyright (C) 2015 OpusVL (<http://opusvl.com/>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api, exceptions
class TemplateQuotation(models.Model):
_inherit = "sale.order"
is_template = fields.Boolean(default=False)
quotation_ref = fields.Char()
@api.one
def convert_to_template(self):
self.is_template = True
@api.one
def write(self, data):
if self.is_template and ('state' not in data or data['state'] != 'cancelled'):
raise exceptions.Warning('You cannot edit or change state of a quotation template')
return super(TemplateQuotation, self).write(data)
@api.one
def copy(self, default=None):
new_default = default or {'is_template': False}
return super(TemplateQuotation, self).copy(default=new_default)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Python
| 0
|
@@ -1162,16 +1162,225 @@
(self):%0A
+ if not (self.quotation_ref and self.quotation_ref.strip()):%0A raise exceptions.Warning('Quotation Ref is blank.%5CnYou must set a Quotation Ref before you convert the quotation to a template')%0A
|
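Decoded from the hunk, the guard inserted at the top of convert_to_template rejects blank or whitespace-only references:

        if not (self.quotation_ref and self.quotation_ref.strip()):
            raise exceptions.Warning('Quotation Ref is blank.\nYou must set a Quotation Ref before you convert the quotation to a template')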
10862c50b52ad5da6de9a1bd076bf50c738c9a97
|
Implement object serialization
|
salt/modules/inspectlib/fsdb.py
|
salt/modules/inspectlib/fsdb.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2016 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import csv
import datetime
class CsvDBEntity(object):
'''
Serializable object for the table.
'''
def bind_table(self, table):
self.__table = table
def serialize(self):
'''
Serialize the object to a row for CSV.
:return:
'''
class CsvDB(object):
'''
File-based CSV database.
    This database operates in memory on plain-text CSV files.
'''
def __init__(self, path):
'''
Constructor to store the database files.
:param path:
'''
self._prepare(path)
self._opened = False
self.db_path = None
self._opened = False
self._tables = {}
def _prepare(self, path):
self.path = path
if not os.path.exists(self.path):
os.makedirs(self.path)
def _label(self):
'''
Create label of the database, based on the date-time.
:return:
'''
return datetime.datetime.utcnow().strftime('%Y%m%d.%H%M%S')
def new(self):
'''
        Create a new database and open it.
:return:
'''
dbname = self._label()
self.db_path = os.path.join(self.path, dbname)
if not os.path.exists(self.db_path):
os.makedirs(self.db_path)
self._opened = True
return dbname
def get_tables(self):
'''
        Get a list of existing tables in this database.
:return:
'''
def purge(self, dbid):
'''
Purge the database.
:param dbid:
:return:
'''
def list(self):
'''
List all the databases on the given path.
:return:
'''
databases = []
for dbname in os.listdir(self.path):
databases.append(dbname)
return list(reversed(sorted(databases)))
def list_tables(self):
'''
Load existing tables and their descriptions.
:return:
'''
if not self._tables:
for table_name in os.listdir(self.db_path):
self._tables[table_name] = self._load_table(table_name)
return self._tables.keys()
def _load_table(self, table_name):
with open(os.path.join(self.db_path, table_name), 'rb') as table:
return dict([tuple(elm.split(':')) for elm in csv.reader(table).next()])
def open(self, dbname=None):
'''
        Open a database from the path by name, or the latest one.
        If there are no databases yet, create a new one implicitly.
:return:
'''
databases = self.list()
if self.is_closed():
self.db_path = os.path.join(self.path, dbname or (databases and databases[0] or self.new()))
self._opened = True
def close(self):
'''
Close the database.
:return:
'''
self._opened = False
def is_closed(self):
'''
Return if the database is closed.
:return:
'''
return not self._opened
def table_from_object(self, obj):
'''
Create a table from the object.
        NOTE: This method doesn't store anything.
:param obj:
:return:
'''
get_type = lambda item: str(type(item)).split("'")[1]
if not os.path.exists(os.path.join(self.db_path, obj._TABLE)):
with open(os.path.join(self.db_path, obj._TABLE), 'wb') as table_file:
csv.writer(table_file).writerow(['{col}:{type}'.format(col=elm[0], type=get_type(elm[1]))
for elm in tuple(obj.__dict__.items())])
def store(self, obj):
'''
Store an object in the table.
:param obj:
:return:
'''
def get(self, table_name, matches=None, mt=None, lt=None, eq=None):
'''
Get objects from the table.
:param table_name:
:param matches: Regexp.
:param mt: More than.
:param lt: Less than.
:param eq: Equals.
:return:
'''
objects = []
return objects
|
Python
| 0.000109
|
@@ -790,24 +790,37 @@
rialize(self
+, description
):%0A '
@@ -871,39 +871,135 @@
CSV
-.%0A%0A :return:%0A '''
+ according to the table description.%0A%0A :return:%0A '''%0A return %5Bgetattr(self, attr) for attr in description%5D
%0A%0A%0Ac
|
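Decoded, the hunk gives serialize a table description and a real body:

    def serialize(self, description):
        '''
        Serialize the object to a row for CSV according to the table description.

        :return:
        '''
        return [getattr(self, attr) for attr in description]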
01050a3a4e2c091d7141499fb4f8ca9eae83d4cc
|
disable swap for cassandra nodes (http://www.datastax.com/documentation/cassandra/2.0/cassandra/install/installRecommendSettings.html)
|
salvus/scripts/first_boot.py
|
salvus/scripts/first_boot.py
|
#!/usr/bin/env python
# This script is run by /etc/rc.local when booting up. It does special configuration
# depending on what images are mounted, etc.
import os, socket, sys
hostname = socket.gethostname()
if hostname == "salvus-base":
# no special config -- this is our template machine
sys.exit(0)
# Enable swap
if not os.path.exists("/mnt/home/"):
os.system("swapon /dev/salvus-base/swap")
# Mount tmp
os.system("mount /dev/salvus-base/tmp /tmp; chmod +t /tmp; chmod a+rwx /tmp/")
if os.path.exists('/mnt/home/'):
# Delete secrets that aren't needed for the *compute machines* (only web machines)
os.system('rm -rf /home/salvus/salvus/salvus/data/secrets')
# Delete ssh private key not needed for the *compute machines*; not deleting this
# would be a security risk, since this key could provide access to a database node
# (say) to a user on the compute node who cracks the salvus account. As it is, there
# is nothing stored on a compute node that directly gives access to any other
# nodes. The one dangerous thing is the tinc vpn private key, which gets the
# machine on the VPN. However, even that is destroyed when the machine is restarted
# (at least at UW) and I think being on the vpn doesn't immediately provide a way
# to break in; it's just a step.
os.system('rm -rf /home/salvus/.ssh/id_rsa')
# Restore existing user accounts
if os.path.exists('/mnt/home/etc/'):
os.system("cp -rv /mnt/home/etc/* /etc/")
else:
os.system("mkdir -p /mnt/home/etc/")
# Store crontabs in persistent storage, so they don't vanish on VM restart
if False:
# disabled -- need to do something that takes into account how projects can move. Pretty tricky.
if not os.path.exists("/mnt/home/crontabs/"):
os.system("mkdir -p /mnt/home/crontabs/; chmod a+rx /mnt/home/; chgrp crontab /mnt/home/crontabs; chmod 1730 /mnt/home/crontabs")
os.system("cd /var/spool/cron/; rm -rf crontabs; ln -s /mnt/home/crontabs .")
# Setup /tmp so it is on the external disk image (has that quota) and is clean, since this is a fresh boot.
# os.system("rm -rf /mnt/home/tmp; mkdir -p /mnt/home/tmp/; chmod +t /mnt/home/tmp; mount -o bind /mnt/home/tmp /tmp; chmod a+rwx /mnt/home/tmp/")
# Scratch is persistent but not backed up.
#os.system("mkdir -p /mnt/home/scratch; mkdir -p /scratch; chmod +t /mnt/home/tmp; mount -o bind /mnt/home/scratch /scratch; chmod a+rwx /mnt/home/scratch/")
# Copy over newest version of sudo project creation script, and ensure permissions are right.
os.system("cp /home/salvus/salvus/salvus/scripts/create_project_user.py /usr/local/bin/; chmod og-w /usr/local/bin/create_project_user.py; chmod og+rx /usr/local/bin/create_project_user.py")
# Re-create the storage user
os.system("groupadd -g 999 -o storage")
os.system("useradd -u 999 -g 999 -o -d /home/storage storage")
os.system("chown -R storage. /home/storage")
os.system("chmod og-rwx -R /home/storage/&")
# Copy over newest version of storage management script to storage user.
os.system("cp /home/salvus/salvus/salvus/scripts/smc_storage.py /home/storage/; chown storage. /home/storage/smc_storage.py")
# Remove the temporary ZFS send/recv streams -- they can't possibly be valid since we're just booting up.
os.system("rm /home/storage/.storage*")
# Import the ZFS pool -- without mounting!
os.system("/home/salvus/salvus/salvus/scripts/mount_zfs_pools.py & ")
else:
# not a compute node, so no need for the storage account, which provides some ssh stuff we might not need...
os.system('rm -rf /home/storage/')
# Lock down some perms a little, just in case I were to mess up somehow at some point
os.system("chmod og-rwx -R /home/salvus/&")
# Configure the backup machine(s)
if hostname.startswith('backup'):
# create a /home/storage directory owned by salvus
os.system("mkdir -p /home/storage; chown -R salvus. /home/storage")
# delete the .ssh/authorized_keys file for the salvus user -- no passwordless login to backup vm's
os.system("rm /home/salvus/.ssh/authorized_keys")
# add lines to sudo control
os.system("echo 'salvus ALL=(ALL) NOPASSWD: /sbin/zfs *' >> /etc/sudoers.d/salvus ")
os.system("echo 'salvus ALL=(ALL) NOPASSWD: /sbin/zpool *' >> /etc/sudoers.d/salvus ")
os.system("chmod 0440 /etc/sudoers.d/salvus ")
# import the projects pool
os.system("/home/salvus/salvus/salvus/scripts/mount_zfs_pools.py & ")
if hostname.startswith('cassandra'):
# import and mount the relevant ZFS pool -- do this blocking, since once the machine is up we had better
# be able to start cassandra itself.
os.system("zpool import -f cassandra ")
if hostname.startswith('compute'):
# Create a firewall so that only the hub nodes can connect to things like ipython and the raw server.
os.system("/home/salvus/salvus/salvus/scripts/compute_firewall.sh")
|
Python
| 0
|
@@ -357,17 +357,186 @@
/home/%22)
-:
+ and not hostname.startswith('cassandra'): # no swap on cassandra -- http://www.datastax.com/documentation/cassandra/2.0/cassandra/install/installRecommendSettings.html
%0A os.
|
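Decoded, the hunk extends the swap condition so Cassandra nodes never enable swap, per the linked DataStax recommendation:

if not os.path.exists("/mnt/home/") and not hostname.startswith('cassandra'):  # no swap on cassandra -- http://www.datastax.com/documentation/cassandra/2.0/cassandra/install/installRecommendSettings.html
    os.system("swapon /dev/salvus-base/swap")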
a537f049bfb61488a056333d362d9983e8e9f88d
|
Fix minor issues in 2020.10.1 file
|
2020/10/p1.py
|
2020/10/p1.py
|
# Python 3.8.3
def get_input():
with open('input.txt', 'r') as f:
return set(int(i) for i in f.read().split())
def main():
puzzle = get_input()
last_joltage = 0
one_jolt = 0
three_jolts = 1 # this is bad lmao
while len(puzzle) != 0:
if last_joltage + 1 in puzzle:
last_joltage = last_joltage + 1
one_jolt += 1
elif last_joltage + 2 in puzzle:
last_joltage = last_joltage + 2
elif last_joltage + 3 in puzzle:
last_joltage = last_joltage + 3
three_jolts += 1
puzzle.remove(last_joltage)
print(one_jolt, three_jolts)
return one_jolt * three_jolts
if __name__ == '__main__':
import time
start = time.perf_counter()
print(main())
print(time.perf_counter() - start)
|
Python
| 0
|
@@ -217,28 +217,8 @@
= 1
- # this is bad lmao
%0A
@@ -660,17 +660,16 @@
jolts%0A%0A%0A
-%0A
if __nam
|
43534d48a063693b4083af85ea5ad8c024491a8f
|
Fix decoder
|
scripts/decoder_functional.py
|
scripts/decoder_functional.py
|
import numpy as np
import matplotlib.pyplot as plt
import pll as PLL
j = (0 + 1j)
def apply_U(stream1, stream2, U, PLOT=True, title="Apply U.H"):
fixed1 = np.zeros(len(stream1), dtype=np.complex64)
fixed2 = np.zeros(len(stream2), dtype=np.complex64)
for idx, val1 in enumerate(stream1):
val2 = stream2[idx]
vec = np.matrix( [ [val1], [val2] ]) # 2x1 Data Vector
out = U.H * vec # Correct with U_Hermitian
fixed1[idx] = out[0]
fixed2[idx] = out[1]
if PLOT:
plt.subplot(2, 1, 1)
plt.plot(fixed1.real)
plt.plot(fixed1.imag)
plt.subplot(2, 1, 2)
plt.plot(fixed2.real)
plt.plot(fixed2.imag)
plt.title(title)
plt.show()
return fixed1, fixed2
def find_offsets_bpsk(data, PLOT=True, title="Offset BPSK"):
# Prep data
normalized = data/np.linalg.norm(data)
squared = np.square(normalized)
# Take FFT and find max
fft_data = np.fft.fft( squared )
fft_shape = np.fft.fftfreq( squared.shape[-1] )
max_idx = np.absolute(fft_data).argmax()
# Plot ?
if PLOT:
        plt.plot(fft_shape, np.absolute(fft_data))  # fft_data is complex; plot its magnitude
plt.title(title)
plt.show()
# Extract
frequency_offset = ( fft_shape[max_idx]/2 ) * (2 * np.pi)
phase_offset = np.sqrt( fft_data[max_idx] )
return frequency_offset, phase_offset
def apply_offsets(data, freq_offset, phase_offset, PLOT=True, title="Applied Offsets"):
fixed = np.zeros(len(data), dtype=np.complex64)
for idx, val in enumerate(data):
fixed[idx] = val * np.exp( -j * freq_offset * idx ) / phase_offset
if PLOT:
plt.plot(fixed.real)
plt.plot(fixed.imag)
plt.title(title)
plt.show()
return fixed
def index_of_first_data(data, PLOT=True, title="Find First Data"):
max_compare = np.max(data)
beginning = 0
for idx, val in enumerate(np.absolute(data)):
if np.absolute(val) >= max_compare/4:
beginning = idx
break
if PLOT:
plt.plot(data.real[beginning:])
plt.plot(data.imag[beginning:])
plt.show()
return beginning
def extract_binary(data, start, end, T, PLOT=True, title="Sample"):
binary = []
raw = []
offset = T//8
for val in data[(start+offset):(end+offset):T]:
if val > 0:
binary.append( 1 )
else:
binary.append( 0 )
raw.append(val)
indicies = range(start+offset, end+offset, T)
if PLOT:
plt.plot(data.real)
plt.plot(data.imag)
plt.plot(indicies, raw, '.', ms=10)
plt.show()
return np.array(binary)
def flip_data_if_needed( data ):
flip = sum(data[:8]) < 4
if flip:
data = 1 - data
return data
def compare_to_sent(received, sent):
print( len(received) )
print( len(sent) )
print(received)
print(sent)
num_errors = np.count_nonzero( received - sent )
print("Number of Errors: %d" % num_errors)
print("Percent Error: %.2f%%" % (num_errors/len(sent) * 100) )
if __name__ == "__main__":
# Load data
raw_data_1 = np.fromfile("../data/received_1_trimmed.bin", dtype=np.complex64)
raw_data_2 = np.fromfile("../data/received_2_trimmed.bin", dtype=np.complex64)
true_data_1 = np.load("../data/data_1.npy")
true_data_2 = np.load("../data/data_2.npy")
U = np.matrix(np.load("../data/U_approx.npy"))
E = np.load("../data/E_approx.npy")
# Apply U (technically U.H)
data_1, data_2 = apply_U(raw_data_1, raw_data_2, U, PLOT=False)
# Correct for frequency & phase offset
freq_off_1, phase_off_1 = find_offsets_bpsk(data_1, PLOT=True)
freq_off_2, phase_off_2 = find_offsets_bpsk(data_2, PLOT=True)
# Apply offsets
data_1 = apply_offsets(data_1, freq_off_1, phase_off_1, PLOT=False)
data_2 = apply_offsets(data_2, freq_off_2, phase_off_2, PLOT=False)
# Apply PLL
data_1 = data_1 / np.std(data_1)
data_2 = data_2 / np.std(data_2)
kp = 0.3
ki = 0.05
kd = 0.0
pll = PLL.PLL(data_1, kp, ki, kd)
pll.correct_phase_offset()
data_1 *= np.exp( -pll.phase_list * j ) * j
pll = PLL.PLL(data_2, kp, ki, kd)
pll.correct_phase_offset()
data_2 *= np.exp( -pll.phase_list * j ) * j
## Compare to actual values we sent
start1 = index_of_first_data(data_1, PLOT=False)
start2 = index_of_first_data(data_2, PLOT=False)
end1 = len(data_1) - index_of_first_data(data_1[::-1], PLOT=False)
end2 = len(data_2) - index_of_first_data(data_2[::-1], PLOT=False)
bin1 = extract_binary(data_1, start1, end1, 400, PLOT=True)
bin2 = extract_binary(data_2, start2, end2, 400, PLOT=True)
bin1 = flip_data_if_needed(bin1)
bin2 = flip_data_if_needed(bin2)
compare_to_sent(bin1, true_data_1)
compare_to_sent(bin2, true_data_2)
|
Python
| 0.000258
|
@@ -4573,24 +4573,93 @@
LOT=False)%0A%0A
+ start1, start2 = 0, 0%0A end1, end2 = len(data_1), len(data_2)%0A%0A
bin1 = e
|
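Decoded, the fix discards the detected start/end indices and samples the full extent of both streams instead:

    start1, start2 = 0, 0
    end1, end2 = len(data_1), len(data_2)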
b51e51dc8b1ce66815980bf2e8424f6fe282af66
|
test type declare with resolve
|
metaconfig/tests/test_simple.py
|
metaconfig/tests/test_simple.py
|
from nose.tools import *
from io import StringIO
from textwrap import dedent
from metaconfig import Config
def test_declare_empty():
source = """
--- !declare {}
...
"""
config = Config()
with StringIO(dedent(source)) as stream:
config.load(stream)
|
Python
| 0
|
@@ -279,8 +279,586 @@
stream)%0A
+%0Adef test_declare_resolve():%0A%0A source = %22%22%22%0A --- !declare%0A type:%0A type: !resolve builtins.type%0A load: !resolve metaconfig.construct_from_sequence%0A ...%0A%0A --- !let%0A integer: !type %0A - 0%0A string: !type %0A - %22%22%0A float: !type %0A - 1.0%0A %22null%22: !type%0A - ~%0A ...%0A %22%22%22%0A%0A config = Config()%0A%0A with StringIO(dedent(source)) as stream:%0A config.load(stream)%0A%0A eq_(int, config.get(%22integer%22))%0A eq_(str, config.get(%22string%22))%0A eq_(float, config.get(%22float%22))%0A eq_(type(None), config.get(%22null%22))%0A
|
52c6b6013c7904c5fcc142e7c69f9c53dd0815c3
|
Fix broken foreign key in dashboard schema
|
lib/pegasus/python/Pegasus/netlogger/analysis/schema/stampede_dashboard_schema.py
|
lib/pegasus/python/Pegasus/netlogger/analysis/schema/stampede_dashboard_schema.py
|
"""
Contains the code to create and map objects to the Stampede DB schema
via a SQLAlchemy interface.
"""
__author__ = "Monte Goode"
__author__ = "Karan Vahi"
import time
import warnings
import logging
from sqlalchemy import *
from sqlalchemy import orm, exc
from Pegasus.netlogger.analysis.schema._base import SABase
log = logging.getLogger(__name__)
CURRENT_SCHEMA_VERSION = 4.0
# Empty classes that will be populated and mapped
# to tables via the SQLAlch mapper.
class DashboardWorkflow(SABase):
pass
class DashboardWorkflowstate(SABase):
pass
from Pegasus.service.catalogs import ReplicaCatalog, RC_FORMATS
from Pegasus.service.catalogs import SiteCatalog, SC_FORMATS
from Pegasus.service.catalogs import TransformationCatalog, TC_FORMATS
def initializeToDashboardDB(db, metadata, kw={}):
"""
    Function to create the Workflow schema, which just tracks the
    root-level workflows, if it does not exist; if it does exist,
    connect and set up object mappings.
@type db: SQLAlch db/engine object.
@param db: Engine object to initialize.
@type metadata: SQLAlch metadata object.
@param metadata: Associated metadata object to initialize.
@type kw: dict
@param kw: Keywords to pass to Table() functions
"""
KeyInt = Integer
# MySQL likes using BIGINT for PKs but some other
# DB don't like it so swap as needed.
if db.name == 'mysql':
KeyInt = BigInteger
kw['mysql_charset'] = 'latin1'
if db.name == 'sqlite':
warnings.filterwarnings('ignore', '.*does \*not\* support Decimal*.')
# pg_workflow definition
# ==> Information comes from braindump.txt file
# wf_uuid = autogenerated by database wfuuid, submitted, directory, database connection
# dax_label = label
# timestamp = pegasus_wf_time
# submit_hostname = (currently missing)
# submit_dir = run
#
pg_workflow = Table('master_workflow', metadata,
Column('wf_id', KeyInt, primary_key=True, nullable=False),
Column('wf_uuid', VARCHAR(255), nullable=False),
Column('dax_label', VARCHAR(255), nullable=True),
Column('dax_version', VARCHAR(255), nullable=True),
Column('dax_file', VARCHAR(255), nullable=True),
Column('dag_file_name', VARCHAR(255), nullable=True),
Column('timestamp', NUMERIC(precision=16,scale=6), nullable=True),
Column('submit_hostname', VARCHAR(255), nullable=True),
Column('submit_dir', TEXT, nullable=True),
Column('planner_arguments', TEXT, nullable=True),
Column('user', VARCHAR(255), nullable=True),
Column('grid_dn', VARCHAR(255), nullable=True),
Column('planner_version', VARCHAR(255), nullable=True),
Column('db_url', TEXT, nullable=True),
**kw
)
Index('KEY_MASTER_WF_ID', pg_workflow.c.wf_id, unique=True)
Index('UNIQUE_MASTER_WF_UUID', pg_workflow.c.wf_uuid, unique=True)
try:
orm.mapper(DashboardWorkflow, pg_workflow )
except exc.ArgumentError, e:
log.warning(e)
pg_workflowstate = Table('master_workflowstate', metadata,
# All three columns are marked as primary key to produce the desired
# effect - ie: it is the combo of the three columns that make a row
# unique.
Column('wf_id', KeyInt, ForeignKey('workflow.wf_id', ondelete='CASCADE'),
nullable=False, primary_key=True),
Column('state', Enum('WORKFLOW_STARTED', 'WORKFLOW_TERMINATED'),
nullable=False, primary_key=True),
Column('timestamp', NUMERIC(precision=16,scale=6), nullable=False, primary_key=True,
default=time.time()),
Column('restart_count', INT, nullable=False),
Column('status', INT, nullable=True),
**kw
)
Index('UNIQUE_MASTER_WORKFLOWSTATE',
pg_workflowstate.c.wf_id,
pg_workflowstate.c.state,
pg_workflowstate.c.timestamp, unique=True)
try:
orm.mapper(DashboardWorkflowstate, pg_workflowstate)
except exc.ArgumentError, e:
log.warning(e)
pg_replica_catalog = Table("replica_catalog", metadata,
Column('id', Integer, primary_key=True),
Column('name', String(100), nullable=False),
Column('format', Enum(*RC_FORMATS), nullable=False),
Column('created', DateTime, nullable=False),
Column('updated', DateTime, nullable=False),
Column('username', String(100), nullable=False),
mysql_engine = 'InnoDB',
**kw
)
Index('UNIQUE_REPLICA_CATALOG',
pg_replica_catalog.c.username,
pg_replica_catalog.c.name)
try:
orm.mapper(ReplicaCatalog, pg_replica_catalog)
except exc.ArgumentError, e:
log.warning(e)
pg_site_catalog = Table('site_catalog', metadata,
Column('id', Integer, primary_key=True),
Column('name', String(100), nullable=False),
Column('format', Enum(*SC_FORMATS), nullable=False),
Column('created', DateTime, nullable=False),
Column('updated', DateTime, nullable=False),
Column('username', String(100), nullable=False),
mysql_engine = 'InnoDB',
**kw
)
Index('UNIQUE_SITE_CATALOG',
pg_site_catalog.c.username,
pg_site_catalog.c.name)
try:
orm.mapper(SiteCatalog, pg_site_catalog)
except exc.ArgumentError, e:
log.warning(e)
pg_transformation_catalog = Table('transformation_catalog', metadata,
Column('id', Integer, primary_key=True),
Column('name', String(100), nullable=False),
Column('format', Enum(*TC_FORMATS), nullable=False),
Column('created', DateTime, nullable=False),
Column('updated', DateTime, nullable=False),
Column('username', String(100), nullable=False),
mysql_engine = 'InnoDB',
**kw
)
Index('UNIQUE_TRANSFORMATION_CATALOG',
pg_transformation_catalog.c.username,
pg_transformation_catalog.c.name)
try:
orm.mapper(TransformationCatalog, pg_transformation_catalog)
except exc.ArgumentError, e:
log.warning(e)
metadata.create_all(db)
|
Python
| 0.000029
|
@@ -3596,16 +3596,23 @@
ignKey('
+master_
workflow
|
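Decoded, the one-line fix points the foreign key at the actual table name (master_workflow rather than the non-existent workflow):

        Column('wf_id', KeyInt, ForeignKey('master_workflow.wf_id', ondelete='CASCADE'),
               nullable=False, primary_key=True),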
ac0856d79fc948733e80347a6de09ed0ed26802d
|
Fix pylint error.
|
app/handlers/tests/test_batch_handler.py
|
app/handlers/tests/test_batch_handler.py
|
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Test module for the BatchHandler handler."""
import concurrent.futures
import json
import mock
import mongomock
import tornado
import tornado.testing
import handlers.app
import urls
# Default Content-Type header returned by Tornado.
DEFAULT_CONTENT_TYPE = 'application/json; charset=UTF-8'
class TestBatchHandler(
tornado.testing.AsyncHTTPTestCase, tornado.testing.LogTrapTestCase):
def setUp(self):
self.mongodb_client = mongomock.Connection()
super(TestBatchHandler, self).setUp()
patched_find_token = mock.patch("handlers.base.BaseHandler._find_token")
self.find_token = patched_find_token.start()
self.find_token.return_value = "token"
patched_validate_token = mock.patch("handlers.common.validate_token")
self.validate_token = patched_validate_token.start()
self.validate_token.return_value = True
self.addCleanup(patched_find_token.stop)
self.addCleanup(patched_validate_token.stop)
def get_app(self):
dboptions = {
'dbpassword': "",
'dbuser': ""
}
settings = {
'dboptions': dboptions,
'client': self.mongodb_client,
'executor': concurrent.futures.ThreadPoolExecutor(max_workers=2),
'default_handler_class': handlers.app.AppHandler,
'debug': False
}
return tornado.web.Application([urls._BATCH_URL], **settings)
def get_new_ioloop(self):
return tornado.ioloop.IOLoop.instance()
def test_delete_no_token(self):
response = self.fetch('/batch', method='DELETE')
self.assertEqual(response.code, 501)
self.assertEqual(
response.headers['Content-Type'], DEFAULT_CONTENT_TYPE)
def test_delete_with_token(self):
headers = {'Authorization': 'foo'}
response = self.fetch(
'/batch', method='DELETE', headers=headers,
)
self.assertEqual(response.code, 501)
self.assertEqual(
response.headers['Content-Type'], DEFAULT_CONTENT_TYPE)
def test_get_no_token(self):
response = self.fetch('/batch', method='GET')
self.assertEqual(response.code, 501)
self.assertEqual(
response.headers['Content-Type'], DEFAULT_CONTENT_TYPE)
def test_get_with_token(self):
headers = {'Authorization': 'foo'}
response = self.fetch(
'/batch', method='GET', headers=headers,
)
self.assertEqual(response.code, 501)
self.assertEqual(
response.headers['Content-Type'], DEFAULT_CONTENT_TYPE)
def test_post_without_token(self):
self.find_token.return_value = None
batch_dict = {
"batch": [
{"method": "GET", "collection": "count", "operation_id": "foo"}
]
}
body = json.dumps(batch_dict)
response = self.fetch('/batch', method='POST', body=body)
self.assertEqual(response.code, 403)
self.assertEqual(
response.headers['Content-Type'], DEFAULT_CONTENT_TYPE)
def test_post_not_json_content(self):
headers = {'Authorization': 'foo', 'Content-Type': 'application/json'}
response = self.fetch(
'/batch', method='POST', body='', headers=headers
)
self.assertEqual(response.code, 422)
self.assertEqual(
response.headers['Content-Type'], DEFAULT_CONTENT_TYPE)
def test_post_wrong_content_type(self):
headers = {'Authorization': 'foo'}
response = self.fetch(
'/batch', method='POST', body='', headers=headers
)
self.assertEqual(response.code, 415)
self.assertEqual(
response.headers['Content-Type'], DEFAULT_CONTENT_TYPE)
def test_post_wrong_json(self):
headers = {'Authorization': 'foo', 'Content-Type': 'application/json'}
body = json.dumps(dict(foo='foo', bar='bar'))
response = self.fetch(
'/batch', method='POST', body=body, headers=headers
)
self.assertEqual(response.code, 400)
self.assertEqual(
response.headers['Content-Type'], DEFAULT_CONTENT_TYPE)
@mock.patch("taskqueue.tasks.run_batch_group")
def test_post_correct(self, mocked_run_batch):
headers = {'Authorization': 'foo', 'Content-Type': 'application/json'}
batch_dict = {
"batch": [
{"method": "GET", "collection": "count", "operation_id": "foo"}
]
}
body = json.dumps(batch_dict)
mocked_run_batch.return_value = {}
response = self.fetch(
'/batch', method='POST', body=body, headers=headers
)
self.assertEqual(response.code, 200)
self.assertEqual(
response.headers['Content-Type'], DEFAULT_CONTENT_TYPE)
mocked_run_batch.assert_called_once_with(
[
{
'operation_id': 'foo',
'method': 'GET',
'collection': 'count'
}
],
{
'dbuser': '',
'dbpassword': ''
}
)
|
Python
| 0.000001
|
@@ -1202,32 +1202,45 @@
en = mock.patch(
+%0A
%22handlers.base.B
|
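Decoded, the pylint fix is a pure line wrap of the over-long mock.patch call:

        patched_find_token = mock.patch(
            "handlers.base.BaseHandler._find_token")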
8c2ac2e2af173c071f895879f078fa8cf4c39173
|
Change the docstring indentation
|
bears/python/PEP8Bear.py
|
bears/python/PEP8Bear.py
|
import autopep8
import sys
from coalib.bearlib import deprecate_settings
from coalib.bearlib.spacing.SpacingHelper import SpacingHelper
from coalib.bears.LocalBear import LocalBear
from dependency_management.requirements.PipRequirement import PipRequirement
from coalib.results.Diff import Diff
from coalib.results.Result import Result
from coalib.settings.Setting import typed_list
class PEP8Bear(LocalBear):
LANGUAGES = {'Python', 'Python 2', 'Python 3'}
REQUIREMENTS = {PipRequirement('autopep8', '1.2')}
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
LICENSE = 'AGPL-3.0'
CAN_FIX = {'Formatting'}
ASCIINEMA_URL = 'https://asciinema.org/a/165394'
@deprecate_settings(indent_size='tab_width')
def run(self, filename, file,
max_line_length: int = 79,
indent_size: int = SpacingHelper.DEFAULT_TAB_WIDTH,
pep_ignore: typed_list(str) = (),
pep_select: typed_list(str) = (),
local_pep8_config: bool = False,
):
"""
Detects and fixes PEP8 incompliant code. This bear will not change
functionality of the code in any way.
:param max_line_length: Maximum number of characters for a line.
When set to 0 allows infinite line length.
:param indent_size: Number of spaces per indentation level.
:param pep_ignore: A list of errors/warnings to ignore.
:param pep_select: A list of errors/warnings to exclusively
apply.
:param local_pep8_config: Set to true if autopep8 should use a config
file as if run normally from this directory.
"""
if not max_line_length:
max_line_length = sys.maxsize
options = {'ignore': pep_ignore,
'select': pep_select,
'max_line_length': max_line_length,
'indent_size': indent_size}
corrected = autopep8.fix_code(''.join(file),
apply_config=local_pep8_config,
options=options).splitlines(True)
diffs = Diff.from_string_arrays(file, corrected).split_diff()
for diff in diffs:
yield Result(self,
'The code does not comply to PEP8.',
affected_code=(diff.range(filename),),
diffs={filename: diff})
|
Python
| 0
|
@@ -1216,16 +1216,26 @@
_length:
+%0A
Maxim
@@ -1270,38 +1270,16 @@
a line.%0A
-
@@ -1352,23 +1352,29 @@
nt_size:
+%0A
+
Number o
@@ -1431,24 +1431,26 @@
_ignore:
+%0A
A list o
@@ -1433,32 +1433,35 @@
gnore:%0A
+
A list of errors
@@ -1507,24 +1507,25 @@
_select:
+%0A
A list o
@@ -1508,32 +1508,36 @@
select:%0A
+
A list of errors
@@ -1564,42 +1564,8 @@
vely
-%0A
app
@@ -1601,16 +1601,28 @@
_config:
+%0A
Set to
@@ -1661,42 +1661,8 @@
nfig
-%0A
fil
@@ -1681,16 +1681,28 @@
normally
+%0A
from th
|
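The hunks above only re-wrap the :param descriptions onto their own lines under the parameter names. For reference, the bear's core call is plain autopep8; a minimal standalone sketch of the same call, using the same options keys the bear builds:

    import autopep8

    corrected = autopep8.fix_code(
        'x=1\n',
        options={'max_line_length': 79, 'indent_size': 4})
    print(corrected)  # 'x = 1\n'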
60b4fc88617f800208f00f24468db6798369fe2e
|
Add user-supplied arguments in log_handler
|
neutron/openstack/common/log_handler.py
|
neutron/openstack/common/log_handler.py
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from oslo.config import cfg
from neutron.openstack.common import notifier
class PublishErrorsHandler(logging.Handler):
def emit(self, record):
if ('neutron.openstack.common.notifier.log_notifier' in
cfg.CONF.notification_driver):
return
notifier.api.notify(None, 'error.publisher',
'error_notification',
notifier.api.ERROR,
dict(error=record.msg))
|
Python
| 0.002685
|
@@ -594,16 +594,17 @@
icense.%0A
+%0A
import l
@@ -1092,10 +1092,19 @@
ord.
-msg
+getMessage()
))%0A
|
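Decoded, the fix swaps record.msg for record.getMessage(), which interpolates any user-supplied arguments into the format string before notifying:

        notifier.api.notify(None, 'error.publisher',
                            'error_notification',
                            notifier.api.ERROR,
                            dict(error=record.getMessage()))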
f83a4b400a65b3cb63a4ca946103f25816909b62
|
Remove accidental hard-coding of Firefox binary path in tests
|
conftest.py
|
conftest.py
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import socket
import subprocess
import time
import urllib
import pytest
from selenium import webdriver
from selenium.webdriver import DesiredCapabilities
drivers = (
'BlackBerry',
'Chrome',
'Edge',
'Firefox',
'Ie',
'Marionette',
'PhantomJS',
'Remote',
'Safari',
)
def pytest_addoption(parser):
parser.addoption(
'--driver',
action='append',
choices=drivers,
dest='drivers',
metavar='DRIVER',
help='driver to run tests against ({0})'.format(', '.join(drivers)))
def pytest_ignore_collect(path, config):
_drivers = set(drivers).difference(config.getoption('drivers') or drivers)
return len([d for d in _drivers if d.lower() in str(path)]) > 0
@pytest.fixture(scope='function')
def driver(request):
kwargs = {}
try:
driver_class = request.param
except AttributeError:
raise Exception('This test requires a --driver to be specified.')
skip = request.node.get_marker('ignore_{0}'.format(driver_class.lower()))
if skip is not None:
reason = skip.kwargs.get('reason') or skip.name
pytest.skip(reason)
if driver_class == 'BlackBerry':
kwargs.update({'device_password': 'password'})
if driver_class == 'Firefox':
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
binary = FirefoxBinary('/Applications/Firefox 47.app/Contents/MacOS/firefox-bin')
kwargs.update({'capabilities': {
'marionette': False,
'binary': binary}})
if driver_class == 'Marionette':
driver_class = 'Firefox'
kwargs.update({'capabilities': {'marionette': True}})
if driver_class == 'Remote':
capabilities = DesiredCapabilities.FIREFOX.copy()
kwargs.update({'desired_capabilities': capabilities})
driver = getattr(webdriver, driver_class)(**kwargs)
yield driver
driver.quit()
@pytest.fixture(autouse=True, scope='session')
def server(request):
if 'Remote' not in request.config.getoption('drivers'):
yield None
return
_host = 'localhost'
_port = 4444
_path = 'buck-out/gen/java/server/src/org/openqa/grid/selenium/selenium.jar'
def wait_for_server(url, timeout):
start = time.time()
while time.time() - start < timeout:
try:
urllib.urlopen(url)
return 1
except IOError:
time.sleep(0.2)
return 0
_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
url = 'http://{}:{}/wd/hub'.format(_host, _port)
try:
_socket.connect((_host, _port))
print('The remote driver server is already running or something else'
'is using port {}, continuing...'.format(_port))
except Exception:
print('Starting the Selenium server')
process = subprocess.Popen(['java', '-jar', _path])
print('Selenium server running as process: {}'.format(process.pid))
assert wait_for_server(url, 10), 'Timed out waiting for Selenium server at {}'.format(url)
print('Selenium server is ready')
yield process
process.terminate()
process.wait()
print('Selenium server has been terminated')
|
Python
| 0.000011
|
@@ -2065,174 +2065,8 @@
x':%0A
- from selenium.webdriver.firefox.firefox_binary import FirefoxBinary%0A binary = FirefoxBinary('/Applications/Firefox 47.app/Contents/MacOS/firefox-bin')%0A
@@ -2101,29 +2101,16 @@
ties': %7B
-%0A
'marione
@@ -2124,38 +2124,8 @@
alse
-,%0A 'binary': binary
%7D%7D)%0A
|
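Decoded, the Firefox branch shrinks to a plain capabilities update with no hard-coded binary path:

    if driver_class == 'Firefox':
        kwargs.update({'capabilities': {'marionette': False}})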
3202a561b79cf0ef30643671a9edd91ad67b999a
|
Fix wrong type hint in Schema constructor.
|
rororo/schemas/schema.py
|
rororo/schemas/schema.py
|
"""
=====================
rororo.schemas.schema
=====================
Implement class for validating request and response data against JSON Schema.
"""
import logging
import types
from typing import Any, Callable, Optional, Type # noqa: F401
try:
from multidict import MultiDict, MultiDictProxy
except ImportError: # pragma: no cover
MultiDict, MultiDictProxy = None, None
from jsonschema.exceptions import ValidationError
from .exceptions import Error
from .utils import AnyMapping, defaults, validate_func_factory, ValidateFunc
from .validators import DefaultValidator
__all__ = ('Schema', )
logger = logging.getLogger(__name__)
class Schema(object):
"""Validate request and response data against JSON Schema."""
__slots__ = (
'error_class', 'module', 'response_factory', 'validate_func',
'validation_error_class', 'validator_class', '_valid_request',
)
def __init__(self,
module: types.ModuleType,
*,
response_factory: Callable[..., Any]=None,
error_class: Type[Exception]=Error,
validator_class: Any=DefaultValidator,
validation_error_class: Type[Exception]=ValidationError,
validate_func: ValidateFunc=None) -> None:
"""Initialize Schema object.
:param module: Module contains at least request and response schemas.
:param response_factory: Put valid response data to this factory func.
:param error_class:
Wrap all errors in given class. If empty real errors would be
reraised.
:param validator_class:
Validator class to use for validating request and response data.
By default: ``rororo.schemas.validators.DefaultValidator``
:param validation_error_class:
Error class to be expected in case of validation error. By default:
``jsonschema.exceptions.ValidationError``
:param validate_func:
Validate function to be called for validating request and response
data. Function will receive 2 args: ``schema`` and ``pure_data``.
By default: ``None``
"""
self._valid_request = None # type: Optional[bool]
self.module = module
self.response_factory = response_factory
self.error_class = error_class
self.validator_class = validator_class
self.validate_func = (
validate_func or
validate_func_factory(validator_class))
self.validation_error_class = validation_error_class
def make_error(self,
message: str,
*,
error: Exception=None,
# ``error_class: Type[Exception]=None`` doesn't work on
                   # Python 3.5.2, but that is the exact version run by Read the
# Docs :( More info: http://stackoverflow.com/q/42942867
error_class: Any=None) -> Exception:
"""Return error instantiated from given message.
:param message: Message to wrap.
:param error: Validation error.
:param error_class:
Special class to wrap error message into. When omitted
``self.error_class`` will be used.
"""
return (error_class or self.error_class)(message)
def make_response(self,
data: AnyMapping=None,
**kwargs: Any) -> AnyMapping:
r"""Validate response data and wrap it inside response factory.
        :param data: Response data. Could be omitted.
:param \*\*kwargs: Keyword arguments to be passed to response factory.
"""
if not self._valid_request:
logger.error('Request not validated, cannot make response')
raise self.make_error('Request not validated before, cannot make '
'response')
if data is None and self.response_factory is None:
            logger.error('Response data omitted, but no response factory is used')
raise self.make_error('Response data could be omitted only when '
'response factory is used')
response_schema = getattr(self.module, 'response', None)
if response_schema is not None:
self._validate(data, response_schema) # type: ignore
if self.response_factory is not None:
return self.response_factory(
*([data] if data is not None else []),
**kwargs)
return data or {}
def validate_request(self,
data: AnyMapping,
*additional: AnyMapping,
merged_class: Type[dict]=dict) -> AnyMapping:
r"""Validate request data against request schema from module.
:param data: Request data.
:param \*additional:
Additional data dicts to be merged with base request data.
:param merged_class:
            When additional data dicts are supplied, the method by default
            returns a merged **dict** with all data, but you can customize
            this to use a read-only dict or any other class or callable.
"""
request_schema = getattr(self.module, 'request', None)
if request_schema is None:
logger.error(
'Request schema should be defined',
extra={'schema_module': self.module,
'schema_module_attrs': dir(self.module)})
raise self.make_error('Request schema should be defined')
# Merge base and additional data dicts, but only if additional data
# dicts have been supplied
if additional:
data = merged_class(self._merge_data(data, *additional))
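        # The finally resets the flag, so it only ends up True when validation succeeds.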
try:
self._validate(data, request_schema)
finally:
self._valid_request = False
self._valid_request = True
processor = getattr(self.module, 'request_processor', None)
return processor(data) if processor else data
def _merge_data(self, data: AnyMapping, *additional: AnyMapping) -> dict:
r"""Merge base data and additional dicts.
:param data: Base data.
:param \*additional: Additional data dicts to be merged into base dict.
"""
return defaults(
dict(data) if not isinstance(data, dict) else data,
*(dict(item) for item in additional))
def _pure_data(self, data: AnyMapping) -> dict:
"""
Convert mapping to pure dict instance to be compatible and possible to
pass to default ``jsonschema.validate`` func.
:param data: Data mapping.
"""
return dict(data) if not isinstance(data, dict) else data
def _validate(self, data: AnyMapping, schema: AnyMapping) -> AnyMapping:
"""Validate data against given schema.
:param data: Data to validate.
:param schema: Schema to use for validation.
"""
try:
return self.validate_func(schema, self._pure_data(data))
except self.validation_error_class as err:
logger.error(
'Schema validation error',
exc_info=True,
extra={'schema': schema, 'schema_module': self.module})
if self.error_class is None:
raise
raise self.make_error('Validation Error', error=err) from err
|
Python
| 0
|
@@ -1083,29 +1083,16 @@
ss:
-Type%5BException%5D=Error
+Any=None
,%0A
@@ -3293,48 +3293,132 @@
-return (error_class or self.error_class)
+if error_class is None:%0A error_class = self.error_class if self.error_class else Error%0A return error_class
(mes
|
987a74ab34da1058e8abca2afb295fd50408401d
|
Remove unused local variable `ret`. It is always initialized in the inner `if` statements, so no need to declare it in this outer scope
|
appdaemon/plugins/dummy/dummyplugin.py
|
appdaemon/plugins/dummy/dummyplugin.py
|
import yaml
import asyncio
import copy
from appdaemon.appdaemon import AppDaemon
from appdaemon.plugin_management import PluginBase
class DummyPlugin(PluginBase):
def __init__(self, ad: AppDaemon, name, args):
super().__init__(ad, name, args)
self.AD = ad
self.stopping = False
self.config = args
self.name = name
self.logger.info("Dummy Plugin Initializing", "DUMMY")
self.name = name
if "namespace" in args:
self.namespace = args["namespace"]
else:
self.namespace = "default"
with open(args["configuration"], 'r') as yamlfd:
config_file_contents = yamlfd.read()
try:
self.config = yaml.load(config_file_contents, Loader=yaml.SafeLoader)
except yaml.YAMLError as exc:
self.logger.warning("Error loading configuration")
if hasattr(exc, 'problem_mark'):
if exc.context is not None:
self.logger.warning("parser says")
self.logger.warning(str(exc.problem_mark))
self.logger.warning(str(exc.problem) + " " + str(exc.context))
else:
self.logger.warning("parser says")
self.logger.warning(str(exc.problem_mark))
self.logger.warning(str(exc.problem))
self.state = self.config["initial_state"]
self.current_event = 0
self.logger.info("Dummy Plugin initialization complete")
def stop(self):
self.logger.debug("stop() called for %s", self.name)
self.stopping = True
#
# Get initial state
#
async def get_complete_state(self):
self.logger.debug("*** Sending Complete State: {} ***".format(self.state))
return copy.deepcopy(self.state)
async def get_metadata(self):
return {
"latitude": 41,
"longitude": -73,
"elevation": 0,
"time_zone": "America/New_York"
}
#
    # Utility gets called every second (or longer if configured).
# Allows plugin to do any housekeeping required
#
def utility(self):
pass
#self.logger.debug("*** Utility ***".format(self.state))
#
# Handle state updates
#
async def get_updates(self):
await self.AD.plugins.notify_plugin_started(self.name, self.namespace, self.get_metadata(), self.get_complete_state(), True)
while not self.stopping:
ret = None
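            # If the sequence is exhausted and looping is disabled, idle until stopped.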
if self.current_event >= len(self.config["sequence"]["events"]) and ("loop" in self.config["sequence"] and self.config["loop"] == 0 or "loop" not in self.config["sequence"]):
while not self.stopping:
await asyncio.sleep(1)
return None
else:
event = self.config["sequence"]["events"][self.current_event]
await asyncio.sleep(event["offset"])
if "state" in event:
entity = event["state"]["entity"]
old_state = self.state[entity]
new_state = event["state"]["newstate"]
self.state[entity] = new_state
ret = \
{
"event_type": "state_changed",
"data":
{
"entity_id": entity,
"new_state": new_state,
"old_state": old_state
}
}
self.logger.debug("*** State Update: %s ***", ret)
await self.AD.state.process_event(self.namespace, copy.deepcopy(ret))
elif "event" in event:
ret = \
{
"event_type": event["event"]["event_type"],
"data": event["event"]["data"],
}
self.logger.debug("*** Event: %s ***", ret)
await self.AD.state.process_event(self.namespace, copy.deepcopy(ret))
elif "disconnect" in event:
self.logger.debug("*** Disconnected ***")
self.AD.plugins.notify_plugin_stopped(self.namespace)
elif "connect" in event:
self.logger.debug("*** Connected ***")
await self.AD.plugins.notify_plugin_started(self.namespace)
self.current_event += 1
if self.current_event >= len(self.config["sequence"]["events"]) and "loop" in self.config["sequence"] and self.config["sequence"]["loop"] == 1:
self.current_event = 0
#
# Set State
#
def set_plugin_state(self, entity, state, **kwargs):
self.logger.debug("*** Setting State: %s = %s ***", entity, state)
self.state[entity] = state
def get_namespace(self):
return self.namespace
|
Python
| 0.000001
|
@@ -2497,30 +2497,8 @@
ng:%0A
- ret = None
%0A
|
6dade12592e4ef9e5bfd236846882f1856789476
|
Fix throwing exceptions.
|
routeros_api/base_api.py
|
routeros_api/base_api.py
|
import socket
from routeros_api import exceptions
LENGTH_MATRIX = [
(0x80, 0x0),
(0x40, 0x80),
(0x20, 0xC0),
(0x10, 0xE0),
(0x1, 0xF0),
]
OVER_MAX_LENGTH_MASK = 0xF8
class Connection(object):
def __init__(self, socket):
self.socket = socket
def send_sentence(self, words):
try:
for word in words + [b'']:
full_word = encode_length(len(word)) + word
self.socket.sendall(full_word)
except socket.error as e:
exceptions.RouterOsApiConnectionError(str(e))
def receive_sentence(self):
try:
return list(iter(self.receive_word, b''))
except socket.error as e:
exceptions.RouterOsApiConnectionError(str(e))
def receive_word(self):
result = []
result_length = 0
expected_length = decode_length(self.socket.recv)
while result_length != expected_length:
received = self.socket.recv(expected_length - result_length)
result.append(received)
result_length += len(received)
assert received
assert result_length <= expected_length
return b''.join(result)
def encode_length(length):
data, number_of_bytes = _encode_length(length)
return to_bytes(data, number_of_bytes)
def _encode_length(length):
if length < 0:
raise exceptions.FatalRouterOsApiError("Negative length.")
for bytes, (max_value, mask) in enumerate(LENGTH_MATRIX):
offset = 8 * bytes
if length < (max_value << offset):
return length | (mask << offset), bytes + 1
raise exceptions.FatalRouterOsApiError("String to long.")
def to_bytes(number, length):
if hasattr(number, 'to_bytes'):
return number.to_bytes(length, 'big')
else:
result = []
for byte in reversed(range(length)):
result.append(chr((number >> (8 * byte)) & 0xff))
return b''.join(result)
def decode_length(read):
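    # The high bits of the first byte select how many extra length bytes follow,
    # mirroring the masks in LENGTH_MATRIX; the remaining bits carry the value.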
first = ord(read(1))
masks = tuple(zip(*LENGTH_MATRIX))[1]
mask_with_next = zip(masks, masks[1:] + (OVER_MAX_LENGTH_MASK,))
for bytes, (mask, next_mask) in enumerate(mask_with_next):
if next_mask & first == mask:
result = first & ~next_mask
break
else:
raise exceptions.FatalRouterOsApiError("Malformed length")
for _ in range(bytes):
result <<= 8
result += ord(read(1))
return result
|
Python
| 0
|
@@ -506,32 +506,38 @@
e:%0A
+raise
exceptions.Route
@@ -708,24 +708,30 @@
+raise
exceptions.R
|
5e25e5c948242c5cbfa3e0bf1ba484ad11c70409
|
Change qtile-top to use new command graph
|
libqtile/scripts/qtile_top.py
|
libqtile/scripts/qtile_top.py
|
# Copyright (c) 2015, Roger Duran
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Command-line top-like tool for qtile
"""
import os
import time
import argparse
import curses
import linecache
import tracemalloc
from tracemalloc import Snapshot
from libqtile import command
class TraceNotStarted(Exception):
pass
class TraceCantStart(Exception):
pass
def parse_args():
parser = argparse.ArgumentParser(description="Top like for qtile")
parser.add_argument('-l', '--lines', type=int, dest="lines", default=10,
help='Number of lines.')
parser.add_argument('-r', '--raw', dest="raw", action="store_true",
default=False, help='Output raw without curses')
parser.add_argument('-t', '--time', type=float, dest="seconds",
default=1.5, help='Number of seconds to refresh')
parser.add_argument('--force-start', dest="force_start",
action="store_true", default=False,
help='Force start tracemalloc on qtile')
parser.add_argument('-s', '--socket', type=str, dest="socket",
help='Use specified communication socket.')
opts = parser.parse_args()
return opts
def get_trace(client, force_start):
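    # Ask qtile for a tracemalloc snapshot dump, optionally starting tracing first.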
(started, path) = client.tracemalloc_dump()
if force_start and not started:
client.tracemalloc_toggle()
(started, path) = client.tracemalloc_dump()
if not started:
raise TraceCantStart
elif not started:
raise TraceNotStarted
return Snapshot.load(path)
def filter_snapshot(snapshot):
return snapshot.filter_traces((
tracemalloc.Filter(False, "<frozen importlib._bootstrap>"),
tracemalloc.Filter(False, "<unknown>"),
))
def get_stats(scr, client, group_by='lineno', limit=10, seconds=1.5,
force_start=False):
(max_y, max_x) = scr.getmaxyx()
curses.init_pair(1, curses.COLOR_GREEN, curses.COLOR_BLACK)
while True:
scr.addstr(0, 0, "Qtile - Top {} lines".format(limit))
scr.addstr(1, 0, '{0:<3s} {1:<40s} {2:<30s} {3:<16s}'.format('#', 'Line', 'Memory', ' ' * (max_x - 71)),
curses.A_BOLD | curses.A_REVERSE)
snapshot = get_trace(client, force_start)
snapshot = filter_snapshot(snapshot)
top_stats = snapshot.statistics(group_by)
cnt = 1
for index, stat in enumerate(top_stats[:limit], 1):
frame = stat.traceback[0]
# replace "/path/to/module/file.py" with "module/file.py"
filename = os.sep.join(frame.filename.split(os.sep)[-2:])
code = ""
line = linecache.getline(frame.filename, frame.lineno).strip()
if line:
code = line
mem = "{:.1f} KiB".format(stat.size / 1024.0)
filename = "{}:{}".format(filename, frame.lineno)
scr.addstr(cnt + 1, 0, '{:<3} {:<40} {:<30}'.format(index, filename, mem))
scr.addstr(cnt + 2, 4, code, curses.color_pair(1))
cnt += 2
other = top_stats[limit:]
cnt += 2
if other:
size = sum(stat.size for stat in other)
other_size = ("{:d} other: {:.1f} KiB".format(len(other), size / 1024.0))
scr.addstr(cnt, 0, other_size, curses.A_BOLD)
cnt += 1
total = sum(stat.size for stat in top_stats)
total_size = "Total allocated size: {0:.1f} KiB".format(total / 1024.0)
scr.addstr(cnt, 0, total_size, curses.A_BOLD)
scr.move(max_y - 2, max_y - 2)
scr.refresh()
time.sleep(seconds)
scr.erase()
def raw_stats(client, group_by='lineno', limit=10, force_start=False):
snapshot = get_trace(client, force_start)
snapshot = filter_snapshot(snapshot)
top_stats = snapshot.statistics(group_by)
print("Qtile - Top {} lines".format(limit))
for index, stat in enumerate(top_stats[:limit], 1):
frame = stat.traceback[0]
# replace "/path/to/module/file.py" with "module/file.py"
filename = os.sep.join(frame.filename.split(os.sep)[-2:])
print("#{}: {}:{}: {:.1f} KiB"
.format(index, filename, frame.lineno, stat.size / 1024.0))
line = linecache.getline(frame.filename, frame.lineno).strip()
if line:
print(' {}'.format(line))
other = top_stats[limit:]
if other:
size = sum(stat.size for stat in other)
print("{:d} other: {:.1f} KiB".format(len(other), size / 1024.0))
total = sum(stat.size for stat in top_stats)
print("Total allocated size: {0:.1f} KiB".format(total / 1024.0))
def main():
opts = parse_args()
lines = opts.lines
seconds = opts.seconds
force_start = opts.force_start
client = command.Client(opts.socket)
try:
if not opts.raw:
curses.wrapper(get_stats, client, limit=lines, seconds=seconds,
force_start=force_start)
else:
raw_stats(client, limit=lines, force_start=force_start)
except TraceNotStarted:
print("tracemalloc not started on qtile, start by setting "
"PYTHONTRACEMALLOC=1 before starting qtile")
print("or force start tracemalloc now, but you'll lose early traces")
exit(1)
except TraceCantStart:
print("Can't start tracemalloc on qtile, check the logs")
except KeyboardInterrupt:
exit(-1)
|
Python
| 0
|
@@ -1281,16 +1281,21 @@
command
+, ipc
%0A%0A%0Aclass
@@ -5800,37 +5800,136 @@
-client = command.Client(opts.
+if opts.socket is None:%0A socket = command.find_sockfile()%0A else:%0A socket = opts.socket%0A client = ipc.Client(
sock
|
c2f45dd632f1df3690af168d588bf6818219b14f
|
use boolean value
|
bulbs/content/tasks.py
|
bulbs/content/tasks.py
|
import time
from django.conf import settings
from django.template.loader import render_to_string
from django.template.base import TemplateDoesNotExist
from django.core.exceptions import ObjectDoesNotExist
from bulbs.utils import vault
from bulbs.instant_articles.renderer import InstantArticleRenderer
from bulbs.instant_articles.transform import transform
import requests
import logging
from celery import shared_task
logger = logging.getLogger(__name__)
@shared_task(default_retry_delay=5)
def index(content_type_id, pk, refresh=False):
from django.contrib.contenttypes.models import ContentType
content_type = ContentType.objects.get_for_id(content_type_id)
obj = content_type.model_class().objects.get(id=pk)
obj.index(refresh=refresh)
@shared_task(default_retry_delay=5)
def index_content_contributions(content_pk):
from bulbs.contributions.models import Contribution
for contribution in Contribution.objects.filter(content__pk=content_pk):
contribution.save()
@shared_task(default_retry_delay=5)
def index_content_report_content_proxy(content_pk):
from bulbs.contributions.models import ReportContent
try:
proxy = ReportContent.reference.get(id=content_pk)
proxy.index()
except ObjectDoesNotExist:
pass
@shared_task(default_retry_delay=5)
def index_feature_type_content(featuretype_pk):
from .models import FeatureType
featuretype = FeatureType.objects.get(pk=featuretype_pk)
for content in featuretype.content_set.all():
content.index()
@shared_task(default_retry_delay=5)
def update_feature_type_rates(featuretype_pk):
from bulbs.contributions.models import ContributorRole, FeatureTypeRate, FEATURETYPE
roles = ContributorRole.objects.filter(payment_type=FEATURETYPE)
for role in roles:
existing_rates = FeatureTypeRate.objects.filter(
feature_type_id=featuretype_pk,
role_id=role.pk)
if existing_rates.count() == 0:
FeatureTypeRate.objects.create(
rate=0,
feature_type_id=featuretype_pk,
role_id=role.pk)
def post_article(content, body, fb_page_id, fb_api_url, fb_token_path, fb_dev_mode, fb_publish):
fb_access_token = vault.read(fb_token_path)
from .models import Content
# Post article to instant article API
post = requests.post(
'{0}/{1}/instant_articles'.format(fb_api_url, fb_page_id),
data={
'access_token': fb_access_token,
'html_source': body,
'published': fb_publish,
'development_mode': fb_dev_mode
})
if not post.ok:
logger.error('''
Error in posting Instant Article.\n
Content ID: {0}\n
IA ID: {1}\n
Status Code: {2}'''.format(
content.id,
content.instant_article_id,
post.status_code))
return
# Poll for status of article
response = ""
while response != "SUCCESS":
time.sleep(1)
status = requests.get('{0}/{1}?access_token={2}'.format(
fb_api_url,
post.json().get('id'),
fb_access_token
))
# log errors
if not status.ok or status.json().get('status') == "ERROR":
logger.error('''
Error in getting status of Instant Article.\n
Content ID: {0}\n
IA ID: {1}\n
Status Code: {2}'''.format(
content.id,
content.instant_article_id,
status.status_code))
return
response = status.json().get('status')
# set instant_article_id to response id
Content.objects.filter(pk=content.id).update(
instant_article_id=status.json().get('id'))
def delete_article(content, fb_api_url, fb_token_path):
fb_access_token = vault.read(fb_token_path)
delete = requests.delete('{0}/{1}?access_token={2}'.format(
fb_api_url,
content.instant_article_id,
fb_access_token
))
if not delete.ok:
logger.error('''
Error in deleting Instant Article.\n
Content ID: {0}\n
IA ID: {1}\n
Status Code: {2}'''.format(
content.id,
content.instant_article_id,
delete.status_code))
else:
status = delete.json().get('success')
if bool(status) is not True:
logger.error('''
Error in deleting Instant Article.\n
Content ID: {0}\n
IA ID: {1}\n
Error: {2}'''.format(
content.id,
content.instant_article_id,
delete.json()))
@shared_task(default_retry_delay=5, time_limit=300)
def post_to_instant_articles_api(content_pk):
from .models import Content
content = Content.objects.get(pk=content_pk)
fb_page_id = getattr(settings, 'FACEBOOK_PAGE_ID', None)
fb_api_url = getattr(settings, 'FACEBOOK_API_BASE_URL', None)
fb_token_path = getattr(settings, 'FACEBOOK_TOKEN_VAULT_PATH', None)
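    # The API call sends string booleans, so convert the settings flags here.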
fb_dev_mode = 'true' if getattr(settings, 'FACEBOOK_API_DEVELOPMENT_MODE', None) else 'false'
fb_publish = 'true' if getattr(settings, 'FACEBOOK_API_PUBLISH_ARTICLE', None) else 'false'
should_post = getattr(settings, 'FACEBOOK_POST_TO_IA', False)
if not fb_page_id or not fb_api_url or not fb_token_path:
logger.error('''
Error in Django Settings.\n
FACEBOOK_PAGE_ID: {0}\n
FACEBOOK_API_BASE_URL: {1}\n
FACEBOOK_TOKEN_VAULT_PATH: {2}'''.format(
fb_page_id,
fb_api_url,
fb_token_path))
return
# if feature type is IA approved & content is published
feature_type = getattr(content, 'feature_type', None)
if feature_type and feature_type.instant_article and content.is_published:
# render page source
context = {
'content': content,
'absolute_uri': getattr(settings, 'WWW_URL'),
'transformed_body': transform(
getattr(content, 'body', ''),
InstantArticleRenderer())
}
try:
source = render_to_string(
'instant_article/_instant_article.html', context
)
except TemplateDoesNotExist:
source = render_to_string(
'instant_article/base_instant_article.html', context
)
if should_post:
post_article(
content,
source,
fb_page_id,
fb_api_url,
fb_token_path,
fb_dev_mode,
fb_publish)
# if article is being unpublished, delete it from IA API
elif not content.is_published and content.instant_article_id:
if should_post:
delete_article(
content,
fb_api_url,
fb_token_path)
|
Python
| 0.001299
|
@@ -5237,27 +5237,28 @@
MENT_MODE',
-Non
+Fals
e) else 'fal
@@ -5338,19 +5338,20 @@
TICLE',
-Non
+Fals
e) else
|
0af36c8e43f0a7a633897ac3f0112f0d2b521a6e
|
Delete all ReviewFollowers and remove the entry in the ranker before deleting a StudentProposal.
|
app/soc/logic/models/student_proposal.py
|
app/soc/logic/models/student_proposal.py
|
#!/usr/bin/python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Student Proposal (Model) query functions.
"""
__authors__ = [
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
from soc.logic.models import base
from soc.logic.models import student as student_logic
from soc.models import student_proposal
import soc.models.linkable
import soc.models.student_proposal
class Logic(base.Logic):
"""Logic methods for the Student Proposal model.
"""
def __init__(self, model=soc.models.student_proposal.StudentProposal,
base_model=soc.models.linkable.Linkable,
scope_logic=student_logic):
"""Defines the name, key_name and model for this entity.
"""
super(Logic, self).__init__(model=model, base_model=base_model,
scope_logic=scope_logic)
def getRankerFor(self, entity):
"""Returns the ranker for the given Student Proposal.
Args:
entity: Student Proposal entity for which the ranker should be returned
Returns:
Ranker object which is used to rank the given entity
"""
from soc.logic.models.ranker_root import logic as ranker_root_logic
fields = {'link_id': student_proposal.DEF_RANKER_NAME,
'scope': entity.org}
ranker_root = ranker_root_logic.getForFields(fields, unique=True)
ranker = ranker_root_logic.getRootFromEntity(ranker_root)
return ranker
def _onCreate(self, entity):
"""Adds this proposal to the organization ranker entity.
"""
ranker = self.getRankerFor(entity)
ranker.SetScore(entity.key().name(), [entity.score])
super(Logic, self)._onCreate(entity)
def _updateField(self, entity, entity_properties, name):
"""Called when the fields of the student_proposal are updated.
- Update the ranker if the score changes and keep the score within bounds
- Remove the entity from the ranker when the status changes to invalid or rejected
"""
value = entity_properties[name]
if name == 'score':
# keep the score within bounds
min_score, max_score = student_proposal.DEF_SCORE
value = max(min_score, min(value, max_score-1))
entity_properties[name] = value
# update the ranker
ranker = self.getRankerFor(entity)
ranker.SetScore(entity.key().name(), [value])
if name == 'status':
if value in ['invalid', 'rejected'] and entity.status != value:
# the proposal is going into invalid or rejected state
# remove the score from the ranker
ranker = self.getRankerFor(entity)
# entries in the ranker can be removed by setting the score to None
ranker.SetScore(entity.key().name(), None)
return super(Logic, self)._updateField(entity, entity_properties, name)
logic = Logic()
|
Python
| 0
|
@@ -3302,16 +3302,808 @@
name)%0A%0A
+ def delete(self, entity):%0A %22%22%22Removes Ranker entry and all ReviewFollowers before deleting the entity.%0A%0A Args:%0A entity: an existing entity in datastore%0A %22%22%22%0A%0A from soc.models.logic.review_follower import logic as review_follower_logic%0A%0A # entries in the ranker can be removed by setting the score to None%0A ranker = self.getRankerFor(entity)%0A ranker.SetScore(entity.key().name(), None)%0A%0A # get all the ReviewFollwers that have this entity as it's scope%0A fields = %7B'scope': entity%7D%0A%0A # TODO be sure that this captures all the followers%0A followers = review_follower_logic.getForFields(fields)%0A%0A for follower in followers:%0A review_follower_logic.delete(follower)%0A%0A # call to super to complete the deletion%0A super(Logic, self).delete(entity)%0A%0A
%0Alogic =
|
80176ae73fc9843f63aca68306523da822d0b258
|
add "group" property to devices
|
samantha/plugins/plugin.py
|
samantha/plugins/plugin.py
|
"""Contains a baseclass for plugins."""
###############################################################################
#
# TODO: [ ] default methods
#
###############################################################################
# standard library imports
import logging
# related third party imports
# application specific imports
# pylint: disable=import-error
from core import subscribe_to
# pylint: enable=import-error
__version__ = "1.4.0"
# Initialize the logger
LOGGER = logging.getLogger(__name__)
class Plugin(object):
"""Baseclass, that holds the mandatory methods a plugin must support."""
def __init__(self, name="Plugin", active=False,
logger=None, file_path=None, plugin_type="s"):
"""Set the plugin's attributes, if they're not set already."""
self.name = name
self.uid = "NO_UID"
self.is_active = active
if logger:
self.logger = logger
else:
self.logger = LOGGER
if file_path:
self.path = file_path
else:
self.path = __file__
self.plugin_type = plugin_type
self.logger.info("Initialisation of the plugin complete.")
def __str__(self):
"""Return a simple string representation of the plugin."""
return "{} '{}', UID {}".format(
("Device" if self.plugin_type == "d" else "Plugin"),
self.name,
self.uid)
def __repr__(self):
"""Return a verbose string representation of the plugin."""
return "{type}\t{name:10}\tUID {uid}\tLoaded from {path}".format(
type=("Device" if self.plugin_type == "d" else "Plugin"),
name=self.name,
uid=self.uid,
path=self.path)
class Device(Plugin):
"""Baseclass, that holds the mandatory methods a device must support."""
def __init__(self, name="Device", active=False,
logger=None, file_path=None):
"""Set the plugin's attributes, if they're not set already."""
super(Device, self).__init__(name, active, logger, file_path, "d")
self.name = name
self.is_available = None
self.logger.info("Initialisation complete")
def turn_on(self, func):
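    # Decorator: registers func as the handler for this device's power.on command.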
@subscribe_to(self.name + "power.on")
def function(*args, **kwargs):
return func(*args, **kwargs)
return function
|
Python
| 0
|
@@ -445,17 +445,17 @@
= %221.4.
-0
+1
%22%0A%0A%0A# In
@@ -1952,16 +1952,28 @@
ath=None
+, group=None
):%0A
@@ -2171,16 +2171,43 @@
= None%0A
+ self.group = group%0A
|
586d5f34fc508d4a3eaa93bd39c5dc2b41e4878d
|
Migrate api7 to api8
|
project_task_state/project_task_state.py
|
project_task_state/project_task_state.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from dateutil.relativedelta import relativedelta
from datetime import datetime
_TASK_STATE = [('draft', 'New'), ('open', 'In Progress'), ('pending', 'Pending'), ('ready', 'Ready'), ('done', 'Done'), ('cancelled', 'Cancelled')]
class project_task_type(osv.Model):
_inherit = 'project.task.type'
_columns = {
'state': fields.selection(_TASK_STATE, 'Related Status', required=True),
'task_type':fields.many2one('task.type', string='Task Type'),
}
def mark_done(self, cr, uid, ids, context=None):
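        # Move the stage to the terminal 'done' state and flag it read-only.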
values = {
'state': 'done',
'name': _('Done'),
'readonly':'True',
}
self.write(cr, uid, ids, values, context=context)
return True
_defaults = {
'state': 'open',
'fold': False,
'case_default': False,
}
class task(osv.Model):
_inherit = 'project.task'
_columns = {
'state': fields.related('stage_id', 'state', type="selection", store=True,
selection=_TASK_STATE, string="Status", readonly=True, select=True),
}
def set_kanban_state_blocked(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'kanban_state': 'blocked'}, context=context)
def set_kanban_state_normal(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'kanban_state': 'normal'}, context=context)
def set_kanban_state_done(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'kanban_state': 'done'}, context=context)
return False
|
Python
| 0.000122
|
@@ -1023,16 +1023,17 @@
#######%0A
+%0A
from ope
@@ -1040,20 +1040,16 @@
nerp
-.osv
import
osv,
@@ -1048,19 +1048,22 @@
ort
-osv
+models
, fields
%0Afro
@@ -1062,87 +1062,16 @@
elds
-%0Afrom dateutil.relativedelta import relativedelta%0Afrom datetime import datetime
+, api, _
%0A%0A_T
@@ -1125,16 +1125,31 @@
gress'),
+%0A
('pendi
@@ -1184,16 +1184,31 @@
Ready'),
+%0A
('done'
@@ -1247,16 +1247,17 @@
led')%5D%0A%0A
+%0A
class pr
@@ -1272,19 +1272,22 @@
sk_type(
-osv
+models
.Model):
@@ -1287,20 +1287,16 @@
Model):%0A
-
%0A _in
@@ -1327,50 +1327,27 @@
pe'%0A
-%0A
-_columns = %7B%0A '
state
-':
+ =
fields.
sele
@@ -1342,17 +1342,17 @@
fields.
-s
+S
election
@@ -1392,25 +1392,24 @@
equired=True
-)
,%0A 't
@@ -1406,17 +1406,57 @@
-'
+ default='open')%0A
task_typ
@@ -1460,18 +1460,19 @@
type
-':
+ =
fields.
-m
+M
any2
@@ -1511,20 +1511,24 @@
pe')
-,%0A %7D%0A
+%0A%0A @api.multi
%0A
@@ -1550,36 +1550,8 @@
self
-, cr, uid, ids, context=None
):%0A
@@ -1651,16 +1651,17 @@
adonly':
+
'True',%0A
@@ -1693,45 +1693,14 @@
ite(
-cr, uid, ids, values, context=context
+values
)%0A
@@ -1740,33 +1740,8 @@
= %7B%0A
- 'state': 'open',%0A
@@ -1792,21 +1792,16 @@
alse
-,%0A
+%0A
%7D%0A
-
+%0A
%0Acla
@@ -1812,11 +1812,14 @@
ask(
-osv
+models
.Mod
@@ -1823,16 +1823,17 @@
Model):%0A
+%0A
_inh
@@ -1858,678 +1858,325 @@
sk'%0A
-
%0A
-_columns = %7B%0A 'state': fields.related('stage_id', 'state', type=%22selection%22, store=True,%0A selection=_TASK_STATE, string=%22Status%22, readonly=True, select=True),%0A %7D
+@api.one%0A @api.depends('stage_id')%0A def _compute_state(self):
%0A
-%0A
-de
+i
f se
-t_kanban_state_blocked(self, cr, uid, ids, context=None):%0A return self.write(cr, uid, ids, %7B'kanban_state': 'blocked'%7D, context=context)%0A%0A def set_kanban_state_normal(self, cr, uid, ids, context=None):%0A return self.write(cr, uid, ids, %7B'kanban_state': 'normal'%7D, context=context)%0A%0A def set_kanban_state_done(self, cr, uid, ids, context=None):%0A self.write(cr, uid, ids, %7B'kanban_state': 'done'%7D, context=context)%0A return False
+lf.stage_id:%0A self.state = self.stage_id.state%0A else:%0A self.state = 'draft'%0A%0A state = fields.Selection(%0A _TASK_STATE, string=%22Status%22, readonly=True, store=True,%0A compute='_compute_state')%0A
|
6c60c588c694804930ea6e2312f80830f7b1fe8d
|
fix tests
|
nipype/interfaces/freesurfer/tests/test_preprocess.py
|
nipype/interfaces/freesurfer/tests/test_preprocess.py
|
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import os
import pytest
from nipype.testing.fixtures import create_files_in_directory
import nipype.interfaces.freesurfer as freesurfer
@pytest.mark.skipif(freesurfer.no_freesurfer(), reason="freesurfer is not installed")
def test_robustregister(create_files_in_directory):
filelist, outdir = create_files_in_directory
reg = freesurfer.RobustRegister()
# make sure command gets called
assert reg.cmd == 'mri_robust_register'
# test raising error with mandatory args absent
with pytest.raises(ValueError): reg.run()
# .inputs based parameters setting
reg.inputs.source_file = filelist[0]
reg.inputs.target_file = filelist[1]
reg.inputs.auto_sens = True
assert reg.cmdline == ('mri_robust_register '
'--satit --lta %s_robustreg.lta --mov %s --dst %s' % (filelist[0][:-4], filelist[0], filelist[1]))
# constructor based parameter setting
reg2 = freesurfer.RobustRegister(source_file=filelist[0], target_file=filelist[1], outlier_sens=3.0,
out_reg_file='foo.lta', half_targ=True)
assert reg2.cmdline == ('mri_robust_register --halfdst %s_halfway.nii --lta foo.lta '
'--sat 3.0000 --mov %s --dst %s'
% (os.path.join(outdir, filelist[1][:-4]), filelist[0], filelist[1]))
@pytest.mark.skipif(freesurfer.no_freesurfer(), reason="freesurfer is not installed")
def test_fitmsparams(create_files_in_directory):
filelist, outdir = create_files_in_directory
fit = freesurfer.FitMSParams()
# make sure command gets called
assert fit.cmd == 'mri_ms_fitparms'
# test raising error with mandatory args absent
with pytest.raises(ValueError): fit.run()
# .inputs based parameters setting
fit.inputs.in_files = filelist
fit.inputs.out_dir = outdir
assert fit.cmdline == 'mri_ms_fitparms %s %s %s' % (filelist[0], filelist[1], outdir)
# constructor based parameter setting
fit2 = freesurfer.FitMSParams(in_files=filelist, te_list=[1.5, 3.5], flip_list=[20, 30], out_dir=outdir)
assert fit2.cmdline == ('mri_ms_fitparms -te %.3f -fa %.1f %s -te %.3f -fa %.1f %s %s'
% (1.500, 20.0, filelist[0], 3.500, 30.0, filelist[1], outdir))
@pytest.mark.skipif(freesurfer.no_freesurfer(), reason="freesurfer is not installed")
def test_synthesizeflash(create_files_in_directory):
filelist, outdir = create_files_in_directory
syn = freesurfer.SynthesizeFLASH()
# make sure command gets called
assert syn.cmd == 'mri_synthesize'
# test raising error with mandatory args absent
with pytest.raises(ValueError): syn.run()
# .inputs based parameters setting
syn.inputs.t1_image = filelist[0]
syn.inputs.pd_image = filelist[1]
syn.inputs.flip_angle = 30
syn.inputs.te = 4.5
syn.inputs.tr = 20
assert syn.cmdline == ('mri_synthesize 20.00 30.00 4.500 %s %s %s'
% (filelist[0], filelist[1], os.path.join(outdir, 'synth-flash_30.mgz')))
# constructor based parameters setting
syn2 = freesurfer.SynthesizeFLASH(t1_image=filelist[0], pd_image=filelist[1], flip_angle=20, te=5, tr=25)
assert syn2.cmdline == ('mri_synthesize 25.00 20.00 5.000 %s %s %s'
% (filelist[0], filelist[1], os.path.join(outdir, 'synth-flash_20.mgz')))
@pytest.mark.skipif(freesurfer.no_freesurfer(), reason="freesurfer is not installed")
def test_mandatory_outvol(create_files_in_directory):
filelist, outdir = create_files_in_directory
mni = freesurfer.MNIBiasCorrection()
# make sure command gets called
assert mni.cmd == "mri_nu_correct.mni"
# test raising error with mandatory args absent
with pytest.raises(ValueError): mni.cmdline
# test with minimal args
mni.inputs.in_file = filelist[0]
base, ext = os.path.splitext(os.path.basename(filelist[0]))
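    # Handle double extensions like .nii.gz by peeling off the .gz first.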
if ext == '.gz':
base, ext2 = os.path.splitext(base)
ext = ext2 + ext
assert mni.cmdline == (
'mri_nu_correct.mni --i %s --o %s_output.%s' % (filelist[0], base, ext))
# test with custom outfile
mni.inputs.out_file = 'new_corrected_file.mgz'
assert mni.cmdline == ('mri_nu_correct.mni --i %s --o new_corrected_file.mgz'
% (filelist[0]))
# constructor based tests
mni2 = freesurfer.MNIBiasCorrection(in_file=filelist[0],
out_file='bias_corrected_output',
iterations=4)
assert mni2.cmdline == ('mri_nu_correct.mni --i %s --n 4 --o bias_corrected_output.%s'
% (filelist[0], ext))
|
Python
| 0.000001
|
@@ -4223,17 +4223,16 @@
s_output
-.
%25s' %25 (f
@@ -4770,17 +4770,16 @@
d_output
-.
%25s'%0A
|
c3d7e7fdcbea0fc34bfa6d9d517efc4d54dc0b15
|
add file extension
|
scripts/generateQANotebook.py
|
scripts/generateQANotebook.py
|
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument('repo', help='data repository')
parser.add_argument('--tract', type=int)
parser.add_argument('--filt', type=str)
parser.add_argument('--output', '-o', default='QA', help='output folder')
args = parser.parse_args()
from explorer.notebook import Coadd_QANotebook
coadd_nb = Coadd_QANotebook(args.repo, args.tract, args.filt)
if not os.path.exists(args.output):
os.makedirs(args.output)
coadd_nb.write(os.path.join(args.output, 'coadd_{}_{}'.format(args.tract, args.filt)))
|
Python
| 0.000002
|
@@ -520,16 +520,22 @@
dd_%7B%7D_%7B%7D
+.ipynb
'.format
|
1f5bdf4ce98d55339bee0aad16f40439d8a99a33
|
Upgrade to the most recent version of buildkit.
|
buildkit.py
|
buildkit.py
|
"""
Common code used in my setup.py files.
"""
from __future__ import with_statement
import re
import os.path
import sys
def read(filename):
"""Read files relative to this file."""
full_path = os.path.join(os.path.dirname(sys.argv[0]), filename)
with open(full_path, 'r') as fh:
return fh.read()
def get_metadata(module_path):
"""Extract the metadata from a module file."""
matches = re.finditer(
r"^__(\w+?)__ *= *(['\"])(.*?)\2$",
read(module_path),
re.MULTILINE)
return dict(
(match.group(1), match.group(3).decode('unicode_escape'))
for match in matches)
def read_requirements(requirements_path):
"""Read a requirements file, stripping out the detritus."""
requirements = []
to_ignore = ('#', 'svn+', 'git+', 'bzr+', 'hg+')
with open(requirements_path, 'r') as fh:
for line in fh:
line = line.strip()
if line != '' and not line.startswith(to_ignore):
requirements.append(line)
return requirements
|
Python
| 0
|
@@ -84,18 +84,8 @@
nt%0A%0A
-import re%0A
impo
@@ -117,24 +117,25 @@
ef read(
+*
filename
):%0A %22
@@ -126,16 +126,17 @@
filename
+s
):%0A %22
@@ -166,20 +166,75 @@
o th
-is file.%22%22%22%0A
+e executable.%22%22%22%0A files = %5B%5D%0A for filename in filenames:%0A
@@ -298,16 +298,20 @@
lename)%0A
+
with
@@ -351,338 +351,62 @@
-return fh.read()%0A%0A%0Adef get_metadata(module_path):%0A %22%22%22Extract the metadata from a module file.%22%22%22%0A matches = re.finditer(%0A r%22%5E__(%5Cw+?)__ *= *(%5B'%5C%22%5D)(.*?)%5C2$%22,%0A read(module_path),%0A re.MULTILINE)%0A return dict(%0A (match.group(1), match.group(3).decode('unicode_escape'))%0A for match in match
+ files.append(fh.read())%0A return %22%5Cn%5Cn%22.join(fil
es)%0A
@@ -713,48 +713,378 @@
ine
-!
+=
= ''
-and not line.startswith(to_ignore)
+or line.startswith(to_ignore):%0A continue%0A if line.startswith('-r '):%0A requirements += read_requirements(%0A os.path.realpath(%0A os.path.join(%0A os.path.dirname(requirements_path),%0A line.split(' ', 1)%5B1%5D.lstrip())))%0A else
:%0A
|
4b248051b5a3521eaa5d1804961bd573a8ce04c7
|
remove references to the axis argument
|
scipy/sparse/linalg/_norm.py
|
scipy/sparse/linalg/_norm.py
|
"""Sparse matrix norms.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.sparse import issparse
from numpy.core import (
array, asarray, zeros, empty, empty_like, transpose, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs,
broadcast
)
def norm(x, ord=None):
"""
Norm of a sparse matrix
This function is able to return one of seven different matrix norms,
depending on the value of the ``ord`` parameter.
Parameters
----------
x : a sparse matrix
Input sparse matrix. If `axis` is None, `x` must be 1-D or 2-D
sparse matrix.
ord : {non-zero int, inf, -inf, 'fro'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
Returns
-------
n : float or matrix
Notes
-----
    Some values of ``ord`` are not implemented because the associated
    functions, such as _multi_svd_norm, are not yet available for sparse
    matrices.
This docstring is modified based on numpy.linalg.norm.
https://github.com/numpy/numpy/blob/master/numpy/linalg/linalg.py
The following norms can be calculated:
===== ============================
ord norm for sparse matrices
===== ============================
None Frobenius norm
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
0 abs(x).sum(axis=axis)
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 Not implemented
-2 Not implemented
other Not implemented
===== ============================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from scipy.sparse import *
>>> import numpy as np
>>> from scipy.sparse.linalg import norm
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> b = csr_matrix(b)
>>> norm(b)
7.745966692414834
>>> norm(b, 'fro')
7.745966692414834
>>> norm(b, np.inf)
9
>>> norm(b, -np.inf)
2
>>> norm(b, 1)
7
>>> norm(b, -1)
6
"""
if not issparse(x):
raise TypeError("input is not sparse. use numpy.linalg.norm")
# Check the default case first and handle it immediately.
if ord in (None, 'fro', 'f'):
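        # Frobenius norm: sqrt of the summed squared magnitudes; .power(2) keeps it sparse.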
if np.issubdtype(x.dtype, np.complexfloating):
sqnorm = abs(x).power(2).sum()
else:
sqnorm = x.power(2).sum()
return sqrt(sqnorm)
# Normalize the `axis` argument to a tuple.
nd = x.ndim
axis = tuple(range(nd))
if len(axis) == 2:
row_axis, col_axis = axis
if not (-nd <= row_axis < nd and -nd <= col_axis < nd):
raise ValueError('Invalid axis %r for an array with shape %r' %
(axis, x.shape))
if row_axis % nd == col_axis % nd:
raise ValueError('Duplicate axes given.')
if ord == 2:
raise NotImplementedError
#return _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
raise NotImplementedError
#return _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
return abs(x).sum(axis=row_axis).max(axis=col_axis)[0,0]
elif ord == Inf:
return abs(x).sum(axis=col_axis).max(axis=row_axis)[0,0]
elif ord == -1:
return abs(x).sum(axis=row_axis).min(axis=col_axis)[0,0]
elif ord == -Inf:
return abs(x).sum(axis=col_axis).min(axis=row_axis)[0,0]
else:
raise ValueError("Invalid norm order for matrices.")
else:
raise ValueError("Improper number of dimensions to norm.")
|
Python
| 0.000001
|
@@ -777,73 +777,8 @@
rix.
- If %60axis%60 is None, %60x%60 must be 1-D or 2-D%0A sparse matrix.
%0A
@@ -3122,56 +3122,8 @@
m)%0A%0A
- # Normalize the %60axis%60 argument to a tuple.%0A
|
72eec77af653e5e43674a7a5c08083130a4a0770
|
delete binary()
|
calour/transforming.py
|
calour/transforming.py
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016--, Calour development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from logging import getLogger
from copy import deepcopy
from collections import defaultdict
import numpy as np
from sklearn import preprocessing
logger = getLogger(__name__)
def normalize(exp, total=10000, axis=1, inplace=False):
'''Normalize the sum of each sample (axis=0) or feature (axis=1) to sum total
Parameters
----------
exp : Experiment
total : float
the sum (along axis) to normalize to
axis : int (optional)
the axis to normalize. 1 (default) is normalize each sample, 0 to normalize each feature
inplace : bool (optional)
False (default) to create a copy, True to replace values in exp
Returns
-------
``Experiment``
the normalized experiment
'''
if not inplace:
exp = deepcopy(exp)
exp.data = preprocessing.normalize(exp.data, norm='l1', axis=axis) * total
return exp
def scale(exp, axis=1, inplace=False):
'''Standardize a dataset along an axis
.. warning:: It will convert the ``Experiment.data`` from the sparse matrix to dense array.
Parameters
----------
axis : 0 or 1
1 means scaling occur sample-wise; 0 feature-wise.
Returns
-------
``Experiment``
'''
if not inplace:
exp = deepcopy(exp)
if exp.sparse:
exp.sparse = False
preprocessing.scale(exp.data, axis=axis, copy=False)
return exp
def binarize(exp, threshold=1, inplace=False):
'''Binarize the data with a threshold.
It calls scikit-learn to do the real work.
Parameters
----------
threshold : Numeric
the cutoff value. Any values below or equal to this will be replaced by 0,
above it by 1.
Returns
-------
``Experiment``
'''
if not inplace:
exp = deepcopy(exp)
preprocessing.scale(exp.data, threshold=threshold, copy=False)
return exp
def log_n(exp, n=1, inplace=False):
'''Log transform the data
Parameters
----------
n : numeric, optional
cap the tiny values and then log transform the data.
inplace : bool, optional
Returns
-------
``Experiment``
'''
if not inplace:
exp = deepcopy(exp)
if exp.sparse:
exp.sparse = False
exp.data[exp.data < n] = n
exp.data = np.log2(exp.data)
return exp
def binary(exp, threshold=0, inplace=False):
'''Binary transform the data
Parameters
----------
threshold : numeric, optional
        positions are called present only if >threshold
(default=0)
inplace : bool, optional
'''
if not inplace:
exp = deepcopy(exp)
if exp.sparse:
exp.sparse = False
exp.data = exp.data > threshold
return exp
def transform(exp, steps=[], inplace=False, **kwargs):
'''Chain transformations together.
Parameters
----------
steps : list of callable
each callable is a transformer that takes ``Experiment`` object as
its 1st argument and has a boolean parameter of ``inplace``. Each
callable should return an ``Experiment`` object.
inplace : bool
        transformation occurring in the original data or a copy
kwargs : dict
keyword arguments to pass to each transformers. The key should
be in the form of "<transformer_name>__<param_name>". For
example, "transform(exp, steps=[log_n], log_n__n=3)" will set
"n" of function "log_n" to 3
Returns
-------
``Experiment``
with its data transformed
'''
if not inplace:
exp = deepcopy(exp)
params = defaultdict(dict)
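    # Split 'log_n__n'-style keys into per-transformer keyword dicts.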
for k, v in kwargs.items():
transformer, param_name = k.split('__')
if param_name == 'inplace':
raise ValueError('You should not give %s argument. It should be '
'set thru `inplace` argument for this function.')
params[transformer][param_name] = v
for step in steps:
step(exp, inplace=True, **params[step.__name__])
return exp
def normalize_by_subset_features(exp, features, total=10000, exclude=True, inplace=False):
'''Normalize each sample by their total sums without a list of features
Normalizes all features (including in the exclude list) by the
total sum calculated without the excluded features. This is to
alleviate the compositionality in the data set by only keeping the
features that you think are not changing across samples.
.. note:: sum is not identical in all samples after normalization
(since also keeps the excluded features)
Parameters
----------
features : list of str
The features to exclude (or include if exclude=False)
total : int (optional)
The total abundance for the non-excluded features per sample
exclude : bool (optional)
True (default) to calculate normalization factor without features in features list.
False to calculate normalization factor only with features in features list.
inplace : bool (optional)
False (default) to create a new experiment, True to normalize in place
Returns
-------
``Experiment``
The normalized experiment
'''
feature_pos = exp.feature_metadata.index.isin(features)
if exclude:
feature_pos = np.invert(feature_pos)
data = exp.get_data(sparse=False)
use_reads = np.sum(data[:, feature_pos], axis=1)
if inplace:
newexp = exp
else:
newexp = deepcopy(exp)
newexp.data = total * data / use_reads[:, None]
return newexp
|
Python
| 0.000007
|
@@ -2147,36 +2147,39 @@
preprocessing.
-scal
+binariz
e(exp.data, thre
@@ -2671,412 +2671,8 @@
p%0A%0A%0A
-def binary(exp, threshold=0, inplace=False):%0A '''Binary transform the data%0A%0A Parameters%0A ----------%0A threshold : numeric, optional%0A positions are called present only if %3Ethrehold%0A (default=0)%0A inplace : bool, optional%0A '''%0A if not inplace:%0A exp = deepcopy(exp)%0A%0A if exp.sparse:%0A exp.sparse = False%0A%0A exp.data = exp.data %3E threshold%0A return exp%0A%0A%0A
def
|
9d5e87cded6373b9d0ad6d4c94e227c0bf119749
|
Use find instead of index on convert_data
|
ruuvitag_sensor/ruuvi.py
|
ruuvitag_sensor/ruuvi.py
|
import re
import sys
import os
import time
from ruuvitag_sensor.url_decoder import UrlDecoder
mac_regex = '[0-9a-f]{2}([:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$'
if not sys.platform.startswith('linux') or os.environ.get('CI') == 'True':
# Use BleCommunicationDummy also for CI as it can't use gattlib
from ruuvitag_sensor.ble_communication import BleCommunicationDummy
ble = BleCommunicationDummy()
else:
from ruuvitag_sensor.ble_communication import BleCommunicationNix
ble = BleCommunicationNix()
# TODO: Split this class to common functions and RuuviTagSensor
class RuuviTagSensor(object):
def __init__(self, mac, name):
if not re.match(mac_regex, mac.lower()):
raise ValueError('{} is not valid mac address'.format(mac))
self._mac = mac
self._state = {}
self._name = name
self._data = None
@property
def mac(self):
return self._mac
@property
def name(self):
return self._name
@property
def state(self):
return self._state
@staticmethod
def get_data(mac):
raw = ble.get_data(mac)
return RuuviTagSensor.convert_data(raw)
@staticmethod
def convert_data(raw):
"""
        Convert hexadecimal data to string and validate that data is from RuuviTag.
Encoded data part is after ruu.vi/# or r/
Returns:
Encoded sensor data part in string
"""
try:
            # TODO: Fix conversion so converted data will show https://ruu.vi/# and https://r/
            # Now it has e.g. <non-printable prefix>ruu.vi/#AjwYAMFc
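            # Split the hex dump into byte pairs, keep printable ASCII, and join into text.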
base16_split = [raw[i:i + 2] for i in range(0, len(raw), 2)]
selected_hexs = filter(lambda x: int(x, 16) < 128, base16_split)
characters = [chr(int(c, 16)) for c in selected_hexs]
data = ''.join(characters)
# take only part after ruu.vi/# or r/
index = data.index('ruu.vi/#') + 8
if index > -1:
return data[index:]
else:
index = data.index('r/') + 2
if index > -1:
return data[index:]
return None
except:
return None
@staticmethod
def find_ruuvitags():
"""
Find all RuuviTags. Function will print the mac and the state of the sensors when found.
        Function will execute until it is stopped. Stop execution with Ctrl+C.
Returns:
Dictionary containing mac and state of found sensors
"""
print('Finding RuuviTags. Stop with Ctrl+C.')
datas = dict()
for ble_data in ble.get_datas():
# If mac already in datas continue
if ble_data[0] in datas:
continue
encoded = RuuviTagSensor.convert_data(ble_data[1])
# Check that encoded data is valid ruuvitag data it is sensor data
if encoded is not None:
state = UrlDecoder().decode_data(encoded)
if state is not None:
datas[ble_data[0]] = state
print(ble_data[0])
print(state)
return datas
@staticmethod
def get_data_for_sensors(macs, search_duratio_sec=5):
"""
        Get latest data for sensors in the macs list.
Args:
macs: List of mac addresses
search_duratio_sec: Search duration in seconds. Default 5.
Returns:
Dictionary containing mac and state of found sensors
"""
print('Get latest data for sensors. Search duration is {}s'.format(search_duratio_sec))
print('MACs: {}'.format(macs))
start_time = time.time()
datas = dict()
data_iter = ble.get_datas()
for ble_data in data_iter:
if time.time() - start_time > search_duratio_sec:
data_iter.send(StopIteration)
break
# If mac in whitelist
if not ble_data[0] in macs:
continue
encoded = RuuviTagSensor.convert_data(ble_data[1])
# Check that encoded data is valid ruuvitag data it is sensor data
if encoded is not None:
state = UrlDecoder().decode_data(encoded)
if state is not None:
datas[ble_data[0]] = state
return datas
def update(self):
"""
        Get latest data from the sensor and update own state.
Returns:
Latest state
"""
data = RuuviTagSensor.get_data(self._mac)
if data == self._data:
return self._state
self._data = data
if self._data is None:
self._state = {}
else:
self._state = UrlDecoder().decode_data(self._data)
return self._state
|
Python
| 0.000001
|
@@ -1933,21 +1933,20 @@
= data.
+f
ind
-ex
('ruu.vi
@@ -1953,12 +1953,8 @@
/#')
- + 8
%0A
@@ -1997,37 +1997,43 @@
return data%5B
+(
index
+ + 8)
:%5D%0A e
@@ -2070,23 +2070,18 @@
ata.
+f
ind
-ex
('r/')
- + 2
%0A
@@ -2140,21 +2140,27 @@
rn data%5B
+(
index
+ + 2)
:%5D%0A
@@ -3637,16 +3637,29 @@
.format(
+%0A
search_d
|
28adc3fbce76a562e729aef3ae19bdefd3379586
|
add taxonomy information to loaded sequences
|
scripts/load_into_database.py
|
scripts/load_into_database.py
|
#!/usr/bin/env python
import sys
import argparse
from BioSQL import BioSeqDatabase
from Bio import SeqIO
def load_gff(db, gff_file, fasta_file):
from BCBio.GFF import GFFParser
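    # Index the FASTA by record id so GFF features can be attached to their sequences.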
with open(fasta_file) as seq_handle:
seq_dict = SeqIO.to_dict(SeqIO.parse(seq_handle, "fasta"))
parser = GFFParser()
recs = parser.parse(gff_file, seq_dict )#, limit_info=limit_info)
db.load(recs)
def load_genbank(db, genbank_file):
with open(genbank_file) as fp:
db.load(SeqIO.parse(fp, 'genbank'))
def main(args):
server = BioSeqDatabase.open_database(driver="sqlite3",db=args.database)
if args.database_name not in server.keys():
server.new_database(args.database_name)
db = server[args.database_name]
try:
if args.gff is not None and args.fasta is not None:
load_gff(db, args.gff, args.fasta)
server.adaptor.commit()
elif args.genbank is not None:
load_genbank(db, args.genbank)
server.adaptor.commit()
except:
server.adaptor.rollback()
raise
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--database', help='name of premade biosql database')
parser.add_argument('-D', '--database-name', help='namespace of the database that you want to add into', dest='database_name', default='metagenomic_database')
parser.add_argument('-f', '--fasta', help='fasta file to add into the database')
    parser.add_argument('-g', '--gff', help='gff file of features to add into the database. Must be paired with a fasta file')
parser.add_argument('-G', '--genbank', help='genbank file to add into the database')
args = parser.parse_args()
main(args)
|
Python
| 0.000001
|
@@ -99,16 +99,108 @@
SeqIO%0A%0A
+def add_taxid(inIter, taxid):%0A inIter.annotations%5B'ncbi_taxid'%5D = taxid%0A yield inIter%0A
%0Adef loa
@@ -225,24 +225,58 @@
, fasta_file
+, fetch_taxonomy=False, taxid=None
):%0A from
@@ -518,20 +518,74 @@
db.load(
-recs
+add_taxid(recs, taxid), fetch_NCBI_taxonomy=fetch_taxonomy
)%0A%0Adef l
@@ -612,16 +612,50 @@
ank_file
+, fetch_taxonomy=False, taxid=None
):%0A w
@@ -700,16 +700,26 @@
db.load(
+add_taxid(
SeqIO.pa
@@ -726,22 +726,76 @@
rse(
-fp, 'genbank')
+genbank_file, 'genbank'), taxid), fetch_NCBI_taxonomy=fetch_taxonomy
)%0A%0A%0A
@@ -1134,16 +1134,33 @@
gs.fasta
+, args.tax_lookup
)%0A
@@ -1269,16 +1269,33 @@
.genbank
+, args.tax_lookup
)%0A
@@ -1996,24 +1996,307 @@
database')%0A
+ parser.add_argument('-t', '--lookup-taxonomy', dest='tax_lookup', help='access taxonomy information on NCBI servers', action=%22store_true%22, default=False)%0A parser.add_argument('-T', '--taxid', help='supply a ncbi taxonomy id that will be applied to all sequences in the file')%0A
args = p
|
93b52ac533086bfa1747c2f3e10ca98d9e666197
|
Stop sending stepFinished updates from the tryserver.
|
scripts/master/status_push.py
|
scripts/master/status_push.py
|
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import urlparse
from buildbot.status import status_push
CR_PASSWORD_FILE = '.code_review_password'
class TryServerHttpStatusPush(status_push.HttpStatusPush):
"""Status push used by try server.
Rietveld listens to buildStarted and (step|build)Finished to know if a try
job succeeded or not.
"""
def __init__(self, serverUrl, *args, **kwargs):
# Appends the status listener to the base url.
serverUrl = urlparse.urljoin(serverUrl, 'status_listener')
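    # Events that Rietveld does not consume; the commented-out entries are the ones actually pushed.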
blackList = [
'buildETAUpdate',
#'buildFinished',
#'buildStarted',
        'builderRemoved',
'builderAdded',
'builderChangedState',
'buildsetSubmitted',
'changeAdded',
'logFinished',
'logStarted',
'requestCancelled',
'requestSubmitted',
'shutdown',
'slaveConnected',
'slaveDisconnected',
'start',
'stepETAUpdate',
#'stepFinished',
'stepStarted',
'stepText2Changed',
'stepTextChanged',
]
if not os.path.isfile(CR_PASSWORD_FILE):
logging.warn("The file %s does not exist. "
"Connections to rietveld may not work."
% CR_PASSWORD_FILE)
pwd = ''
else:
with open(CR_PASSWORD_FILE, 'rb') as f:
pwd = f.readline().strip()
extra_post_params = { 'password': pwd }
status_push.HttpStatusPush.__init__(
self,
*args,
serverUrl=serverUrl,
blackList=blackList,
extra_post_params=extra_post_params,
**kwargs)
def setServiceParent(self, parent):
"""Adds the base_url property, it's not available to Rietveld otherwise."""
self.extra_post_params['base_url'] = parent.buildbotURL
status_push.HttpStatusPush.setServiceParent(self, parent)
|
Python
| 0.000003
|
@@ -1111,17 +1111,16 @@
-#
'stepFin
|
3de06dc85c05303d2fdd4c5a24acb282de075143
|
improve help message
|
beetsplug/fetchartist.py
|
beetsplug/fetchartist.py
|
"""
Fetchartist plugin for beets.
"""
import os
import shutil
import pylast
import requests
from beets import config
from beets import plugins
from beets import ui
from beetsplug import util
CONTENT_TYPES = ["image/png", "image/jpeg"]
FILE_TYPES = ['png', 'jpg']
CONTENT_TYPE_TO_EXTENSION_MAP = {
"image/png": "png",
"image/jpeg": "jpg"
}
COVER_NAME_KEY = "cover_name"
class ArtistInfo(object):
"""
    Contains information about an artist, like its name, paths that point to
its covers and the cover itself.
"""
def __init__(self, name):
self.name = name
self.paths = set()
self.cover = None
self.extension = None
self.remaining_paths = set()
def add_path(self, path):
"""
Adds a cover path to this artist.
"""
self.paths.add(path)
def get_write_paths(self, force):
"""
        If remaining paths are set, only the covers at those paths are
        missing and need to be written; otherwise all paths are returned.
        If force is set, all covers are written again.
"""
write_paths = self.paths
if not force and self.remaining_paths:
write_paths = self.remaining_paths
return [path + "." + self.extension for path in write_paths]
def __repr__(self):
return "ArtistInfo<name=%s,paths=%r>" % (self.name, self.paths)
class FetchArtistPlugin(plugins.BeetsPlugin):
"""
    The fetchartist plugin.
"""
def __init__(self):
super(FetchArtistPlugin, self).__init__()
self._last_fm = pylast.LastFMNetwork(api_key=plugins.LASTFM_KEY)
self.config.add({
COVER_NAME_KEY: ""
})
self._process_config()
self._create_path_templates()
def _process_config(self):
self._cover_name = self.config[COVER_NAME_KEY].get().strip()
if not self._cover_name:
self._cover_name = None
def _create_path_templates(self):
self._library_path = config["directory"].get()
self._default_template = util.strip_template_path_suffix(
config["paths"]["default"].get(), "$albumartist")
self._singleton_template = util.strip_template_path_suffix(
config["paths"]["singleton"].get(), "$artist")
def commands(self):
cmd = ui.Subcommand("fetchartist", help="download artist art")
cmd.parser.add_option("-f", "--force", dest="force",
action="store_true", default=False,
help="re-download art when already present")
def _func(lib, opts, args):
self._fetch_artist(lib.items(ui.decargs(args)), opts.force)
cmd.func = _func
return [cmd]
@staticmethod
def _get_artist_from_item(item):
"""
        Returns an appropriate artist name for the given item. If the item is
        part of an album, the albumartist is returned; otherwise the artist.
"""
if item.get_album() is None:
return item.artist
return item.albumartist
def _get_cover_name(self, item):
if self._cover_name:
return self._cover_name
return FetchArtistPlugin._get_artist_from_item(item)
def _create_cover_path(self, item):
if item.get_album() is None:
template = self._singleton_template
else:
template = self._default_template
evaluated_template = item.evaluate_template(template)
cover_name = self._get_cover_name(item)
return os.path.join(self._library_path, evaluated_template, cover_name)
def _create_artist_infos(self, items):
artist_infos = dict()
for item in items:
artist = FetchArtistPlugin._get_artist_from_item(item)
artist_info = artist_infos.get(artist)
if artist_info is None:
artist_info = ArtistInfo(artist)
artist_infos[artist] = artist_info
artist_info.add_path(self._create_cover_path(item))
return sorted(artist_infos.values(), key=lambda a: a.name)
@staticmethod
def _check_for_existing_covers(artist_info):
existing_paths, missing_paths =\
util.find_existing_and_missing_files(artist_info.paths,
FILE_TYPES)
artist_info.remaining_paths = missing_paths
# if there is no cover at all, return False
if not existing_paths:
return False
        # if all covers exist, return True
if not missing_paths:
return True
# TODO if only some covers exist query the user what to do
return False
def _request_cover(self, artist_name):
artist = self._last_fm.get_artist(artist_name)
try:
cover_url = artist.get_cover_image()
except pylast.WSError:
return None
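        # Stream the download so large cover images are not buffered fully in memory.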
response = requests.get(cover_url, stream=True)
content_type = response.headers.get('Content-Type')
if content_type is None or content_type not in CONTENT_TYPES:
self._log.debug(u"not a supported image: {}",
content_type or 'no content type')
return None
extension = CONTENT_TYPE_TO_EXTENSION_MAP[content_type]
return (response.raw, extension)
def _fetch_cover(self, artist_info):
result = self._request_cover(artist_info.name)
if result is None:
return False
artist_info.cover, artist_info.extension = result
return True
def _write_covers(self, artist_info, force):
for path in artist_info.get_write_paths(force):
self._log.debug("saving cover at '{}'".format(path))
with open(path, "wb") as target:
shutil.copyfileobj(artist_info.cover, target)
def _update_cover(self, artist_info, force):
all_exist = FetchArtistPlugin._check_for_existing_covers(artist_info)
if force or not all_exist:
if self._fetch_cover(artist_info):
self._write_covers(artist_info, force)
message = ui.colorize('text_success', 'artist cover found')
else:
message = ui.colorize('text_error', 'no artist cover found')
else:
message = ui.colorize('text_highlight_minor', 'has artist cover')
self._log.info(u'{0}: {1}', artist_info.name, message)
def _fetch_artist(self, items, force):
artist_infos = self._create_artist_infos(items)
for artist_info in artist_infos:
self._update_cover(artist_info, force)
|
Python
| 0.000009
|
@@ -2558,44 +2558,46 @@
lp=%22
-re-download art when already present
+force overwrite existing artist covers
%22)%0A%0A
|
4748a68b6bde662245b85ecf2a52fbedffc4ffcb
|
Remove exception handling.
|
beetsplug/sonosupdate.py
|
beetsplug/sonosupdate.py
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2018, Tobias Sauerwein.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Updates a Sonos library whenever the beets library is changed.
This is based on the Kodi Update plugin.
"""
from __future__ import division, absolute_import, print_function
from beets.plugins import BeetsPlugin
import six
import soco
class SonosUpdate(BeetsPlugin):
def __init__(self):
super(SonosUpdate, self).__init__()
self.register_listener('database_change', self.listen_for_db_change)
def listen_for_db_change(self, lib, model):
"""Listens for beets db change and register the update"""
self.register_listener('cli_exit', self.update)
def update(self, lib):
"""When the client exists try to send refresh request to a Sonos
controler.
"""
self._log.info(u'Requesting a Sonos library update...')
# Try to send update request.
try:
device = soco.discovery.any_soco()
device.music_library.start_library_update()
except NoneType:
self._log.warning(u'Could not find a Sonos device.')
return
self._log.info(u'Sonos update triggered')
|
Python
| 0
|
@@ -1455,63 +1455,8 @@
')%0A%0A
- # Try to send update request.%0A try:%0A
@@ -1493,16 +1493,36 @@
y_soco()
+%0A%0A if device:
%0A
@@ -1570,17 +1570,16 @@
pdate()%0A
-%0A
@@ -1583,21 +1583,10 @@
e
-xcept NoneTyp
+ls
e:%0A
|
77ddff664ad1e10037a43c3ffabd816387c35e42
|
Use a comprehension instead of a lambda function
|
rotational-cipher/rotational_cipher.py
|
rotational-cipher/rotational_cipher.py
|
import string
UPPER = string.ascii_uppercase
LOWER = string.ascii_lowercase
def rotate(s, n):
rules = shift_rules(n)
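    # Characters absent from the mapping (digits, punctuation, spaces) pass through unchanged.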
return "".join(map(lambda k: rules.get(k, k), s))
def shift_rules(n):
shifted = UPPER[n:] + UPPER[:n] + LOWER[n:] + LOWER[:n]
return {k:v for k,v in zip(UPPER+LOWER, shifted)}
|
Python
| 0.00054
|
@@ -141,43 +141,38 @@
oin(
-map(lambda k: rules.get(k, k),
+rules.get(ch, ch) for ch in
s)
-)%0A
%0A%0Ade
|
25beb8ce9f21d5ef5255304700a76ed2d7aaa425
|
Add initial solution
|
rotational-cipher/rotational_cipher.py
|
rotational-cipher/rotational_cipher.py
|
def rotate():
pass
|
Python
| 0.000001
|
@@ -1,8 +1,88 @@
+import string%0A%0A%0AUPPER = string.ascii_uppercase%0ALOWER = string.ascii_lowercase%0A%0A%0A
def rota
@@ -88,16 +88,332 @@
ate(
+s, n
):%0A
-pass
+return %22%22.join(rot_gen(s,n))%0A%0A%0Adef shift_rules(n):%0A shifted = UPPER%5Bn:%5D + UPPER%5B:n%5D + LOWER%5Bn:%5D + LOWER%5B:n%5D%0A return %7Bk:v for k,v in zip(UPPER+LOWER, shifted)%7D%0A%0A%0Adef rot_gen(s, n):%0A rules = shift_rules(n)%0A for ch in s:%0A try:%0A yield rules%5Bch%5D%0A except KeyError:%0A yield ch
%0A
|
644f66a39fd59b1983eee6f127e13e1585a598cd
|
Fix breakages from girder-client v2.0
|
script/upload-test-images.py
|
script/upload-test-images.py
|
import girder_client
import os
import sys
def main():
# Use the API key to authenticate.
key = os.environ.get("GIRDER_API_KEY")
if key is None:
print >>sys.stderr, "Environment variable GIRDER_API_KEY is blank. Cannot upload images."
return 1
gc = girder_client.GirderClient(host="data.kitware.com", port=443, scheme="https")
gc.authenticate(apiKey=key)
# Retrieve the target folder, which should be at ~/Public/Travis\ Candela
user = gc.get("user/me")
if user is None:
print >>sys.stderr, "No user logged in; API key may be bad."
return 1
travis_build_number = os.environ.get("TRAVIS_BUILD_NUMBER")
travis_job_number = os.environ.get("TRAVIS_JOB_NUMBER")
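    # Nest folders by Travis build and job number so each CI run uploads into its own directory.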
folder = gc.load_or_create_folder("Public", user["_id"], "user")
folder = gc.load_or_create_folder("Travis Candela", folder["_id"], "folder")
folder = gc.load_or_create_folder(travis_build_number, folder["_id"], "folder")
folder = gc.load_or_create_folder(travis_job_number, folder["_id"], "folder")
# Upload the files specified on the command line, creating (or loading) a
# folder for each.
for imageFile in sys.argv[1:]:
(dirname, filename) = os.path.split(imageFile)
compName = dirname.split(os.path.sep)[-2]
compFolder = gc.load_or_create_folder(compName, folder["_id"], "folder")
gc._upload_as_item(filename, compFolder["_id"], imageFile)
if __name__ == "__main__":
sys.exit(main())
|
Python
| 0
|
@@ -331,18 +331,8 @@
om%22,
- port=443,
sch
@@ -733,36 +733,33 @@
er = gc.load
-_or_c
+OrC
reate
-_f
+F
older(%22Publi
@@ -799,36 +799,33 @@
er = gc.load
-_or_c
+OrC
reate
-_f
+F
older(%22Travi
@@ -877,36 +877,33 @@
er = gc.load
-_or_c
+OrC
reate
-_f
+F
older(travis
@@ -958,36 +958,33 @@
er = gc.load
-_or_c
+OrC
reate
-_f
+F
older(travis
@@ -1296,20 +1296,17 @@
load
-_or_c
+OrC
reate
-_f
+F
olde
@@ -1338,25 +1338,113 @@
, %22folder%22)%0A
-%0A
+ size = os.stat(imageFile).st_size%0A%0A with open(imageFile, %22rb%22) as fd:%0A
gc._
@@ -1446,62 +1446,136 @@
gc.
-_
upload
-_as_item(filename, compFolder%5B%22_id%22%5D, imageFile
+File(%0A parentId=compFolder%5B%22_id%22%5D, stream=fd, name=filename, size=size,%0A parentType=%22folder%22
)%0A%0A%0A
|
c71c82a8f1da678da740b726a66ab5bc9955b261
|
work on digits_video.py
|
samples/python2/digits_video.py
|
samples/python2/digits_video.py
|
import numpy as np
import cv2
import digits
import os
import video
from common import mosaic
def main():
cap = video.create_capture()
classifier_fn = 'digits_svm.dat'
if not os.path.exists(classifier_fn):
print '"%s" not found, run digits.py first' % classifier_fn
return
model = digits.SVM()
model.load('digits_svm.dat')
SZ = 20
while True:
ret, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
bin = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 31, 10)
bin = cv2.medianBlur(bin, 3)
contours, _ = cv2.findContours( bin.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
boxes = []
for cnt in contours:
x, y, w, h = cv2.boundingRect(cnt)
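            # Reject blobs that are too small, too tall, or too wide to be a digit.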
if h < 20 or h > 60 or 1.2*h < w:
continue
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0))
sub = bin[y:,x:][:h,:w]
#sub = ~cv2.equalizeHist(sub)
#_, sub_bin = cv2.threshold(sub, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
s = 1.1*h/SZ
m = cv2.moments(sub)
m00 = m['m00']
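            # Filter on ink density: the digit should cover roughly 10-90% of its bounding box.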
if m00/255 < 0.1*w*h or m00/255 > 0.9*w*h:
continue
#frame[y:,x:][:h,:w] = sub[...,np.newaxis]
c1 = np.float32([m['m10'], m['m01']]) / m00
c0 = np.float32([SZ/2, SZ/2])
t = c1 - s*c0
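            # Affine transform that centers the digit and scales it into the SZ x SZ sample.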
A = np.zeros((2, 3), np.float32)
A[:,:2] = np.eye(2)*2
A[:,2] = t
sub1 = cv2.warpAffine(sub, A, (SZ, SZ), flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)
sub1 = digits.deskew(sub1)
sample = np.float32(sub1).reshape(1,SZ*SZ) / 255.0
digit = model.predict(sample)[0]
cv2.putText(frame, '%d'%digit, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (200, 0, 0), thickness = 1)
boxes.append(sub1)
if len(boxes) > 0:
cv2.imshow('box', mosaic(10, boxes))
cv2.imshow('frame', frame)
cv2.imshow('bin', bin)
if cv2.waitKey(1) == 27:
break
if __name__ == '__main__':
main()
|
Python
| 0.000105
|
@@ -862,18 +862,18 @@
if h %3C
-20
+16
or h %3E
@@ -1180,13 +1180,16 @@
s =
-1.1*h
+float(h)
/SZ%0D
@@ -1596,17 +1596,17 @@
.eye(2)*
-2
+s
%0D%0A
|
77a259b1cdc5b46cad8975ffd086c053a634381e
|
Use Mysql.
|
saskatoon/saskatoon/settings.py
|
saskatoon/saskatoon/settings.py
|
# coding: utf-8
"""
Django settings for saskatoon project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-ly1!%ui5z+*cfy9&wb=os6c(iysect2od0di1d$p(o$a696jo'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'bootstrap3_datepicker',
'dal',
'dal_select2',
'django_filters',
'modeltranslation',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'pages',
'harvest',
'member',
'saskatoon',
'django_extensions',
'bootstrap3',
'django_forms_bootstrap',
'crispy_forms',
'simple_history',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.locale.LocaleMiddleware',
]
ROOT_URLCONF = 'saskatoon.urls'
APPEND_SLASH = True
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
BASE_DIR + '/saskatoon/templates/'
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'saskatoon.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
#DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.mysql',
# 'NAME': 'saskatoon',
# 'USER': '',
# 'PASSWORD': '',
# }
#}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
from django.core.urlresolvers import reverse_lazy
AUTH_USER_MODEL = "member.AuthUser"
LOGIN_URL = reverse_lazy('pages:login')
LOGIN_REDIRECT_URL = reverse_lazy('pages:index')
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
# http://django-modeltranslation.readthedocs.io/en/latest/installation.html
from django.utils.translation import gettext_lazy as _
LANGUAGE_CODE = 'fr-ca'
TIME_ZONE = 'America/Montreal'
USE_I18N = True
USE_L10N = True
USE_TZ = True
gettext = lambda s: s
LANGUAGES = (
    ('fr', u'Français'),
('en', u'English'),
)
LOCALE_PATHS = [
'harvest/locale/',
'member/locale/',
'pages/locale/',
'saskatoon/locale/'
]
MODELTRANSLATION_TRANSLATION_FILES = (
'harvest.translation',
'member.translation',
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'saskatoon/static')
STATIC_URL = '/static/'
# Media files (uploaded files, images)
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# BOOTSTRAP 3
BOOTSTRAP3 = {
'success_css_class': '',
}
# SUIT CONFIG
SUIT_CONFIG = {
'ADMIN_NAME': 'Saskatoon',
'MENU_EXCLUDE': ('auth.group', 'auth'),
}
FILTERS_HELP_TEXT_FILTER = False
|
Python
| 0
|
@@ -2606,83 +2606,40 @@
%0A%0A#
-Database%0A# https://docs.djangoproject.com/en/1.9/ref/settings/#database
+USE MySQL for your local test
s%0A%0A
-#
DATA
@@ -2648,17 +2648,16 @@
SES = %7B%0A
-#
'def
@@ -2665,17 +2665,16 @@
ult': %7B%0A
-#
@@ -2711,17 +2711,16 @@
mysql',%0A
-#
@@ -2732,21 +2732,11 @@
': '
-saskatoon
',%0A
-#
@@ -2747,25 +2747,24 @@
'USER': '',%0A
-#
'PAS
@@ -2779,152 +2779,8 @@
'',%0A
-# %7D%0A#%7D%0A%0ADATABASES = %7B%0A 'default': %7B%0A 'ENGINE': 'django.db.backends.sqlite3',%0A 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),%0A
@@ -2784,18 +2784,16 @@
%7D%0A%7D%0A%0A
-%0A%0A
# Passwo
|
9b0612b0a4d5c483013311d7c7814cc268609cb0
|
Fix URL in setup.py
|
support/python/collins_client/setup.py
|
support/python/collins_client/setup.py
|
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(name="collins_client",
version="0.1.0",
description="The python interface to the collins api.",
author="John Bunting, Nick Thuesen, Nick Sauro, Will Richard",
author_email="opensourcesoftware@tumblr.com",
url="https://github.com/tumblr/collins/tree/master/support/python/collins-client",
packages=find_packages(),
      keywords='collins infrastructure management',
classifiers= [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2 :: Only',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: System :: Systems Administration'
],
install_requires= [
'grequests==0.2.0',
]
)
|
Python
| 0.000002
|
@@ -375,17 +375,17 @@
/collins
--
+_
client%22,
|
0577d536583fb85e6a4e9675115ce744f60679e7
|
Update string module references
|
app/lib/text_handling.py
|
app/lib/text_handling.py
|
# -*- coding: utf-8 -*-
"""
Common string handling functions.
Usage:
$ python -m lib.text_handling
"""
from string import punctuation, whitespace
def flattenText(text, replacement=u" "):
r"""
Remove line breaks in a string.
Flatten a string from multi-line to a single line, using a specified
string to replace line breaks.
Rather than just replacing '\n', we also consider the '\r\n' Windows line
ending, as this has been observed in Twitter profile descriptions even when
testing on a Linux machine.
    It is not practical to use .split and .join here: splitting on one
    kind of character produces a list whose elements would then have to be
    split on the other kind, flattened, and joined back into a single
    string.
:param text: Single unicode string, which could have line breaks
in the '\n' or '\r\n' format.
:param replacement: Unicode string to use in place of the line
breaks. Defaults to a single space. Other recommended values are:
- u"\t"
- u" "
- u" ; "
- u"\n"
:return: the input text with newline characters replaced with the
replacement string.
"""
text = text.replace(u"\r\n", replacement)
if replacement != "\n":
text = text.replace(u"\n", replacement)
return text
def stripSymbols(inputStr, keepHash=False, keepAt=False, keepWhiteSpace=False):
"""
Remove symbols from a string, but optionally keep any which are specified.
Accepts str and unicode input so this function has broader application,
but rejects other data types. The output type is forced to match the
type of the input. (Note: it appears that both types may contain a unicode
character, but only an ASCII str can contain ASCII characters.)
Removal of unicode characters:
https://stackoverflow.com/questions/15321138/removing-unicode-u2026-like-characters-in-a-string-in-python2-7
:param str inputStr: Word or sentence.
:param keepHash: Set as True to keep the '#' symbol.
:param keepAt: Set as True to keep the '@' symbol.
:param keepWhiteSpace: Set at True to keep the whitespace characters.
:return outputList: A list of cleaned strings without punctuation or
special unicode characters. Keeps the characters indicated by arguments.
"""
assert isinstance(inputStr, basestring), (
'Expected input as unicode or ascii string, but got type `{0}`.'
.format(type(inputStr).__name__)
)
# Force the input to be unicode.
if type(inputStr) == unicode:
outputStr = inputStr
else:
outputStr = inputStr.decode('unicode_escape')
# Remove unicode symbols.
outputStr = outputStr.encode('ascii', 'ignore')
# Replace whitespace characters.
wsToRemove = whitespace
if not keepWhiteSpace:
for ws in wsToRemove:
if ws in outputStr:
outputStr = outputStr.replace(ws, ' ')
# Remove standard punctuation.
charToRemove = punctuation
if keepHash:
charToRemove = charToRemove.replace('#', '')
if keepAt:
charToRemove = charToRemove.replace('@', '')
for c in charToRemove:
if c in outputStr:
outputStr = outputStr.replace(c, '')
if type(inputStr) == unicode:
outputStr = outputStr.encode('utf-8')
outputList = outputStr.split(' ')
outputList = [s for s in outputList if s]
return outputList
def main():
"""
Function to manually verify functionality of the strip symbols logic.
"""
tests = [
"I am a #Tweet, but need cleaning! ^-^ Why don't you help me,"
" my friend @jamie_123?",
u"Iβm a #unicode string with unicode symbol near the start!",
"Iβm an #ascii string, also with unicode symbol near the start!",
u"Unicode symbol \u2026 (β¦) in unicode.",
"Unicode symbol \u2026 (β¦) in ascii.",
"I am some ****stars**** and I am some <<<arrows>>>.",
"I have \t\ttabs.",
"I am a \nline break.",
punctuation,
"Join me LIVE with @VP, @SecretaryPerry, @SecretaryZinke and"
" @EPAScottPruitt. \n#UnleashingAmericanEnergy\nhttps://t.co/hlM7F2BQD9",
"MAKE AMERICA SAFE AGAIN!\n\n#NoSanctuaryForCriminalsAct \n#KatesLaw"
" #SaveAmericanLives \n\nhttps://t.co/jbN4hPjqjS",
# Todo - handle URIs in sentence.
"This is a link! http://IAmLink.com#yeah",
u"https://IAmUnicodeLink.com/abc_def"
]
for t in tests:
print t
if type(t) != str:
t = t.encode('ascii', 'ignore')
print stripSymbols(t, keepHash=True, keepAt=True)
print '----'
if __name__ == '__main__':
main()
|
Python
| 0.000006
|
@@ -105,50 +105,21 @@
%22%22%22%0A
-from string import punctuation, whitespace
+import string
%0A%0A%0Ad
@@ -2842,36 +2842,8 @@
rs.%0A
- wsToRemove = whitespace%0A
@@ -2881,23 +2881,29 @@
for
-ws
+c
in
-wsToRemov
+string.whitespac
e:%0A
@@ -2916,18 +2916,17 @@
if
-ws
+c
in outp
@@ -2978,18 +2978,17 @@
replace(
-ws
+c
, ' ')%0A%0A
@@ -3022,16 +3022,16 @@
uation.%0A
-
char
@@ -3041,16 +3041,23 @@
emove =
+string.
punctuat
@@ -4085,16 +4085,23 @@
+string.
punctuat
|
e69f45ecc3ee23be203136be02e9648a4930a3e8
|
Make test.py use difflib instead of filecmp.
|
test/test.py
|
test/test.py
|
# -*- coding: utf-8 -*-
import os
import sys
import filecmp
import tempfile
from subprocess import call
def run_test(fmt, sample, arg):
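    # Render the PSD to a temp file in the requested format and compare it with the golden sample.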
temp = tempfile.NamedTemporaryFile()
psdump = "../build/psdump -o {0} -f {1} {2}".format(temp.name, fmt, arg)
call(psdump.split())
if not filecmp.cmp(temp.name, sample):
print "{0} test failed.".format(fmt.upper())
return 1
return 0
if __name__ == '__main__':
dirname = os.path.dirname(sys.argv[0])
os.chdir(dirname)
psdfile = "sample.psd"
psdfiles = "sample.psd sample.psd"
print "### Single input file test"
status = 0
status += run_test("text", "sample.txt", psdfile)
status += run_test("xml", "sample.xml", psdfile)
status += run_test("json", "sample.json", psdfile)
if status == 0: print "### OK."
print "### Multiple input files test"
status = 0
status += run_test("text", "sample_multi.txt", psdfiles)
status += run_test("xml", "sample_multi.xml", psdfiles)
status += run_test("json", "sample_multi.json", psdfiles)
if status == 0: print "### OK."
|
Python
| 0
|
@@ -61,24 +61,23 @@
%0Aimport
-tempfile
+difflib
%0Afrom su
@@ -134,49 +134,8 @@
g):%0A
- temp = tempfile.NamedTemporaryFile()%0A
@@ -167,21 +167,22 @@
-o
-%7B0%7D
+temp
-f %7B
-1%7D %7B2
+0%7D %7B1
%7D%22.f
@@ -187,27 +187,16 @@
.format(
-temp.name,
fmt, arg
@@ -227,48 +227,353 @@
())%0A
+%0A
-if not filecmp.cmp(temp.name, sample
+with open(%22temp%22, %22r%22) as temp_file:%0A temp_lines = temp_file.readlines()%0A%0A with open(sample, %22r%22) as sample_file:%0A sample_lines = sample_file.readlines()%0A%0A # Compare corresponding lines from 'temp' and 'sample' files%0A diff = difflib.ndiff(temp_lines, sample_lines)%0A if filter(lambda x: not x.startswith(' '), diff
):%0A
|
80c5f94f3c2ed02c8603d3eecea23cdb4711ae79
|
Use hanging indent
|
openfisca_france/tests/test_tax_rates.py
|
openfisca_france/tests/test_tax_rates.py
|
# -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openfisca_core import periods
from openfisca_core.simulations import average_tax_rate, marginal_tax_rate
import openfisca_france
TaxBenefitSystem = openfisca_france.init_country()
tax_benefit_system = TaxBenefitSystem()
def test_average_tax_rate():
year = 2013
simulation = tax_benefit_system.new_scenario().init_single_entity(
axes = [
dict(
count = 100,
name = 'sali',
max = 24000,
min = 0,
),
],
period = periods.period('year', year),
parent1 = dict(agem = 40 * 12 + 6),
).new_simulation(debug = True)
assert (average_tax_rate(simulation, target_column_name = 'revdisp', varying_column_name = 'revdisp') == 0).all()
def test_marginal_tax_rate():
year = 2013
simulation = tax_benefit_system.new_scenario().init_single_entity(
axes = [
dict(
count = 10000,
name = 'sali',
max = 1000000,
min = 0,
),
],
period = periods.period('year', year),
parent1 = dict(agem = 40 * 12 + 6),
).new_simulation(debug = True)
assert (marginal_tax_rate(simulation, target_column_name = 'revdisp', varying_column_name = 'revdisp') == 0).all()
if __name__ == '__main__':
import logging
import sys
logging.basicConfig(level = logging.ERROR, stream = sys.stdout)
test_marginal_tax_rate()
test_average_tax_rate()
|
Python
| 0.000001
|
@@ -1389,35 +1389,43 @@
0,%0A
-),%0A
+ ),%0A
%5D,%0A
@@ -1943,35 +1943,43 @@
0,%0A
-),%0A
+ ),%0A
%5D,%0A
|
a6e609c44434bdeeb8ba4391ed3508d335a9a5e3
|
Revert queries-hive `No JSON object could be decoded` commit (#2624)
|
apps/jobbrowser/src/jobbrowser/api2.py
|
apps/jobbrowser/src/jobbrowser/api2.py
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import sys
from urllib.request import Request, urlopen
from django.http import HttpResponse
from desktop.lib.i18n import smart_unicode
from desktop.lib.django_util import JsonResponse
from desktop.views import serve_403_error
from jobbrowser.apis.base_api import get_api
from jobbrowser.apis.query_store import query_store_proxy, stream_download_bundle
from jobbrowser.conf import DISABLE_KILLING_JOBS, USE_PROXY
if sys.version_info[0] > 2:
from django.utils.translation import gettext as _
else:
from django.utils.translation import ugettext as _
LOG = logging.getLogger(__name__)
def api_error_handler(func):
def decorator(*args, **kwargs):
response = {}
try:
return func(*args, **kwargs)
except Exception as e:
LOG.exception('Error running %s' % func)
response['status'] = -1
response['message'] = smart_unicode(e)
finally:
if response:
return JsonResponse(response)
return decorator
@api_error_handler
def jobs(request, interface=None):
response = {'status': -1}
cluster = json.loads(request.POST.get('cluster', '{}'))
interface = interface or json.loads(request.POST.get('interface'))
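  # Flatten the posted list of single-key filter dicts into one mapping, dropping empty values.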
filters = dict([(key, value) for _filter in json.loads(
request.POST.get('filters', '[]')) for key, value in list(_filter.items()) if value
])
jobs = get_api(request.user, interface, cluster=cluster).apps(filters)
response['apps'] = jobs['apps']
response['total'] = jobs.get('total')
response['status'] = 0
return JsonResponse(response)
@api_error_handler
def job(request, interface=None):
response = {'status': -1}
cluster = json.loads(request.POST.get('cluster', '{}'))
interface = interface or json.loads(request.POST.get('interface'))
app_id = json.loads(request.POST.get('app_id'))
if interface == 'schedules':
offset = json.loads(request.POST.get('pagination', '{"offset": 1}')).get('offset')
response_app = get_api(request.user, interface, cluster=cluster).app(app_id, offset=offset)
else:
response_app = get_api(request.user, interface, cluster=cluster).app(app_id)
if response_app.get('status') == -1 and response_app.get('message'):
response.update(response_app)
else:
response['app'] = response_app
response['status'] = 0
return JsonResponse(response)
@api_error_handler
def action(request, interface=None, action=None):
response = {'status': -1, 'message': ''}
cluster = json.loads(request.POST.get('cluster', '{}'))
interface = json.loads(request.POST.get('interface'))
app_ids = json.loads(request.POST.get('app_ids'))
operation = json.loads(request.POST.get('operation'))
if operation.get('action') == 'kill' and DISABLE_KILLING_JOBS.get():
return serve_403_error(request)
response['operation'] = operation
response.update(
get_api(request.user, interface, cluster=cluster).action(app_ids, operation)
)
return JsonResponse(response)
@api_error_handler
def logs(request):
response = {'status': -1}
cluster = json.loads(request.POST.get('cluster', '{}'))
interface = json.loads(request.POST.get('interface'))
app_id = json.loads(request.POST.get('app_id'))
app_type = json.loads(request.POST.get('type'))
log_name = json.loads(request.POST.get('name'))
response['logs'] = get_api(request.user, interface, cluster=cluster).logs(
app_id, app_type, log_name, json.loads(request.GET.get('is_embeddable', 'false').lower())
)
response['status'] = 0
return JsonResponse(response)
@api_error_handler
def profile(request):
response = {'status': -1}
cluster = json.loads(request.POST.get('cluster', '{}'))
interface = json.loads(request.POST.get('interface'))
app_id = json.loads(request.POST.get('app_id'))
app_type = json.loads(request.POST.get('app_type'))
app_property = json.loads(request.POST.get('app_property'))
app_filters = dict([
(key, value) for _filter in json.loads(request.POST.get('app_filters', '[]'))
for key, value in list(_filter.items()) if value
])
api = get_api(request.user, interface, cluster=cluster)
api._set_request(request) # For YARN
resp = api.profile(app_id, app_type, app_property, app_filters)
if isinstance(resp, HttpResponse):
return resp
else:
response[app_property] = resp
response['status'] = 0
return JsonResponse(response)
@api_error_handler
def query_store_api(request, path=None):
response = {'status': -1}
if USE_PROXY.get():
response = query_store_proxy(request, path)
else:
if path == 'api/query/search':
filters = json.loads(request.body)
resp = get_api(request.user, interface='queries-hive').apps(filters['search'])
response = resp['apps']
return JsonResponse(response)
@api_error_handler
def query_store_download_bundle(request, id=None):
return stream_download_bundle(request, id)
|
Python
| 0.000004
|
@@ -2138,16 +2138,90 @@
e%0A %5D)%0A%0A
+ if interface == 'queries-hive':%0A filters = json.loads(request.body)%0A%0A
jobs =
@@ -2378,32 +2378,118 @@
%5B'status'%5D = 0%0A%0A
+ if interface == 'queries-hive':%0A return JsonResponse(response%5B'apps'%5D)%0A else:%0A
return JsonRes
@@ -2707,32 +2707,125 @@
t('interface'))%0A
+ if interface == 'queries-hive':%0A app_id = json.loads(request.body)%5B'queryId'%5D%0A else:%0A
app_id = json.
@@ -3331,32 +3331,117 @@
%5B'status'%5D = 0%0A%0A
+ if interface == 'queries-hive':%0A return JsonResponse(response%5B'app'%5D)%0A else:%0A
return JsonRes
|
a26e3c604e735c69a105386a0d92168870ab7a0b
|
fix node artifacts upload path (#1601)
|
scenarios/kubernetes_kubelet.py
|
scenarios/kubernetes_kubelet.py
|
#!/usr/bin/env python
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Need to figure out why this only fails on travis
# pylint: disable=bad-continuation
"""Runs node and/or kubelet tests for kubernetes/kubernetes."""
import argparse
import os
import re
import subprocess
import sys
BRANCH_VERSION = {
'release-1.2': 'release-1.4',
'release-1.3': 'release-1.4',
'master': 'release-1.6',
}
VERSION_TAG = {
'release-1.4': '1.4-latest',
'release-1.5': '1.5-latest',
'release-1.6': '1.6-latest',
}
def check(*cmd):
"""Log and run the command, raising on errors."""
print >>sys.stderr, 'Run:', cmd
subprocess.check_call(cmd)
def var(path):
"""Expands '${foo} interesting' to 'something interesting'."""
return os.path.expandvars(path)
def main(script, properties, branch, ssh, ssh_pub, robot):
"""Test node branch by sending script specified properties and creds."""
mat = re.match(r'master|release-\d+\.\d+', branch)
if not mat:
raise ValueError(branch)
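    # Map the kubernetes branch to the kubekins-node image tag used for testing.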
tag = VERSION_TAG[BRANCH_VERSION.get(branch, branch)]
img = 'gcr.io/k8s-testimages/kubekins-node:%s' % tag
artifacts = '%s/_artifacts' % os.environ['WORKSPACE']
if not os.path.isdir(artifacts):
os.makedirs(artifacts)
k8s = os.getcwd()
if not os.path.basename(k8s) == 'kubernetes':
raise ValueError(k8s)
for path in [ssh, ssh_pub, robot]:
if not os.path.isfile(var(path)):
raise IOError(path, var(path))
private = '/root/.ssh/google_compute_engine'
public = '%s.pub' % private
service = '/service-account.json'
os.environ['NODE_TEST_SCRIPT'] = script
    os.environ['NODE_TEST_PROPERTIES'] = properties
check('docker', 'pull', img) # Update image if changed
check(
'docker', 'run', '--rm=true',
'-v', '/etc/localtime:/etc/localtime:ro',
'-v', '/var/run/docker.sock:/var/run/docker.sock',
'-v', '%s:/go/src/k8s.io/kubernetes' % k8s,
'-v', '%s:/workspace/artifacts' % artifacts,
'-v', '%s:%s:ro' % (var(robot), service),
'-v', '%s:%s:ro' % (var(ssh), private),
'-v', '%s:%s:ro' % (var(ssh_pub), public),
'-e', 'GCE_USER=jenkins',
'-e', 'GOOGLE_APPLICATION_CREDENTIALS=%s' % service,
'-e', 'JENKINS_GCE_SSH_PRIVATE_KEY_FILE=%s' % private,
'-e', 'JENKINS_GCE_SSH_PUBLIC_KEY_FILE=%s' % public,
'-e', 'NODE_TEST_PROPERTIES=%s' % properties,
'-e', 'NODE_TEST_SCRIPT=%s' % script,
'-e', 'REPO_DIR=%s' % k8s, # TODO(fejta): used?
img,
)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(
'Runs kubelet tests with the specified script, properties and creds')
PARSER.add_argument(
'--ssh',
default='${JENKINS_GCE_SSH_PRIVATE_KEY_FILE}',
help='Path to .ssh/google_compute_engine keys')
PARSER.add_argument(
'--ssh-pub',
default='${JENKINS_GCE_SSH_PUBLIC_KEY_FILE}',
help='Path to pub gce ssh key')
PARSER.add_argument(
'--service-account',
default='${GOOGLE_APPLICATION_CREDENTIALS}',
help='Path to service-account.json')
PARSER.add_argument(
'--branch', default='master', help='Branch used for testing')
PARSER.add_argument(
'--properties',
default="test/e2e_node/jenkins/jenkins-ci.properties",
help='Path to a .properties file')
PARSER.add_argument(
'--script',
default='./test/e2e_node/jenkins/e2e-node-jenkins.sh',
help='Script in kubernetes/kubernetes that runs checks')
ARGS = PARSER.parse_args()
main(
ARGS.script,
ARGS.properties,
ARGS.branch,
ARGS.ssh,
ARGS.ssh_pub,
ARGS.service_account,
)
|
Python
| 0
|
@@ -2544,16 +2544,17 @@
rkspace/
+_
artifact
|
20e7453d143223ae1c95ad32ee49660ceeadf3f7
|
Prepare for 1.1.0
|
crudset/version.py
|
crudset/version.py
|
version = "1.0.0"
|
Python
| 0.000014
|
@@ -10,9 +10,13 @@
%221.
-0
+1
.0
+-dev
%22%0A
|
4510db1e8f2fe3298de395a9d8b1e0783f92c758
|
update revision
|
source/tyr/migrations/versions/1b59ffb421e4_change_max_nb_crowfly_by_mode_type.py
|
source/tyr/migrations/versions/1b59ffb421e4_change_max_nb_crowfly_by_mode_type.py
|
"""change max_nb_crowfly_by_mode to JSONB and set server_default
Revision ID: 1b59ffb421e4
Revises: 483639f1f00
Create Date: 2018-08-30 12:42:21.089095
"""
# revision identifiers, used by Alembic.
revision = '1b59ffb421e4'
down_revision = '483639f1f00'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import JSONB
from navitiacommon import default_values
import json
def upgrade():
op.drop_column('instance', 'max_nb_crowfly_by_mode')
op.add_column('instance', sa.Column('max_nb_crowfly_by_mode', JSONB,
server_default=json.dumps(default_values.max_nb_crowfly_by_mode)))
def downgrade():
op.drop_column('instance', 'max_nb_crowfly_by_mode')
op.add_column('instance', sa.Column('max_nb_crowfly_by_mode', sa.PickleType(pickler=json), nullable=True))
|
Python
| 0
|
@@ -236,27 +236,28 @@
sion = '
-483639f1f00
+105823db902c
'%0A%0Afrom
|
0e1c2dad600da595403ee893b787a29bdc38e215
|
Bump to 2.1.1
|
crudset/version.py
|
crudset/version.py
|
version = "0.2.0-dev"
|
Python
| 0.000134
|
@@ -8,15 +8,11 @@
= %22
-0.2.0-dev
+2.1.1
%22%0A
|
513c7a2f5c5fb5a8c47b3173a8d5854755f7928f
|
Use factories instead of creating instance from model
|
pylab/website/tests/test_about_page.py
|
pylab/website/tests/test_about_page.py
|
import datetime
from django_webtest import WebTest
from django.contrib.auth.models import User
from pylab.core.models import Event
class AboutPageTests(WebTest):
def setUp(self):
self.user = User.objects.create(username='u1')
def test_no_events_on_about_page(self):
resp = self.app.get('/about/')
self.assertEqual(resp.status_int, 200)
self.assertTrue(b'No events yet.' in resp.content)
def test_event_list_on_about_page(self):
Event.objects.create(
author=self.user,
starts=datetime.datetime(2015, 9, 3),
ends=datetime.datetime(2015, 9, 3),
title='Test title',
osm_map_link='http://openstreetmap.org/',
description='Test description',
)
resp = self.app.get('/about/')
self.assertEqual(resp.status_int, 200)
self.assertTrue(b'Test title' in resp.content)
|
Python
| 0
|
@@ -49,32 +49,24 @@
est%0A
+%0A
from
-django.contrib.auth
+pylab.core
.mod
@@ -80,13 +80,13 @@
ort
-User%0A
+Event
%0Afro
@@ -90,37 +90,40 @@
from pylab.core.
-model
+factorie
s import Event%0A%0A
@@ -120,16 +120,23 @@
rt Event
+Factory
%0A%0A%0Aclass
@@ -166,85 +166,8 @@
):%0A%0A
- def setUp(self):%0A self.user = User.objects.create(username='u1')%0A%0A
@@ -414,53 +414,140 @@
vent
-.objects.create(%0A author=self.user
+Factory(%0A event_type=Event.WEEKLY_MEETING,%0A title='Summer Python workshop',%0A slug='python-workshop'
,%0A
@@ -579,36 +579,44 @@
.datetime(2015,
-9
+7
, 3
+0, 18, 0
),%0A e
@@ -647,143 +647,21 @@
15,
-9, 3),%0A title='Test title',%0A osm_map_link='http://openstreetmap.org/',%0A description='Test description'
+7, 30, 20, 0)
,%0A
@@ -668,16 +668,17 @@
)%0A
+%0A
@@ -785,18 +785,30 @@
e(b'
-Test title
+Summer Python workshop
' in
|
244649b9908de64b97498d3d29d7718f487f84a3
|
Fix test
|
test/command_line/tst_stills_process.py
|
test/command_line/tst_stills_process.py
|
from __future__ import division
from dials.array_family import flex # import dependency
class Test(object):
def __init__(self):
from os.path import join
import libtbx.load_env
try:
dials_regression = libtbx.env.dist_path('dials_regression')
except KeyError, e:
print 'SKIP: dials_regression not configured'
exit(0)
self.path = join(dials_regression, "image_examples/SACLA_MPCCD_Cheetah")
def run(self):
self.test_sacla_h5()
def test_sacla_h5(self):
from os.path import join, exists
from libtbx import easy_run
import os
from uuid import uuid4
dirname ='tmp_%s' % uuid4().hex
os.mkdir(dirname)
os.chdir(dirname)
assert exists(join(self.path, 'run266702-0-subset.h5'))
f = open("process.phil", 'w')
f.write("""
indexing {
known_symmetry {
space_group = P43212
unit_cell = 78.9 78.9 38.1 90 90 90
}
method = real_space_grid_search
refinement_protocol.d_min_start = 2.2
}
spotfinder {
filter.min_spot_size = 2
}
refinement {
parameterisation {
beam.fix = all
detector.fix_list = Dist,Tau1
auto_reduction {
action = fix
min_nref_per_parameter = 1
}
crystal {
unit_cell {
restraints {
tie_to_target {
values = 78.9,78.9,38.1,90,90,90
sigmas = 1,1,1,0,0,0
apply_to_all = True
}
}
}
}
}
}
integration {
integrator = stills
profile.fitting = False
background {
algorithm = simple
simple {
model.algorithm = linear2d
outlier.algorithm = tukey
}
}
}
profile {
gaussian_rs {
min_spots.overall = 0
}
}
""")
f.close()
# Call dials.stills_process
result = easy_run.fully_buffered([
'dials.stills_process',
join(self.path, 'run266702-0-subset.h5'),
'process.phil',
]).raise_if_errors()
result.show_stdout()
import cPickle as pickle
for result, n_refls in zip(["idx-run266702-0-subset_00000_integrated.pickle",
"idx-run266702-0-subset_00001_integrated.pickle"],
[range(109,114), range(80,85)]): # large ranges to handle platform-specific differences
table = pickle.load(open(result, 'rb'))
assert len(table) in n_refls, len(table)
assert 'id' in table
assert (table['id'] == 0).count(False) == 0
print 'OK'
if __name__ == '__main__':
from dials.test import cd_auto
with cd_auto(__file__):
test = Test()
test.run()
|
Python
| 0.000004
|
@@ -2232,16 +2232,116 @@
kle%0A
+# Frame 1 no longer indexing after cctbx r25607 which made wavelengths be on a per-image basis%0A #
for resu
@@ -2410,24 +2410,25 @@
ickle%22,%0A
+#
@@ -2494,24 +2494,25 @@
ckle%22%5D,%0A
+#
@@ -2615,16 +2615,205 @@
erences%0A
+ for result, n_refls in zip(%5B%22idx-run266702-0-subset_00000_integrated.pickle%22%5D,%0A %5Brange(109,114)%5D): # large ranges to handle platform-specific differences%0A
ta
|
c9ba9b8796de0802f7b941245cc41eb7d59ce7c8
|
Normalize output path to cp.py
|
build/cp.py
|
build/cp.py
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Copy a file.
This module works much like the cp posix command - it takes 2 arguments:
(src, dst) and copies the file with path |src| to |dst|.
"""
import shutil
import sys
def Main(src, dst):
# Use copy instead of copyfile to ensure the executable bit is copied.
return shutil.copy(src, dst)
if __name__ == '__main__':
sys.exit(Main(sys.argv[1], sys.argv[2]))
|
Python
| 0.999758
|
@@ -334,16 +334,26 @@
%7C.%0A%22%22%22%0A%0A
+import os%0A
import s
@@ -490,20 +490,38 @@
py(src,
+os.path.normpath(
dst)
+)
%0A%0A%0Aif __
|
55c00fd6684d6fb378326026475945aea59bfa0b
|
Fix iterator to list
|
pyosmo/end_conditions/step_coverage.py
|
pyosmo/end_conditions/step_coverage.py
|
from pyosmo.end_conditions.base import OsmoEndCondition
class StepCoverage(OsmoEndCondition):
"""
    Stops testing when the required step coverage is reached
"""
def __init__(self, coverage_percent):
if coverage_percent > 100 or coverage_percent < 0:
raise Exception(f"Coverage is {coverage_percent} and it need to be >0 and <1")
self.coverage = coverage_percent / 100
def end_test(self, history, model):
""" Stops test case when defined number of test steps are executed """
all_steps = model.all_steps
steps_used = 0
for step in all_steps:
if history.current_test_case.get_step_count(step) > 0:
steps_used += 1
current_coverage = steps_used / len(all_steps)
return current_coverage >= self.coverage
def end_suite(self, history, model):
""" Stops test suite when defined number of test cases are executed """
all_steps = model.all_steps
steps_used = 0
for step in all_steps:
if history.get_step_count(step) > 0:
steps_used += 1
current_coverage = steps_used / len(all_steps)
return current_coverage >= self.coverage
|
Python
| 0.000084
|
@@ -516,32 +516,37 @@
all_steps =
+list(
model.all_steps%0A
@@ -536,32 +536,33 @@
(model.all_steps
+)
%0A steps_u
|
589a7803630c1e8809e917ff8d4c846fef3f86eb
|
Fix "invalid message size" test
|
test/functional/p2p_invalid_messages.py
|
test/functional/p2p_invalid_messages.py
|
#!/usr/bin/env python3
# Copyright (c) 2015-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node responses to invalid network messages."""
import asyncio
import struct
from test_framework import messages
from test_framework.mininode import (
NetworkThread,
P2PDataStore,
P2PInterface,
)
from test_framework.test_framework import PivxTestFramework
MSG_LIMIT = 4 * 1000 * 1000 # 4MB, per MAX_PROTOCOL_MESSAGE_LENGTH
VALID_DATA_LIMIT = MSG_LIMIT - 5 # Account for the 5-byte length prefix
class msg_unrecognized:
"""Nonsensical message. Modeled after similar types in test_framework.messages."""
command = b'badmsg'
def __init__(self, *, str_data):
self.str_data = str_data.encode() if not isinstance(str_data, bytes) else str_data
def serialize(self):
return messages.ser_string(self.str_data)
def __repr__(self):
return "{}(data={})".format(self.command, self.str_data)
class InvalidMessagesTest(PivxTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def run_test(self):
"""
. Test msg header
0. Send a bunch of large (4MB) messages of an unrecognized type. Check to see
that it isn't an effective DoS against the node.
"""
self.test_magic_bytes()
self.test_checksum()
self.test_size()
self.test_command()
self.test_large_inv()
node = self.nodes[0]
self.node = node
node.add_p2p_connection(P2PDataStore())
conn2 = node.add_p2p_connection(P2PDataStore())
#
# 0.
#
# Send as large a message as is valid, ensure we aren't disconnected but
# also can't exhaust resources.
#
msg_at_size = msg_unrecognized(str_data="b" * VALID_DATA_LIMIT)
assert len(msg_at_size.serialize()) == MSG_LIMIT
self.log.info("Sending a bunch of large, junk messages to test memory exhaustion. May take a bit...")
# Run a bunch of times to test for memory exhaustion.
for _ in range(80):
node.p2p.send_message(msg_at_size)
# Check that, even though the node is being hammered by nonsense from one
# connection, it can still service other peers in a timely way.
for _ in range(20):
conn2.sync_with_ping(timeout=2)
# Peer 1, despite serving up a bunch of nonsense, should still be connected.
self.log.info("Waiting for node to drop junk messages.")
node.p2p.sync_with_ping(timeout=320)
assert node.p2p.is_connected
def test_magic_bytes(self):
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
async def swap_magic_bytes():
conn._on_data = lambda: None # Need to ignore all incoming messages from now, since they come with "invalid" magic bytes
conn.magic_bytes = b'\x00\x11\x22\x32'
# Call .result() to block until the atomic swap is complete, otherwise
# we might run into races later on
asyncio.run_coroutine_threadsafe(swap_magic_bytes(), NetworkThread.network_event_loop).result()
with self.nodes[0].assert_debug_log(['PROCESSMESSAGE: INVALID MESSAGESTART ping peer=1']):
conn.send_message(messages.msg_ping(nonce=0xff))
conn.wait_for_disconnect(timeout=1)
self.nodes[0].disconnect_p2ps()
def test_checksum(self):
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
with self.nodes[0].assert_debug_log(['ProcessMessages(badmsg, 2 bytes): CHECKSUM ERROR expected 78df0a04 was ffffffff']):
msg = conn.build_message(msg_unrecognized(str_data="d"))
cut_len = (
4 + # magic
12 + # command
4 #len
)
# modify checksum
msg = msg[:cut_len] + b'\xff' * 4 + msg[cut_len + 4:]
self.nodes[0].p2p.send_raw_message(msg)
conn.sync_with_ping(timeout=1)
self.nodes[0].disconnect_p2ps()
def test_size(self):
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
with self.nodes[0].assert_debug_log(['']):
msg = conn.build_message(msg_unrecognized(str_data="d"))
cut_len = (
4 + # magic
12 # command
)
# modify len to MAX_SIZE + 1
msg = msg[:cut_len] + struct.pack("<I", 0x02000000 + 1) + msg[cut_len + 4:]
self.nodes[0].p2p.send_raw_message(msg)
conn.wait_for_disconnect(timeout=1)
self.nodes[0].disconnect_p2ps()
def test_command(self):
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
with self.nodes[0].assert_debug_log(['PROCESSMESSAGE: ERRORS IN HEADER']):
msg = msg_unrecognized(str_data="d")
msg = conn.build_message(msg)
# Modify command
msg = msg[:7] + b'\x00' + msg[7 + 1:]
self.nodes[0].p2p.send_raw_message(msg)
conn.sync_with_ping(timeout=1)
self.nodes[0].disconnect_p2ps()
    def test_large_inv(self): # future: add Misbehaving value check; the first invalid message raises it to 20, the second to 40.
conn = self.nodes[0].add_p2p_connection(P2PInterface())
with self.nodes[0].assert_debug_log(['ERROR: peer=5 message inv size() = 50001']):
msg = messages.msg_inv([messages.CInv(1, 1)] * 50001)
conn.send_and_ping(msg)
with self.nodes[0].assert_debug_log(['ERROR: peer=5 message getdata size() = 50001']):
msg = messages.msg_getdata([messages.CInv(1, 1)] * 50001)
conn.send_and_ping(msg)
self.nodes[0].disconnect_p2ps()
if __name__ == '__main__':
InvalidMessagesTest().main()
|
Python
| 0.000009
|
@@ -280,22 +280,8 @@
ncio
-%0Aimport struct
%0A%0Afr
@@ -4325,202 +4325,121 @@
-msg = conn.build_message(msg_unrecognized(str_data=%22d%22))%0A cut_len = (%0A 4 + # magic%0A 12 # command%0A )%0A # modify len to MAX_SIZE
+# Create a message with oversized payload%0A msg = msg_unrecognized(str_data=%22d%22*(VALID_DATA_LIMIT
+ 1
+))
%0A
@@ -4457,77 +4457,31 @@
g =
-msg%5B:cut_len%5D + struct.pack(%22%3CI%22, 0x02000000 + 1) + msg%5Bcut_len + 4:%5D
+conn.build_message(msg)
%0A
|
1a40afa75693f0c2ae3b2eaac62ff9ca6bb02488
|
Add sqlite schema
|
synapse/storage/schema/delta/24/fts.py
|
synapse/storage/schema/delta/24/fts.py
|
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from synapse.storage import get_statements
from synapse.storage.engines import PostgresEngine
logger = logging.getLogger(__name__)
POSTGRES_SQL = """
CREATE TABLE event_search (
event_id TEXT,
room_id TEXT,
key TEXT,
vector tsvector
);
INSERT INTO event_search SELECT
event_id, room_id, 'content.body',
to_tsvector('english', json::json->'content'->>'body')
FROM events NATURAL JOIN event_json WHERE type = 'm.room.message';
INSERT INTO event_search SELECT
event_id, room_id, 'content.name',
to_tsvector('english', json::json->'content'->>'name')
FROM events NATURAL JOIN event_json WHERE type = 'm.room.name';
INSERT INTO event_search SELECT
event_id, room_id, 'content.topic',
to_tsvector('english', json::json->'content'->>'topic')
FROM events NATURAL JOIN event_json WHERE type = 'm.room.topic';
CREATE INDEX event_search_fts_idx ON event_search USING gin(vector);
CREATE INDEX event_search_ev_idx ON event_search(event_id);
"""
def run_upgrade(cur, database_engine, *args, **kwargs):
if not isinstance(database_engine, PostgresEngine):
# We only support FTS for postgres currently.
return
for statement in get_statements(POSTGRES_SQL.splitlines()):
cur.execute(statement)
|
Python
| 0.000005
|
@@ -681,16 +681,45 @@
esEngine
+, Sqlite3Engine%0A%0Aimport ujson
%0A%0Alogger
@@ -1606,18 +1606,259 @@
nt_id);%0A
-%22%22
+CREATE INDEX event_search_ev_ridx ON event_search(room_id);%0A%22%22%22%0A%0A%0ASQLITE_TABLE = (%0A %22CREATE VIRTUAL TABLE event_search USING fts3 ( event_id, room_id, key, value)%22%0A)%0ASQLITE_INDEX = %22CREATE INDEX event_search_ev_idx ON event_search(event_id)
%22%0A%0A%0Adef
@@ -1916,20 +1916,16 @@
%0A if
-not
isinstan
@@ -1973,142 +1973,1472 @@
-# We only support FTS for postg
+for statement in get_statements(POSTGRES_SQL.splitlines()):%0A cur.execute(statement)%0A return%0A%0A if isinstance(database_engine, Sqlite3Engine):%0A cur.execute(SQLITE_TABLE)%0A%0A rowid = -1%0A while True:%0A cur.execute(%0A %22SELECT rowid, json FROM event_json%22%0A %22 WHERE rowid %3E ?%22%0A %22 ORDER BY rowid ASC LIMIT 100%22,%0A (rowid,)%0A )%0A%0A
res
+ =
cur
-rently.%0A return%0A%0A for statement in get_statements(POSTGRES_SQL.splitlines()):%0A
+.fetchall()%0A%0A if not res:%0A break%0A%0A events = %5B%0A ujson.loads(js)%0A for _, js in res%0A %5D%0A%0A rowid = max(rid for rid, _ in res)%0A%0A rows = %5B%5D%0A for ev in events:%0A if ev%5B%22type%22%5D == %22m.room.message%22:%0A rows.append((%0A ev%5B%22event_id%22%5D, ev%5B%22room_id%22%5D, %22content.body%22,%0A ev%5B%22content%22%5D%5B%22body%22%5D%0A ))%0A if ev%5B%22type%22%5D == %22m.room.name%22:%0A rows.append((%0A ev%5B%22event_id%22%5D, ev%5B%22room_id%22%5D, %22content.name%22,%0A ev%5B%22content%22%5D%5B%22name%22%5D%0A ))%0A if ev%5B%22type%22%5D == %22m.room.topic%22:%0A rows.append((%0A ev%5B%22event_id%22%5D, ev%5B%22room_id%22%5D, %22content.topic%22,%0A ev%5B%22content%22%5D%5B%22topic%22%5D%0A ))%0A%0A if rows:%0A logger.info(rows)%0A
cur.
@@ -3425,32 +3425,34 @@
)%0A
+
cur.execute(stat
@@ -3438,28 +3438,222 @@
cur.execute
-(statement
+many(%0A %22INSERT INTO event_search (event_id, room_id, key, value)%22%0A %22 VALUES (?,?,?,?)%22,%0A rows%0A )%0A%0A # cur.execute(SQLITE_INDEX
)%0A
|
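A note on the SQLite branch this diff adds: because SQLite has no `INSERT ... SELECT to_tsvector(...)`, the migration walks event_json in rowid order and backfills an FTS3 virtual table in 100-row batches. Below is a minimal, self-contained sketch of that batched-backfill pattern — a hypothetical mini-schema, stdlib sqlite3 only, and it assumes your SQLite build ships with FTS3 (CPython's bundled build normally does).

import json
import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()

# Stand-in source table; the real schema is Synapse's event_json.
cur.execute("CREATE TABLE event_json (event_id TEXT, json TEXT)")
cur.executemany(
    "INSERT INTO event_json VALUES (?, ?)",
    [("$ev%d" % i,
      json.dumps({"type": "m.room.message", "room_id": "!r",
                  "content": {"body": "msg %d" % i}}))
     for i in range(250)],
)

# FTS3 virtual table, mirroring SQLITE_TABLE in the diff.
cur.execute("CREATE VIRTUAL TABLE event_search USING fts3"
            "(event_id, room_id, key, value)")

# Walk the source in rowid order, 100 rows per batch, so the backfill
# never materialises the whole table at once.
rowid = -1
while True:
    cur.execute("SELECT rowid, event_id, json FROM event_json"
                " WHERE rowid > ? ORDER BY rowid ASC LIMIT 100", (rowid,))
    res = cur.fetchall()
    if not res:
        break
    rowid = max(rid for rid, _, _ in res)
    rows = []
    for _, event_id, js in res:
        ev = json.loads(js)
        if ev["type"] == "m.room.message":
            rows.append((event_id, ev["room_id"], "content.body",
                         ev["content"]["body"]))
    cur.executemany("INSERT INTO event_search VALUES (?, ?, ?, ?)", rows)

cur.execute("SELECT count(*) FROM event_search WHERE value MATCH 'msg'")
print(cur.fetchone()[0])  # 250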
544782d2da7fa918133c70ea4e9d0ffe918dcdb4
|
Fix documentation for eval and change default display to False
|
kindred/evalFunctions.py
|
kindred/evalFunctions.py
|
import kindred
from collections import Counter
def evaluate(goldCorpus,testCorpus,metric='f1score',display=True):
""" Compares the gold corpus with the test corpus and calculate appropriate metrics.
:param goldCorpus: The gold standard set of data
:type goldCorpus: kindred.Corpus
:param testCorpus: The test set for comparison
:type testCorpus: kindred.Corpus
:param metric: Which metric to use (precision/recall/f1score)
:type metric: str
:return: The value of the corresponding metric
:rtype: float
"""
assert isinstance(goldCorpus,kindred.Corpus)
assert isinstance(testCorpus,kindred.Corpus)
TPs,FPs,FNs = Counter(),Counter(),Counter()
#goldTuples = [ ]
#for doc in goldCorpus.documents:
# relTuples = [ (r.relationType,tuple(r.entityIDs)) for r in doc.getRelations() ]
# goldTuples += relTuples
goldTuples = [ (r.relationType,tuple(r.entityIDs)) for r in goldCorpus.getRelations() ]
testTuples = [ (r.relationType,tuple(r.entityIDs)) for r in testCorpus.getRelations() ]
totalSet = set(goldTuples + testTuples)
for relation in totalSet:
inGold = relation in goldTuples
inTest = relation in testTuples
relType = relation[0]
if inGold and inTest:
TPs[relType] += 1
elif inGold:
FNs[relType] += 1
elif inTest:
FPs[relType] += 1
sortedRelTypes = sorted( list(set( [relation[0] for relation in totalSet] )))
maxLen = max( [len(rt) for rt in sortedRelTypes ] )
formatString = '%-' + str(maxLen) + 's\tTP:%d FP:%d FN:%d\tP:%f R:%f F1:%f'
for relType in sortedRelTypes:
TP,FP,FN = TPs[relType],FPs[relType],FNs[relType]
precision = 0.0 if (TP+FP) == 0 else TP / float(TP+FP)
recall = 0.0 if (TP+FN) == 0 else TP / float(TP+FN)
f1score = 0.0 if precision==0 or recall == 0 else 2 * (precision*recall) / (precision+recall)
if display:
print(formatString % (relType,TP,FP,FN,precision,recall,f1score))
TP,FP,FN = sum(TPs.values()),sum(FPs.values()),sum(FNs.values())
precision = 0.0 if (TP+FP) == 0 else TP / float(TP+FP)
recall = 0.0 if (TP+FN) == 0 else TP / float(TP+FN)
f1score = 0.0 if precision==0 or recall == 0 else 2 * (precision*recall) / (precision+recall)
if display:
print("-"*50)
print(formatString % ("All",TP,FP,FN,precision,recall,f1score))
if metric == 'f1score':
return f1score
elif metric == 'precision':
return precision
elif metric == 'recall':
return recall
elif metric == 'all':
return precision,recall,f1score
else:
raise RuntimeError('Unknown metric: %s' % metric)
|
Python
| 0.003999
|
@@ -106,11 +106,12 @@
lay=
-Tru
+Fals
e):%0A
@@ -252,42 +252,8 @@
ata%0A
-%09:type goldCorpus: kindred.Corpus%0A
%09:pa
@@ -300,42 +300,8 @@
son%0A
-%09:type testCorpus: kindred.Corpus%0A
%09:pa
@@ -358,16 +358,214 @@
f1score)
+. 'all' will provide all three as a tuple%0A%09:param display: Whether to print (to stdout) specific statistics for each relation type%0A%09:type goldCorpus: kindred.Corpus%0A%09:type testCorpus: kindred.Corpus
%0A%09:type
@@ -576,16 +576,37 @@
ic: str%0A
+%09:type display: bool%0A
%09:return
@@ -644,16 +644,29 @@
g metric
+ (or metrics)
%0A%09:rtype
@@ -672,16 +672,37 @@
e: float
+ (or tuple of floats)
%0A%09%22%22%22%0A%0A%09
|
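The metric arithmetic in evaluate() reduces to three counters per relation type. A stripped-down sketch of the same micro-averaged precision/recall/F1 computation over gold and predicted tuples — hypothetical data, stdlib only:

def prf1(gold, test):
    # Micro-averaged precision/recall/F1 over hashable items, with the
    # same zero-guards as evaluate().
    tp = fp = fn = 0
    for item in set(gold) | set(test):
        in_gold, in_test = item in gold, item in test
        if in_gold and in_test:
            tp += 1
        elif in_gold:
            fn += 1
        else:
            fp += 1
    precision = tp / float(tp + fp) if (tp + fp) else 0.0
    recall = tp / float(tp + fn) if (tp + fn) else 0.0
    f1 = (2 * precision * recall / (precision + recall)
          if precision and recall else 0.0)
    return precision, recall, f1

gold = [("treats", ("e1", "e2")), ("causes", ("e3", "e4"))]
test = [("treats", ("e1", "e2")), ("causes", ("e5", "e6"))]
print(prf1(gold, test))  # (0.5, 0.5, 0.5)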
04303628ff68a1fc6cc978dcd8ecb9286732a24b
|
Update sfp_azureblobstorage.py
|
modules/sfp_azureblobstorage.py
|
modules/sfp_azureblobstorage.py
|
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_azureblobstorage
# Purpose: SpiderFoot plug-in for identifying potential Azure blobs related
# to the target.
#
# Author: Steve Micallef <steve@binarypool.com>
#
# Created: 14/07/2019
# Copyright: (c) Steve Micallef 2019
# Licence: GPL
# -------------------------------------------------------------------------------
import threading
import time
from sflib import SpiderFoot, SpiderFootPlugin, SpiderFootEvent
class sfp_azureblobstorage(SpiderFootPlugin):
"""Azure Blob Finder:Footprint,Passive:Crawling and Scanning::Search for potential Azure blobs associated with the target and attempt to list their contents."""
meta = {
'name': "Azure Blob Finder",
'summary': "Search for potential Azure blobs associated with the target and attempt to list their contents.",
'flags': [ "" ],
'useCases': [ "Footprint", "Passive" ],
'categories': [ "Crawling and Scanning" ],
'dataSource': {
'website': "https://azure.microsoft.com/en-in/services/storage/blobs/",
'model': "COMMERCIAL_ONLY",
'references': [
"https://docs.microsoft.com/en-in/azure/storage/blobs/storage-quickstart-blobs-dotnet",
"https://azure.microsoft.com/en-in/pricing/details/storage/",
"https://azure.microsoft.com/en-in/updates/?product=storage",
"https://azure.microsoft.com/en-in/features/storage-explorer/"
],
'favIcon': "https://azurecomcdn.azureedge.net/cvt-1d5d83e09d2171261e69bc5faba5f3018c1bf1c590f0e666d26c9bc9ef7a561b/images/icon/favicon.ico",
'logo': "",
'description': "Massively scalable object storage for unstructured data.\n"
"Get scalable, cost-effective cloud storage for all your unstructured data. "
"Pay only for what you use and save money compared with on-premises storage options. "
"Choose from among four storage tiers, depending on how often you will access the data. "
"Store performance-sensitive data in Premium, frequently accessed data in "
"Hot, infrequently accessed data in Cool and rarely accessed data in Archive.\n.",
}
}
# Default options
opts = {
"suffixes": "test,dev,web,beta,bucket,space,files,content,data,prod,staging,production,stage,app,media,development,-test,-dev,-web,-beta,-bucket,-space,-files,-content,-data,-prod,-staging,-production,-stage,-app,-media,-development",
"_maxthreads": 20
}
# Option descriptions
optdescs = {
"suffixes": "List of suffixes to append to domains tried as blob storage names"
}
results = None
s3results = None
def setup(self, sfc, userOpts=dict()):
self.sf = sfc
self.s3results = self.tempStorage()
self.results = self.tempStorage()
self.lock = threading.Lock()
for opt in list(userOpts.keys()):
self.opts[opt] = userOpts[opt]
# What events is this module interested in for input
def watchedEvents(self):
return ["DOMAIN_NAME", "LINKED_URL_EXTERNAL"]
# What events this module produces
# This is to support the end user in selecting modules based on events
# produced.
def producedEvents(self):
return ["CLOUD_STORAGE_BUCKET"]
def checkSite(self, url):
res = self.sf.fetchUrl(url, timeout=10, useragent="SpiderFoot", noLog=True)
if res['code']:
with self.lock:
self.s3results[url] = True
def threadSites(self, siteList):
ret = list()
self.s3results = dict()
running = True
i = 0
t = []
for site in siteList:
if self.checkForStop():
return None
self.sf.info("Spawning thread to check bucket: " + site)
t.append(threading.Thread(name='thread_sfp_azureblobstorages_' + site,
target=self.checkSite, args=(site,)))
t[i].start()
i += 1
# Block until all threads are finished
while running:
found = False
for rt in threading.enumerate():
if rt.name.startswith("thread_sfp_azureblobstorages_"):
found = True
if not found:
running = False
time.sleep(0.25)
# Return once the scanning has completed
return self.s3results
def batchSites(self, sites):
i = 0
res = list()
siteList = list()
for site in sites:
if i >= self.opts['_maxthreads']:
data = self.threadSites(siteList)
if data == None:
return res
for ret in list(data.keys()):
if data[ret]:
res.append(ret)
i = 0
siteList = list()
siteList.append(site)
i += 1
return res
# Handle events sent to this module
def handleEvent(self, event):
eventName = event.eventType
srcModuleName = event.module
eventData = event.data
if eventData in self.results:
return None
else:
self.results[eventData] = True
self.sf.debug("Received event, %s, from %s" % (eventName, srcModuleName))
if eventName == "LINKED_URL_EXTERNAL":
if ".blob.core.windows.net" in eventData:
b = self.sf.urlFQDN(eventData)
evt = SpiderFootEvent("CLOUD_STORAGE_BUCKET", b, self.__name__, event)
self.notifyListeners(evt)
return None
targets = [ eventData.replace('.', '') ]
kw = self.sf.domainKeyword(eventData, self.opts['_internettlds'])
if kw:
targets.append(kw)
urls = list()
for t in targets:
suffixes = [''] + self.opts['suffixes'].split(',')
for s in suffixes:
if self.checkForStop():
return None
b = t + s + ".blob.core.windows.net"
url = "https://" + b
urls.append(url)
# Batch the scans
ret = self.batchSites(urls)
for b in ret:
evt = SpiderFootEvent("CLOUD_STORAGE_BUCKET", b, self.__name__, event)
self.notifyListeners(evt)
# End of sfp_azureblobstorage class
|
Python
| 0.000001
|
@@ -1178,23 +1178,29 @@
': %22
-COMMERCIAL_ONLY
+FREE_NOAUTH_UNLIMITED
%22,%0A
|
95768a09d0bf7f6f3576fc28568c3b7897467541
|
Add license
|
src/python/tensorflow_cloud/core/tests/examples/multi_file_example/scale_model.py
|
src/python/tensorflow_cloud/core/tests/examples/multi_file_example/scale_model.py
|
import tensorflow_cloud as tfc
tfc.run(
entry_point="train_model.py",
requirements_txt="requirements.txt",
stream_logs=True,
)
|
Python
| 0
|
@@ -1,8 +1,619 @@
+# Copyright 2020 Google LLC. All Rights Reserved.%0D%0A#%0D%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0D%0A# you may not use this file except in compliance with the License.%0D%0A# You may obtain a copy of the License at%0D%0A#%0D%0A# http://www.apache.org/licenses/LICENSE-2.0%0D%0A#%0D%0A# Unless required by applicable law or agreed to in writing, software%0D%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0D%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0D%0A# See the License for the specific language governing permissions and%0D%0A# limitations under the License.%0D%0A%0D%0A
import t
|
6c2e00084fb11a6d37d55fed247d2e7b6a373823
|
Fix dependencies.
|
partner_contact_nationality/__openerp__.py
|
partner_contact_nationality/__openerp__.py
|
# -*- encoding: utf-8 -*-
# Odoo, Open Source Management Solution
# Copyright (C) 2014-2015 Grupo ESOC <www.grupoesoc.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{
"name": "Contact's nationality",
"version": "1.0.0",
"author": "Odoo Community Association (OCA)",
"category": "Customer Relationship Management",
"website": "https://odoo-community.org/",
"depends": [
"base"
],
"data": [
"views/res_partner.xml",
],
}
|
Python
| 0
|
@@ -1016,13 +1016,30 @@
%22
+partner_contact_
base%22
+,
%0A
|
fb0d099362086c99beaa08e9b8b5934a0534d1c5
|
Revert "test_onroad: increase driverState timing to avoid random failures"
|
selfdrive/test/test_onroad.py
|
selfdrive/test/test_onroad.py
|
#!/usr/bin/env python3
import json
import os
import subprocess
import time
import numpy as np
import unittest
from collections import Counter
from pathlib import Path
import cereal.messaging as messaging
from cereal.services import service_list
from common.basedir import BASEDIR
from common.timeout import Timeout
from common.params import Params
from selfdrive.hardware import TICI
from selfdrive.loggerd.config import ROOT
from selfdrive.test.helpers import set_params_enabled
from tools.lib.logreader import LogReader
# Baseline CPU usage by process
PROCS = {
"selfdrive.controls.controlsd": 50.0,
"./loggerd": 45.0,
"./locationd": 9.1,
"selfdrive.controls.plannerd": 20.0,
"./_ui": 15.0,
"selfdrive.locationd.paramsd": 9.1,
"./camerad": 7.07,
"./_sensord": 6.17,
"selfdrive.controls.radard": 5.67,
"./_modeld": 4.48,
"./boardd": 3.63,
"./_dmonitoringmodeld": 2.67,
"selfdrive.thermald.thermald": 2.41,
"selfdrive.locationd.calibrationd": 2.0,
"./_soundd": 2.0,
"selfdrive.monitoring.dmonitoringd": 1.90,
"./proclogd": 1.54,
"selfdrive.logmessaged": 0.2,
"./clocksd": 0.02,
"./ubloxd": 0.02,
"selfdrive.tombstoned": 0,
"./logcatd": 0,
}
if TICI:
PROCS.update({
"./loggerd": 60.0,
"selfdrive.controls.controlsd": 26.0,
"./camerad": 31.0,
"./_ui": 21.0,
"selfdrive.controls.plannerd": 12.0,
"selfdrive.locationd.paramsd": 5.0,
"./_dmonitoringmodeld": 10.0,
"selfdrive.thermald.thermald": 1.5,
})
def cputime_total(ct):
return ct.cpuUser + ct.cpuSystem + ct.cpuChildrenUser + ct.cpuChildrenSystem
def check_cpu_usage(first_proc, last_proc):
result = "------------------------------------------------\n"
result += "------------------ CPU Usage -------------------\n"
result += "------------------------------------------------\n"
r = True
dt = (last_proc.logMonoTime - first_proc.logMonoTime) / 1e9
for proc_name, normal_cpu_usage in PROCS.items():
first, last = None, None
try:
first = [p for p in first_proc.procLog.procs if proc_name in p.cmdline][0]
last = [p for p in last_proc.procLog.procs if proc_name in p.cmdline][0]
cpu_time = cputime_total(last) - cputime_total(first)
cpu_usage = cpu_time / dt * 100.
if cpu_usage > max(normal_cpu_usage * 1.1, normal_cpu_usage + 5.0):
# cpu usage is high while playing sounds
if proc_name == "./_soundd" and cpu_usage < 25.:
continue
result += f"Warning {proc_name} using more CPU than normal\n"
r = False
elif cpu_usage < min(normal_cpu_usage * 0.65, max(normal_cpu_usage - 1.0, 0.0)):
result += f"Warning {proc_name} using less CPU than normal\n"
r = False
result += f"{proc_name.ljust(35)} {cpu_usage:.2f}%\n"
except IndexError:
result += f"{proc_name.ljust(35)} NO METRICS FOUND {first=} {last=}\n"
r = False
result += "------------------------------------------------\n"
print(result)
return r
class TestOnroad(unittest.TestCase):
@classmethod
def setUpClass(cls):
os.environ['SKIP_FW_QUERY'] = "1"
os.environ['FINGERPRINT'] = "TOYOTA COROLLA TSS2 2019"
set_params_enabled()
# Make sure athena isn't running
Params().delete("DongleId")
Params().delete("AthenadPid")
os.system("pkill -9 -f athena")
logger_root = Path(ROOT)
initial_segments = set()
if logger_root.exists():
initial_segments = set(Path(ROOT).iterdir())
# start manager and run openpilot for a minute
try:
manager_path = os.path.join(BASEDIR, "selfdrive/manager/manager.py")
proc = subprocess.Popen(["python", manager_path])
sm = messaging.SubMaster(['carState'])
with Timeout(150, "controls didn't start"):
while sm.rcv_frame['carState'] < 0:
sm.update(1000)
# make sure we get at least two full segments
cls.segments = []
with Timeout(300, "timed out waiting for logs"):
while len(cls.segments) < 3:
new_paths = set()
if logger_root.exists():
new_paths = set(logger_root.iterdir()) - initial_segments
segs = [p for p in new_paths if "--" in str(p)]
cls.segments = sorted(segs, key=lambda s: int(str(s).rsplit('--')[-1]))
time.sleep(5)
finally:
proc.terminate()
if proc.wait(60) is None:
proc.kill()
cls.lr = list(LogReader(os.path.join(str(cls.segments[1]), "rlog.bz2")))
def test_cloudlog_size(self):
msgs = [m for m in self.lr if m.which() == 'logMessage']
total_size = sum(len(m.as_builder().to_bytes()) for m in msgs)
self.assertLess(total_size, 3.5e5)
cnt = Counter([json.loads(m.logMessage)['filename'] for m in msgs])
big_logs = [f for f, n in cnt.most_common(3) if n / sum(cnt.values()) > 30.]
self.assertEqual(len(big_logs), 0, f"Log spam: {big_logs}")
def test_cpu_usage(self):
proclogs = [m for m in self.lr if m.which() == 'procLog']
self.assertGreater(len(proclogs), service_list['procLog'].frequency * 45, "insufficient samples")
cpu_ok = check_cpu_usage(proclogs[0], proclogs[-1])
self.assertTrue(cpu_ok)
def test_model_timings(self):
#TODO this went up when plannerd cpu usage increased, why?
cfgs = [("modelV2", 0.035, 0.03), ("driverState", 0.03, 0.025)]
for (s, instant_max, avg_max) in cfgs:
ts = [getattr(getattr(m, s), "modelExecutionTime") for m in self.lr if m.which() == s]
self.assertLess(min(ts), instant_max, f"high '{s}' execution time: {min(ts)}")
self.assertLess(np.mean(ts), avg_max, f"high avg '{s}' execution time: {np.mean(ts)}")
if __name__ == "__main__":
unittest.main()
|
Python
| 0
|
@@ -5297,16 +5297,17 @@
0.0
-3
+25
, 0.02
-5
+1
)%5D%0A
|
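The CPU assertions above rest on one identity: two cumulative cputime snapshots plus a wall-clock delta give average utilisation as Δcpu/Δt × 100, which check_cpu_usage() then compares against a per-process baseline with a combined relative/absolute tolerance. A toy sketch with hypothetical numbers:

def cpu_percent(first_cputime, last_cputime, dt_seconds):
    # Average utilisation over the window: delta CPU-seconds per
    # wall-clock second, as a percentage.
    return (last_cputime - first_cputime) / dt_seconds * 100.0

def too_high(usage, baseline):
    # Same upper tolerance as check_cpu_usage(): 10% relative or
    # 5 percentage points absolute, whichever is larger.
    return usage > max(baseline * 1.1, baseline + 5.0)

usage = cpu_percent(30.0, 42.5, 60.0)          # 12.5 CPU-s over 60 s
print(round(usage, 2), too_high(usage, 20.0))  # 20.83 False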
86d604f69ac0e42fb05ec84c3b20da03c7d7d109
|
Fix lint error
|
scripts/download_oai_harvest.py
|
scripts/download_oai_harvest.py
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
Standalone script for downloading the OAI-PMH for Calm.
The final output is dumped into a JSON file ``calm_records.json``, which
can be useful for doing bulk analysis of the Calm data.
"""
import collections
import json
import re
from urllib.parse import unquote
import requests
OAI_URL = 'http://archives.wellcomelibrary.org/oai/OAI.aspx'
RESUMPTION_TOKEN_RE = re.compile(
r'<resumptionToken[^>]*>(?P<token>[^<]+)</resumptionToken>'
)
STREAM_PARSER_RE = re.compile(
r'<(?P<name>[A-Za-z0-9]+) urlencoded=\"(?P<value>[^\"]*)\"/?>'
)
def fetch_calm_records():
params = {
'verb': 'ListRecords',
'metadataPrefix': 'calm_xml'
}
while True:
r = requests.get(OAI_URL, params=params)
# We can't parse the Calm "XML" with an XML parser, because it isn't
# actually valid XML. Instead the values are URL-encoded as an
# attribute on an XML-like tag, so we unpick those with a regex
# and store the values that way.
records = r.text.split('</record>')
records.pop()
for rec in records:
d = collections.defaultdict(list)
for m in STREAM_PARSER_RE.finditer(rec):
d[m.group('name')].append(unquote(m.group('value')))
yield dict(d)
# Results from the OAI harvests are paginated, to prevent records
# changing order under our feet. The presence of a `resumptionToken`
# tells us how to access the next page.
try:
params['resumptionToken'] = RESUMPTION_TOKEN_RE.search(r.text).group('token')
except Exception as e:
raise StopIteration
if 'resumptionToken' in params and 'metadataPrefix' in params:
del params['metadataPrefix']
all_records = []
for r in fetch_calm_records():
all_records.append(r)
if len(all_records) % 1000 == 0:
print(f'{len(d)}...')
json.dump(
all_records,
open('calm_records.json', 'w'),
indent=2, sort_keys=True
)
|
Python
| 0.000035
|
@@ -1658,13 +1658,8 @@
tion
- as e
:%0A
@@ -1935,17 +1935,27 @@
(f'%7Blen(
-d
+all_records
)%7D...')%0A
|
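Beyond the two fixes, the harvester's control flow is worth isolating: OAI-PMH paginates results, and the presence of a resumptionToken is the only signal that more pages exist. A hedged sketch of that loop with the HTTP-plus-regex step stubbed out by a hypothetical fetcher:

def fake_fetch(token):
    # Stand-in for the requests.get + regex step: returns the page's
    # records and the next resumptionToken (None on the last page).
    pages = {
        None: (["rec1", "rec2"], "t1"),
        "t1": (["rec3"], "t2"),
        "t2": (["rec4"], None),
    }
    return pages[token]

def harvest():
    token = None
    while True:
        records, token = fake_fetch(token)
        yield from records
        if token is None:
            return  # no resumptionToken means we hit the last page

print(list(harvest()))  # ['rec1', 'rec2', 'rec3', 'rec4']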
96b3904e64617d6c04b0e44506482cd264e28132
|
Use common method for permission checking
|
osmaxx-py/osmaxx/contrib/auth/frontend_permissions.py
|
osmaxx-py/osmaxx/contrib/auth/frontend_permissions.py
|
from django.conf import settings
from django.contrib.auth.decorators import login_required, user_passes_test
from django.core.urlresolvers import reverse_lazy
from django.utils.decorators import method_decorator
from rest_framework import permissions
FRONTEND_USER_GROUP = settings.OSMAXX_FRONTEND_USER_GROUP
def frontend_access_required(function=None):
"""
Decorator for views that checks that the user has the correct access rights,
redirecting to the information page if necessary.
"""
access_denied_info_url = reverse_lazy('excerptexport:access_denied')
actual_decorator = user_passes_test(
_may_user_access_osmaxx_frontend,
login_url=access_denied_info_url
)
if function:
return actual_decorator(function)
return actual_decorator
def _may_user_access_osmaxx_frontend(user):
"""
Actual test to check if the user is in the frontend user group,
to give access or deny it. Note: Admins have superpowers.
"""
return user.has_perm('excerptexport.add_extractionorder')
class LoginRequiredMixin(object):
"""
Login required Mixin for Class Based Views.
"""
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(LoginRequiredMixin, self).dispatch(*args, **kwargs)
class FrontendAccessRequiredMixin(object):
"""
Frontend Access Check Mixin for Class Based Views.
"""
@method_decorator(frontend_access_required)
def dispatch(self, *args, **kwargs):
return super(FrontendAccessRequiredMixin, self).dispatch(*args, **kwargs)
class AuthenticatedAndAccessPermission(permissions.BasePermission):
"""
Allows access only to authenticated users with frontend permissions.
"""
def has_permission(self, request, view):
return request.user.is_authenticated() and _may_user_access_osmaxx_frontend(request.user)
class HasBBoxAccessPermission(permissions.BasePermission):
message = 'Accessing this bounding box is not allowed.'
def has_object_permission(self, request, view, obj):
return obj.excerpt.is_public or obj.excerpt.owner == request.user
class HasExcerptAccessPermission(permissions.BasePermission):
message = 'Accessing this excerpt is not allowed.'
def has_object_permission(self, request, view, obj):
return obj.is_public or obj.owner == request.user
|
Python
| 0
|
@@ -305,16 +305,376 @@
GROUP%0A%0A%0A
+def _may_user_access_osmaxx_frontend(user):%0A %22%22%22%0A Actual test to check if the user is in the frontend user group,%0A to give access or deny it. Note: Admins have superpowers.%0A %22%22%22%0A return user.has_perm('excerptexport.add_extractionorder')%0A%0A%0Adef _may_user_access_this_excerpt(user, excerpt):%0A return excerpt.is_public or excerpt.owner == user%0A%0A%0A
def fron
@@ -710,16 +710,16 @@
=None):%0A
-
%22%22%22%0A
@@ -1157,262 +1157,8 @@
r%0A%0A%0A
-def _may_user_access_osmaxx_frontend(user):%0A %22%22%22%0A Actual test to check if the user is in the frontend user group,%0A to give access or deny it. Note: Admins have superpowers.%0A %22%22%22%0A return user.has_perm('excerptexport.add_extractionorder')%0A%0A%0A
clas
@@ -2191,66 +2191,64 @@
urn
-obj.excerpt.is_public or obj.excerpt.owner == request.user
+_may_user_access_this_excerpt(request.user, obj.excerpt)
%0A%0A%0Ac
@@ -2440,38 +2440,38 @@
urn
-obj.is_public or obj.owner ==
+_may_user_access_this_excerpt(
requ
@@ -2474,13 +2474,19 @@
request.user
+, obj)
%0A
|
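The point of the refactor is that both object-permission classes now delegate to one predicate instead of duplicating `is_public or owner == user`. The same shape, stripped of Django/DRF and using a hypothetical Excerpt stand-in:

from dataclasses import dataclass

@dataclass
class Excerpt:
    is_public: bool
    owner: str

def may_access(user, excerpt):
    # Single source of truth for the access rule.
    return excerpt.is_public or excerpt.owner == user

class HasExcerptAccessPermission:
    def has_object_permission(self, user, obj):
        return may_access(user, obj)

perm = HasExcerptAccessPermission()
print(perm.has_object_permission("alice", Excerpt(False, "alice")))  # True
print(perm.has_object_permission("bob", Excerpt(False, "alice")))    # False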
fc4214c19fdbdee6118155e5b51d037028670300
|
Remove get_events and get_cosine_graph
|
pygraphc/similarity/LogTextSimilarity.py
|
pygraphc/similarity/LogTextSimilarity.py
|
from pygraphc.preprocess.PreprocessLog import PreprocessLog
from pygraphc.similarity.StringSimilarity import StringSimilarity
from itertools import combinations
import networkx as nx
import csv
class LogTextSimilarity(object):
"""A class for calculating cosine similarity between a log pair. This class is intended for
non-graph based clustering method.
"""
def __init__(self, mode, logtype, logs, clusters, h5file=''):
"""The constructor of class LogTextSimilarity.
Parameters
----------
mode : str
Mode of operation, i.e., text and text-h5
logtype : str
Type for event log, e.g., auth, syslog, etc.
logs : list
List of every line of original logs.
clusters : dict
Dictionary of clusters. Key: cluster_id, value: list of log line id.
h5file : str
File name for h5 file to save cosine similarity result.
"""
self.mode = mode
self.logtype = logtype
self.logs = logs
self.clusters = clusters
self.h5file = h5file
def get_cosine_similarity(self):
"""Get cosine similarity from a pair of log lines in a file.
Returns
-------
cosine_similarity : dict
Dictionary of cosine similarity in non-graph clustering. Key: (log_id1, log_id2),
value: cosine similarity distance.
"""
preprocess = PreprocessLog(self.logtype)
preprocess.preprocess_text(self.logs)
events = preprocess.events_text
cosines_similarity = {}
if self.mode == 'text':
# calculate cosine similarity
for log_pair in combinations(range(preprocess.loglength), 2):
cosines_similarity[log_pair] = StringSimilarity.get_cosine_similarity(events[log_pair[0]]['tf-idf'],
events[log_pair[1]]['tf-idf'],
events[log_pair[0]]['length'],
events[log_pair[1]]['length'])
return cosines_similarity
elif self.mode == 'text-csv':
print self.mode
for nodex in xrange(preprocess.loglength):
csv_file = '/tmp/cosine-' + str(nodex) + '.csv'
f = open(csv_file, 'wb')
writer = csv.writer(f)
for cluster_id, cluster in self.clusters.iteritems():
row = []
for c in cluster:
if nodex != c:
similarity = StringSimilarity.get_cosine_similarity(events[nodex]['tf-idf'],
events[c]['tf-idf'],
events[nodex]['length'],
events[c]['length'])
# if similarity > 0:
row.append(1 - similarity)
if row:
row.append(cluster_id)
writer.writerow(row)
f.close()
def get_events(self):
preprocess = PreprocessLog(self.logtype)
preprocess.preprocess_text(self.logs)
events = preprocess.events_text
return events
def get_cosine_graph(self):
if self.mode == 'text-graph':
preprocess = PreprocessLog(self.logtype)
preprocess.preprocess_text(self.logs)
events = preprocess.events_text
# create cosine graph and the nodes
cosine_graph = nx.MultiGraph()
cosine_graph.add_nodes_from(xrange(preprocess.loglength))
# create edges
for source, dest in combinations(xrange(preprocess.loglength), 2):
cosines_similarity = StringSimilarity.get_cosine_similarity(events[source]['tf-idf'],
events[dest]['tf-idf'],
events[source]['length'],
events[dest]['length'])
cosine_graph.add_edge(source, dest, weight=cosines_similarity)
return cosine_graph
|
Python
| 0.000005
|
@@ -3383,1192 +3383,4 @@
e()%0A
-%0A def get_events(self):%0A preprocess = PreprocessLog(self.logtype)%0A preprocess.preprocess_text(self.logs)%0A events = preprocess.events_text%0A%0A return events%0A%0A def get_cosine_graph(self):%0A if self.mode == 'text-graph':%0A preprocess = PreprocessLog(self.logtype)%0A preprocess.preprocess_text(self.logs)%0A events = preprocess.events_text%0A%0A # create cosine graph and the nodes%0A cosine_graph = nx.MultiGraph()%0A cosine_graph.add_nodes_from(xrange(preprocess.loglength))%0A%0A # create edges%0A for source, dest in combinations(xrange(preprocess.loglength), 2):%0A cosines_similarity = StringSimilarity.get_cosine_similarity(events%5Bsource%5D%5B'tf-idf'%5D,%0A events%5Bdest%5D%5B'tf-idf'%5D,%0A events%5Bsource%5D%5B'length'%5D,%0A events%5Bdest%5D%5B'length'%5D)%0A cosine_graph.add_edge(source, dest, weight=cosines_similarity)%0A%0A return cosine_graph%0A
|
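StringSimilarity.get_cosine_similarity (not shown in this record) is the standard TF-IDF cosine — the dot product of the two weight vectors over the product of their norms — and the CSV branch stores it as a distance, 1 − similarity. A self-contained sketch with hypothetical sparse vectors:

import math

def cosine(tfidf_a, tfidf_b):
    shared = set(tfidf_a) & set(tfidf_b)
    dot = sum(tfidf_a[t] * tfidf_b[t] for t in shared)
    norm_a = math.sqrt(sum(v * v for v in tfidf_a.values()))
    norm_b = math.sqrt(sum(v * v for v in tfidf_b.values()))
    return dot / (norm_a * norm_b) if norm_a and norm_b else 0.0

a = {"error": 0.9, "disk": 0.4}
b = {"error": 0.7, "cpu": 0.2}
print(round(cosine(a, b), 3))      # ~0.879
print(round(1 - cosine(a, b), 3))  # distance, as in the CSV branch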
e38e0b61b74316a171d49fa9390ecc736408694d
|
Simplify nanomsg sample
|
samples/nanomsg/hello_world_asyncio.py
|
samples/nanomsg/hello_world_asyncio.py
|
import asyncio
import sys
import nanomsg as nn
from nanomsg.asyncio import Socket
async def ping(url, barrier):
with Socket(protocol=nn.NN_PUSH) as sock, sock.connect(url):
await sock.send(b'Hello, World!')
# Shutdown the endpoint after the other side ack'ed; otherwise
# the message could be lost.
await barrier.wait()
async def pong(url, barrier):
with Socket(protocol=nn.NN_PULL) as sock, sock.bind(url):
message = await sock.recv()
print(bytes(message.as_memoryview()).decode('ascii'))
await barrier.wait()
async def close_loop(barrier):
await barrier.wait()
asyncio.get_event_loop().stop()
class Barrier:
def __init__(self, parties, *, loop=None):
self.parties = parties
self._cond = asyncio.Condition(loop=loop)
async def wait(self):
await self._cond.acquire()
try:
assert self.parties > 0
self.parties -= 1
if self.parties > 0:
await self._cond.wait()
else:
self._cond.notify_all()
assert self.parties == 0
finally:
self._cond.release()
def main():
barrier = Barrier(3)
url = 'inproc://test'
print('Play asynchronous ping-pong on %s' % url)
asyncio.ensure_future(ping(url, barrier))
asyncio.ensure_future(pong(url, barrier))
asyncio.ensure_future(close_loop(barrier))
loop = asyncio.get_event_loop()
try:
loop.run_forever()
finally:
loop.close()
return 0
if __name__ == '__main__':
sys.exit(main())
|
Python
| 0.000006
|
@@ -90,39 +90,35 @@
c def ping(url,
-barrier
+ack
):%0A with Sock
@@ -325,39 +325,35 @@
.%0A await
-barrier
+ack
.wait()%0A%0A%0Aasync
@@ -366,23 +366,19 @@
ng(url,
-barrier
+ack
):%0A w
@@ -545,746 +545,219 @@
a
-wait barrier.wait()%0A%0A%0Aasync def close_loop(barrier):%0A await barrier.wait()%0A asyncio.get_event_loop().stop()%0A%0A%0Aclass Barrier:%0A%0A def __init__(self, parties, *, loop=None):%0A self.parties = parties%0A self._cond = asyncio.Condition(loop=loop)%0A%0A async def wait(self):%0A await self._cond.acquire()%0A try:%0A assert self.parties %3E 0%0A self.parties -= 1%0A if self.parties %3E 0:%0A await self._cond.wait()%0A else:%0A self._cond.notify_all()%0A assert self.parties == 0%0A finally:%0A self._cond.release()%0A%0A%0Adef main():%0A barrier = Barrier(3)%0A%0A url = 'inproc://test'%0A print('Play asynchronous ping-pong on %25s' %25 url)%0A
+ck.set()%0A%0A%0Adef main():%0A url = 'inproc://test'%0A print('Play asynchronous ping-pong on %25s' %25 url)%0A loop = asyncio.get_event_loop()%0A ack = asyncio.Event()%0A loop.run_until_complete(asyncio.wait(%5B%0A
@@ -788,26 +788,27 @@
ng(url,
-barrier))%0A
+ack)),%0A
asyn
@@ -839,156 +839,23 @@
rl,
-barrier))%0A%0A asyncio.ensure_future(close_loop(barrier))%0A
+ack)),
%0A
-loop = asyncio.get_event_loop()%0A try:%0A loop.run_forever()%0A finally:%0A
+%5D))%0A
|
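The simplification swaps a hand-rolled three-party Barrier for a single asyncio.Event: the receiver sets it once the message is printed, and the sender waits on it before tearing down its endpoint (otherwise the nanomsg message could be lost). The same handshake with the transport replaced by an asyncio.Queue, so the sketch runs without nanomsg:

import asyncio

async def sender(queue, ack):
    await queue.put(b"Hello, World!")
    await ack.wait()  # don't tear down until the receiver confirmed

async def receiver(queue, ack):
    message = await queue.get()
    print(message.decode("ascii"))
    ack.set()

async def main():
    queue, ack = asyncio.Queue(), asyncio.Event()
    await asyncio.gather(sender(queue, ack), receiver(queue, ack))

asyncio.run(main())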
f3b6771c43042c599e57d3a26fa678518e12455d
|
Update jupyterlab/tests/mock_packages/interop/consumer/setup.py
|
jupyterlab/tests/mock_packages/interop/consumer/setup.py
|
jupyterlab/tests/mock_packages/interop/consumer/setup.py
|
import json
from glob import glob
import os.path as osp
name = 'jlab_mock_consumer'
HERE = osp.abspath(osp.dirname(__file__))
with open(osp.join(HERE, 'package.json')) as fid:
data = json.load(fid)
from setuptools import setup
js_name = data['name']
setup(name=name,
version=data['version'],
py_modules = [name],
data_files = [
(f'share/jupyter/labextensions/{js_name}', glob('static/package.json')),
(f'share/jupyter/labextensions/{js_name}', glob('static/static/*'))
])
|
Python
| 0
|
@@ -511,8 +511,9 @@
)%0A %5D)
+%0A
|
7c3d2f8afbc5c6d1dc7c719f97ca93ffb908d6ce
|
Add tests of tangential velocity.
|
test_SR1d.py
|
test_SR1d.py
|
import eos_defns
import SR1d
from numpy.testing import assert_allclose
def test_standard_sod():
"""
Relativistic Sod test.
Numbers are taken from the General Matlab code, so accuracy isn't perfect.
"""
eos = eos_defns.eos_gamma_law(5.0/3.0)
w_left = SR1d.State(1.0, 0.0, 0.0, 1.5, eos, label="L")
w_right = SR1d.State(0.125, 0.0, 0.0, 1.2, eos, label="R")
rp = SR1d.RP(w_left, w_right)
p_star_matlab = 0.308909954203586
assert_allclose(rp.p_star, p_star_matlab, rtol=1e-6)
rarefaction_speeds_matlab = [-0.690065559342354, -0.277995552140227]
assert_allclose(rp.waves[0].wave_speed, rarefaction_speeds_matlab, rtol=1e-6)
shock_speed_matlab = 0.818591417744604
assert_allclose(rp.waves[2].wave_speed, shock_speed_matlab, rtol=1e-6)
|
Python
| 0
|
@@ -765,28 +765,1408 @@
ck_speed_matlab, rtol=1e-6)%0A
+%0Adef test_bench_3():%0A %22%22%22%0A Test Bench problem 3.%0A %0A Take from Marti & Muller's Living Review (section 6.3). See%0A http://computastrophys.livingreviews.org/Articles/lrca-2015-3%0A %0A Left and right states have been flipped so it complements the above%0A Sod test.%0A %22%22%22%0A eos = eos_defns.eos_gamma_law(5.0/3.0)%0A w_left = SR1d.State(1.0, 0.0, 0.99, 0.015, eos, label=%22L%22)%0A w_right = SR1d.State(1.0, 0.0, 0.0, 15000, eos, label=%22R%22)%0A rp = SR1d.RP(w_left, w_right)%0A v_shock_ref = 0.927006%0A v_contact_ref = 0.766706%0A assert_allclose(rp.waves%5B0%5D.wave_speed, v_shock_ref, rtol=1e-6)%0A assert_allclose(rp.waves%5B1%5D.wave_speed, v_contact_ref, rtol=1e-6)%0A %0A%0Adef test_bench_4():%0A %22%22%22%0A Test Bench problem 4.%0A %0A Take from Marti & Muller's Living Review (section 6.3). See%0A http://computastrophys.livingreviews.org/Articles/lrca-2015-3%0A %0A Left and right states have been flipped so it complements the above%0A Sod test.%0A %22%22%22%0A eos = eos_defns.eos_gamma_law(5.0/3.0)%0A w_left = SR1d.State(1.0, 0.0, 0.9, 0.015, eos, label=%22L%22)%0A w_right = SR1d.State(1.0, 0.0, 0.9, 15000, eos, label=%22R%22)%0A rp = SR1d.RP(w_left, w_right)%0A v_shock_ref = 0.445008%0A v_contact_ref = 0.319371%0A assert_allclose(rp.waves%5B0%5D.wave_speed, v_shock_ref, rtol=1e-6)%0A assert_allclose(rp.waves%5B1%5D.wave_speed, v_contact_ref, rtol=1e-6)%0A
|
f7b92034f258d2d2868bee5c79a4c3bbd4c5fa9a
|
Fix wallet_reorgsrestore functional test flakiness
|
test/functional/wallet_reorgsrestore.py
|
test/functional/wallet_reorgsrestore.py
|
#!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test tx status in case of reorgs while wallet being shutdown.
Wallet txn status rely on block connection/disconnection for its
accuracy. In case of reorgs happening while wallet being shutdown
block updates are not going to be received. At wallet loading, we
check against chain if confirmed txn are still in chain and change
their status if block in which they have been included has been
disconnected.
"""
from decimal import Decimal
import os
import shutil
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
disconnect_nodes,
)
class ReorgsRestoreTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Send a tx from which to conflict outputs later
txid_conflict_from = self.nodes[0].sendtoaddress(
self.nodes[0].getnewaddress(), Decimal("10"))
self.nodes[0].generate(1)
self.sync_blocks()
# Disconnect node1 from others to reorg its chain later
disconnect_nodes(self.nodes[0], self.nodes[1])
disconnect_nodes(self.nodes[1], self.nodes[2])
connect_nodes(self.nodes[0], self.nodes[2])
# Send a tx to be unconfirmed later
txid = self.nodes[0].sendtoaddress(
self.nodes[0].getnewaddress(), Decimal("10"))
tx = self.nodes[0].gettransaction(txid)
self.nodes[0].generate(4)
tx_before_reorg = self.nodes[0].gettransaction(txid)
assert_equal(tx_before_reorg["confirmations"], 4)
# Disconnect node0 from node2 to broadcast a conflict on their
# respective chains
disconnect_nodes(self.nodes[0], self.nodes[2])
nA = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(
txid_conflict_from)["details"] if tx_out["amount"] == Decimal("10"))
inputs = []
inputs.append({"txid": txid_conflict_from, "vout": nA})
outputs_1 = {}
outputs_2 = {}
# Create a conflicted tx broadcast on node0 chain and conflicting tx
# broadcast on node1 chain. Both spend from txid_conflict_from
outputs_1[self.nodes[0].getnewaddress()] = Decimal("9.99998")
outputs_2[self.nodes[0].getnewaddress()] = Decimal("9.99998")
conflicted = self.nodes[0].signrawtransactionwithwallet(
self.nodes[0].createrawtransaction(inputs, outputs_1))
conflicting = self.nodes[0].signrawtransactionwithwallet(
self.nodes[0].createrawtransaction(inputs, outputs_2))
conflicted_txid = self.nodes[0].sendrawtransaction(conflicted["hex"])
self.nodes[0].generate(1)
conflicting_txid = self.nodes[2].sendrawtransaction(conflicting["hex"])
self.nodes[2].generate(9)
# Reconnect node0 and node2 and check that conflicted_txid is
# effectively conflicted
connect_nodes(self.nodes[0], self.nodes[2])
self.sync_blocks([self.nodes[0], self.nodes[2]])
conflicted = self.nodes[0].gettransaction(conflicted_txid)
conflicting = self.nodes[0].gettransaction(conflicting_txid)
assert_equal(conflicted["confirmations"], -9)
assert_equal(conflicted["walletconflicts"][0], conflicting["txid"])
# Node0 wallet is shutdown
self.stop_node(0)
self.start_node(0)
# The block chain re-orgs and the tx is included in a different block
self.nodes[1].generate(9)
self.nodes[1].sendrawtransaction(tx["hex"])
self.nodes[1].generate(1)
self.nodes[1].sendrawtransaction(conflicted["hex"])
self.nodes[1].generate(1)
# Node0 wallet file is loaded on longest sync'ed node1
self.stop_node(1)
self.nodes[0].backupwallet(
os.path.join(
self.nodes[0].datadir,
'wallet.bak'))
shutil.copyfile(
os.path.join(
self.nodes[0].datadir,
'wallet.bak'),
os.path.join(
self.nodes[1].datadir,
'regtest',
'wallet.dat'))
self.start_node(1)
tx_after_reorg = self.nodes[1].gettransaction(txid)
# Check that normal confirmed tx is confirmed again but with different
# blockhash
assert_equal(tx_after_reorg["confirmations"], 2)
assert(tx_before_reorg["blockhash"] != tx_after_reorg["blockhash"])
conflicted_after_reorg = self.nodes[1].gettransaction(conflicted_txid)
# Check that conflicted tx is confirmed again with blockhash different
# than previously conflicting tx
assert_equal(conflicted_after_reorg["confirmations"], 1)
assert(conflicting["blockhash"] != conflicted_after_reorg["blockhash"])
if __name__ == '__main__':
ReorgsRestoreTest().main()
|
Python
| 0.001715
|
@@ -933,16 +933,82 @@
odes = 3
+%0A self.extra_args = %5B%5B%22-noparkdeepreorg%22%5D%5D * self.num_nodes
%0A%0A de
|
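The flakiness fix is configuration-only: every node gets -noparkdeepreorg by list multiplication. One Python subtlety worth flagging — `[[x]] * n` repeats references to the same inner list, which is harmless here because the args are never mutated:

num_nodes = 3
extra_args = [["-noparkdeepreorg"]] * num_nodes
print(extra_args)  # [['-noparkdeepreorg'], ['-noparkdeepreorg'], ['-noparkdeepreorg']]
print(extra_args[0] is extra_args[1])  # True -- shared inner list, fine for read-only args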
a6b86330938f813eec534eac22bd917d64ff6896
|
Remove unused argument from run proxy script
|
pixelated/pixelated_dispatcher/__init__.py
|
pixelated/pixelated_dispatcher/__init__.py
|
#
# Copyright (c) 2014 ThoughtWorks Deutschland GmbH
#
# Pixelated is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pixelated is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Pixelated. If not, see <http://www.gnu.org/licenses/>.
import os
import subprocess
import sys
import logging
from pixelated.client.cli import Cli
from pixelated.client.dispatcher_api_client import PixelatedDispatcherClient
from pixelated.proxy import DispatcherProxy
from pixelated.manager import SSLConfig, DispatcherManager
from pixelated.common import init_logging, latest_available_ssl_version
__author__ = 'fbernitt'
import argparse
def is_proxy():
for arg in sys.argv:
if arg == 'proxy':
return True
return False
def is_manager():
for arg in sys.argv:
if arg == 'manager':
return True
return False
def filter_args():
return [arg for arg in sys.argv[1:] if arg not in ['manager', 'proxy']]
def is_cli():
return not (is_manager() or is_proxy())
def prepare_venv(root_path):
venv_path = os.path.join(root_path, 'virtualenv')
script = os.path.join(os.path.dirname(__file__), '..', 'scripts', 'create_mailpile_venv.sh')
subprocess.call([script, venv_path])
mailpile_path = os.path.join(venv_path, 'bin', 'mailpile')
return venv_path, mailpile_path
def run_manager():
parser = argparse.ArgumentParser(description='Multipile', )
parser.add_argument('-r', '--root_path', help='The rootpath for mailpile')
parser.add_argument('-m', '--mailpile_bin', help='The mailpile executable', default='mailpile')
parser.add_argument('-b', '--backend', help='the backend to use (fork|docker)', default='fork')
parser.add_argument('--bind', help="bind to interface. Default 127.0.0.1", default='127.0.0.1')
parser.add_argument('--sslcert', help='The SSL certficate to use', default=None)
parser.add_argument('--sslkey', help='The SSL key to use', default=None)
parser.add_argument('--debug', help='Set log level to debug', default=False, action='store_true')
parser.add_argument('--log-config', help='Provide a python logging config file', default=None)
parser.add_argument('--provider', help='Specify the provider this dispatcher will connect to', default='localhost')
parser.add_argument('--provider-ca', dest='provider_ca', help='Specify the provider CA to use to validate connections', default=True)
parser.add_argument('--provider-fingerprint', dest='provider_fingerprint', help='Pin provider certifcate to fingerprint', default=None)
group = parser.add_mutually_exclusive_group()
group.add_argument('--mailpile-virtualenv', help='Use specified virtual env for mailpile', default=None)
group.add_argument('--auto-mailpile-virtualenv', dest='auto_venv', help='Boostrap virtualenv for mailpile', default=False, action='store_true')
args = parser.parse_args(args=filter_args())
if args.sslcert:
ssl_config = SSLConfig(args.sslcert,
args.sslkey,
latest_available_ssl_version())
else:
ssl_config = None
venv = args.mailpile_virtualenv
mailpile_bin = args.mailpile_bin
if args.auto_venv:
venv, mailpile_bin = prepare_venv(args.root_path)
if args.root_path is None or not os.path.isdir(args.root_path):
raise ValueError('root path %s not found!' % args.root_path)
log_level = logging.DEBUG if args.debug else logging.INFO
log_config = args.log_config
init_logging('manager', level=log_level, config_file=log_config)
provider_ca = args.provider_ca if args.provider_fingerprint is None else False
manager = DispatcherManager(args.root_path, mailpile_bin, ssl_config, args.provider, mailpile_virtualenv=venv, provider=args.backend, leap_provider_ca=provider_ca, leap_provider_fingerprint=args.provider_fingerprint, bindaddr=args.bind)
manager.serve_forever()
def run_proxy():
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--port', help='The port the dispatcher runs on')
parser.add_argument('-m', '--manager', help='hostname:port of the manager')
parser.add_argument('--banner', help='banner file to show on login screen', default='_login_screen_message.html')
parser.add_argument('--bind', help="interface to bind to (default: 127.0.0.1)", default='127.0.0.1')
parser.add_argument('--sslcert', help='proxy HTTP server SSL certificate', default=None)
parser.add_argument('--sslkey', help='proxy HTTP server SSL key', default=None)
parser.add_argument('--fingerprint', help='pin certificate to fingerprint', default=None) # TODO: check this more deeply
parser.add_argument('--disable-verifyhostname', help='disable hostname verification; if fingerprint is specified it gets precedence', dest="verify_hostname", action='store_false', default=None)
parser.add_argument('--debug', help='set log level to debug', default=False, action='store_true')
parser.add_argument('--log-config', help='provide a python logging config file', default=None)
args = parser.parse_args(args=filter_args())
manager_hostname, manager_port = args.manager.split(':')
certfile = args.sslcert if args.sslcert else None
keyfile = args.sslkey if args.sslcert else None
manager_cafile = certfile if args.fingerprint is None else False
log_level = logging.DEBUG if args.debug else logging.INFO
log_config = args.log_config
init_logging('dispatcher', level=log_level, config_file=log_config)
client = PixelatedDispatcherClient(manager_hostname, manager_port, cacert=manager_cafile, fingerprint=args.fingerprint, assert_hostname=args.verify_hostname)
client.validate_connection()
dispatcher = DispatcherProxy(client, bindaddr=args.bind, keyfile=keyfile,
certfile=certfile, banner=args.banner)
dispatcher.serve_forever()
def run_cli():
Cli(args=filter_args()).run()
def main():
if is_manager():
run_manager()
elif is_proxy():
run_proxy()
else:
run_cli()
if __name__ == '__main__':
main()
|
Python
| 0.000001
|
@@ -4452,88 +4452,8 @@
r()%0A
- parser.add_argument('-p', '--port', help='The port the dispatcher runs on')%0A
|
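The removed --port flag was dead weight because run_proxy never read it; what survives is the module's hand-rolled subcommand routing — scan argv for a mode word, strip it, then let argparse see the rest. A condensed sketch of that pattern with a hypothetical flag set:

import argparse

MODES = ("manager", "proxy")

def pick_mode(argv):
    return next((a for a in argv if a in MODES), "cli")

def filter_args(argv):
    return [a for a in argv if a not in MODES]

argv = ["proxy", "--bind", "0.0.0.0"]  # stand-in for sys.argv[1:]
parser = argparse.ArgumentParser()
parser.add_argument("--bind", default="127.0.0.1")
args = parser.parse_args(filter_args(argv))
print(pick_mode(argv), args.bind)  # proxy 0.0.0.0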
9776615477bc9411dc6e0014396646d405dd0bb0
|
Print the pin name when complaining about missing description
|
scripts/generate-constraints.py
|
scripts/generate-constraints.py
|
#!/usr/bin/python
# Copyright (c) 2013 Quanta Research Cambridge, Inc.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function
import argparse, json, sys
from collections import OrderedDict
import copy
bindings = {
#'pins': 'pins',
'pin_name': 'pins' # legacy
}
errorDetected = False
def newArgparser():
argparser = argparse.ArgumentParser("Generate constraints file for board.")
argparser.add_argument('--boardfile', help='Board description file (json)')
argparser.add_argument('--pinoutfile', default=[], help='Project description file (json)', action='append')
argparser.add_argument('-b', '--bind', default=[], help='Bind signal group to pin group', action='append')
argparser.add_argument('-o', '--output', default=None, help='Write output to file')
argparser.add_argument('-f', '--fpga', default="xilinx", help='Target FPGA Vendor')
return argparser
if __name__=='__main__':
argparser=newArgparser()
options = argparser.parse_args()
for binding in options.bind:
split = binding.split(':')
bindings[split[0]] = split[1]
boardInfo = json.loads(open(options.boardfile).read())
print(options.fpga)
if options.fpga == "xilinx":
template='''\
set_property LOC "%(LOC)s" [get_ports "%(name)s"]
set_property PIO_DIRECTION "%(PIO_DIRECTION)s" [get_ports "%(name)s"]
'''
setPropertyTemplate='''\
set_property %(prop)s "%(val)s" [get_ports "%(name)s"]
'''
elif options.fpga == "altera":
template='''\
set_location_assignment "%(LOC)s" -to "%(name)s"
'''
setPropertyTemplate='''\
set_instance_assignment -name %(prop)s "%(val)s" -to "%(name)s"
'''
out = sys.stdout
if options.output:
out = open(options.output, 'w')
for filename in options.pinoutfile:
print('generate-constraints: processing file "' + filename + '"')
pinstr = open(filename).read()
pinout = json.loads(pinstr, object_pairs_hook=OrderedDict)
for pin in pinout:
projectPinInfo = pinout[pin]
loc = 'TBD'
iodir = 'TBD'
used = []
boardPinInfo = {}
pinName = ''
#print('PPP', projectPinInfo)
for groupName in bindings:
if projectPinInfo.has_key(groupName):
used.append(groupName)
pinName = projectPinInfo[groupName]
#print('LLL', groupName, pinName, bindings[groupName])
boardPinInfo = boardInfo[bindings[groupName]]
break
if pinName == '':
for prop in projectPinInfo:
#print('JJJJ', prop)
if boardInfo.get(prop):
used.append(prop)
pinName = projectPinInfo[prop]
boardPinInfo = boardInfo[prop]
#print('FFF', prop, pinName, boardPinInfo, boardPinInfo.has_key(pinName), boardPinInfo.get(pinName))
break
if boardPinInfo == {}:
print('Missing group description for', pinName, projectPinInfo, file=sys.stderr)
errorDetected = True
pinInfo = {}
if boardPinInfo.has_key(pinName):
pinInfo = copy.copy(boardPinInfo[pinName])
else:
print('Missing pin description for', pinName, projectPinInfo, file=sys.stderr)
pinInfo['LOC'] = 'fmc.%s' % (pinName)
errorDetected = True
pinInfo['name'] = pin
for prop in projectPinInfo:
if projectPinInfo.has_key(prop):
pinInfo[prop] = projectPinInfo[prop]
out.write(template % pinInfo)
for k in pinInfo:
if k in used+['name', 'LOC', 'PIO_DIRECTION']: continue
out.write(setPropertyTemplate % {
'name': pin,
'prop': k,
'val': pinInfo[k],
})
if errorDetected:
sys.exit(-1);
|
Python
| 0.000803
|
@@ -4198,32 +4198,37 @@
escription for',
+ pin,
pinName, projec
@@ -4487,24 +4487,29 @@
iption for',
+ pin,
pinName, pr
|
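Both vendor formats in this script are emitted through the same mechanism: %-style named substitution against the merged pin-info dict — which is also why a missing pin description is fatal, since the template needs 'LOC' and 'name' keys. A two-line illustration with a hypothetical pin:

template = 'set_property LOC "%(LOC)s" [get_ports "%(name)s"]\n'
pin_info = {"LOC": "A17", "name": "clk_p"}
print(template % pin_info, end="")  # set_property LOC "A17" [get_ports "clk_p"]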
1794185db5411c2d02ec1300bfa9e079a48b7a9d
|
Fix broken ETW dependency.
|
syzygy/scripts/benchmark/benchmark.gyp
|
syzygy/scripts/benchmark/benchmark.gyp
|
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{
'variables': {
'chromium_code': 1,
},
'targets': [
{
'target_name': 'benchmark',
'type': 'none',
'variables': {
'benchmark_sources': [
'benchmark.py',
'chrome_control.py',
'chrome_control_test.py',
'chrome_utils.py',
'event_counter.py',
'ibmperf.py',
'ibmperf_test.py',
'instrument.py',
'optimize.py',
'profile.py',
'runner.py',
'setup.py',
'trace_event.py',
'zip_benchmark.py',
],
# The executables generated by the dependencies of the benchmark
# build rule. These need to go to inputs of the action below to make
# sure the benchmark egg is rebuilt on any change to them.
'benchmark_executables': [
'<(PRODUCT_DIR)/agent_logger.exe',
'<(PRODUCT_DIR)/basic_block_entry_client.dll',
'<(PRODUCT_DIR)/call_trace_client.dll',
'<(PRODUCT_DIR)/call_trace_control.exe',
'<(PRODUCT_DIR)/call_trace_service.exe',
'<(PRODUCT_DIR)/coverage_client.dll',
'<(PRODUCT_DIR)/dromaeo.zip',
'<(PRODUCT_DIR)/grinder.exe',
'<(PRODUCT_DIR)/instrument.exe',
'<(PRODUCT_DIR)/profile_client.dll',
'<(PRODUCT_DIR)/relink.exe',
'<(PRODUCT_DIR)/reorder.exe',
'<(PRODUCT_DIR)/run_in_snapshot.exe',
'<(PRODUCT_DIR)/run_in_snapshot_xp.exe',
'<(PRODUCT_DIR)/run_in_snapshot_x64.exe',
'<(PRODUCT_DIR)/syzyasan_rtl.dll',
'<(PRODUCT_DIR)/virtualenv.exe',
'<(PRODUCT_DIR)/wsdump.exe',
],
'setup_file': 'setup.py',
'success_file': '<(PRODUCT_DIR)/Benchmark-egg-success.txt',
'script_file': '<(src)/syzygy/build/build_egg.py',
},
'sources': [
'<@(benchmark_sources)',
],
'dependencies': [
'<(src)/syzygy/agent/asan/asan.gyp:syzyasan_rtl',
'<(src)/syzygy/agent/basic_block_entry/basic_block_entry.gyp:'
'basic_block_entry_client',
'<(src)/syzygy/agent/call_trace/call_trace.gyp:call_trace_client',
'<(src)/syzygy/agent/coverage/coverage.gyp:coverage_client',
'<(src)/syzygy/agent/profiler/profiler.gyp:profile_client',
'<(src)/syzygy/grinder/grinder.gyp:grinder',
'<(src)/syzygy/instrument/instrument.gyp:instrument',
'<(src)/syzygy/py/py.gyp:virtualenv',
'<(src)/syzygy/py/etw_db/etw_db.gyp:etw',
'<(src)/syzygy/py/etw_db/etw_db.gyp:etw_db',
'<(src)/syzygy/relink/relink.gyp:relink',
'<(src)/syzygy/reorder/reorder.gyp:reorder',
'<(src)/syzygy/snapshot/snapshot.gyp:run_in_snapshot',
'<(src)/syzygy/snapshot/snapshot.gyp:run_in_snapshot_xp',
'<(src)/syzygy/snapshot/snapshot.gyp:run_in_snapshot_x64',
'<(src)/syzygy/trace/agent_logger/agent_logger.gyp:agent_logger',
'<(src)/syzygy/trace/etw_control/etw_control.gyp:call_trace_control',
'<(src)/syzygy/trace/service/service.gyp:call_trace_service_exe',
'<(src)/syzygy/wsdump/wsdump.gyp:wsdump',
'<(src)/third_party/dromaeo/dromaeo.gyp:dromaeo_zip',
],
'actions': [
{
'action_name': 'build_benchmark',
'msvs_cygwin_shell': 0,
'inputs': [
'<(script_file)',
'<(setup_file)',
'<@(benchmark_sources)',
'<@(benchmark_executables)',
],
'outputs': [
'<(success_file)',
],
'action': [
'"<(PRODUCT_DIR)/py/scripts/python"',
'<(script_file)',
'--setup-file', '<(setup_file)',
'--build-dir', '<(PRODUCT_DIR)/temp/benchmark',
'--success-file', '<(success_file)',
'--',
'install_data',
'--exe-dir', '<(PRODUCT_DIR)',
],
},
],
},
{
'target_name': 'benchmark_zip',
'type': 'none',
'dependencies': [
'benchmark',
'<(src)/third_party/sawbuck/py/etw/etw.gyp:etw',
'<(src)/syzygy/py/etw_db/etw_db.gyp:etw_db',
'<(src)/syzygy/scripts/scripts.gyp:setuptools',
],
'actions': [
{
'action_name': 'create_benchmark_zip',
'msvs_cygwin_shell': 0,
'inputs': [
'zip_benchmark.py',
# The -success files are modified on successful egging,
# and have a fixed name. We use them to trigger re-zipping
# rather than the eggs, which have variable file names.
'<(PRODUCT_DIR)/Benchmark-egg-success.txt',
'<(PRODUCT_DIR)/ETW-egg-success.txt',
'<(PRODUCT_DIR)/ETW-Db-egg-success.txt',
'<(PRODUCT_DIR)/setuptools-0.6c11-py2.6.egg',
],
'outputs': [
'<(PRODUCT_DIR)/benchmark.bat',
'<(PRODUCT_DIR)/benchmark.zip',
'<(PRODUCT_DIR)/instrument.bat',
'<(PRODUCT_DIR)/optimize.bat',
'<(PRODUCT_DIR)/profile.bat',
],
'action': [
'"<(PRODUCT_DIR)/py/scripts/python"',
'zip_benchmark.py',
'--root-dir',
'<(PRODUCT_DIR)',
],
},
],
},
]
}
|
Python
| 0.00015
|
@@ -4667,38 +4667,31 @@
rc)/
-third_party/sawbuck
+syzygy
/py/etw
+_db
/etw
+_db
.gyp
|
a9cb12b48976cde0973966488c6957e606036d26
|
Clean up
|
krbroast-pcap2hashcat.py
|
krbroast-pcap2hashcat.py
|
#!/usr/bin/env python3 -tt
from scapy.all import *
import struct
import codecs
from pyasn1.codec.ber import encoder, decoder
MESSAGETYPEOFFSETUDP = 17
MESSAGETYPEOFFSETTCP = 21
DEBUG = True
TGS_REP = 13
def findkerbpayloads(packets, verbose=False):
kploads = []
i = 1
unfinished = {}
for p in packets:
# UDP
if p.haslayer(UDP) and p.sport == 88 and p[UDP].load[MESSAGETYPEOFFSETUDP] == TGS_REP:
if verbose: print("found UDP payload of size %i" % len(p[UDP].load) )
kploads.append(p[UDP].load)
#TCP
elif p.haslayer(TCP) and p.sport == 88 and p[TCP].flags & 23== 16: #ACK Only, ignore push (8), urg (32), and ECE (64+128)
# assumes that each TCP packet contains the full payload
try:
payload = p[TCP].load
except:
continue
print(payload)
if len(payload) > MESSAGETYPEOFFSETTCP and payload[MESSAGETYPEOFFSETTCP] == TGS_REP:
# found start of new TGS-REP
size = struct.unpack(">I", payload[:4])[0]
if size + 4 == len(payload):
kploads.append(payload[4:size+4]) # strip the size field
else:
#print('ERROR: Size is incorrect: %i vs %i' % (size, len(payload)))
unfinished[(p[IP].src, p[IP].dst, p[TCP].dport)] = (payload[4:size+4], size)
if verbose: print("found TCP payload of size %i" % size)
elif (p[IP].src, p[IP].dst, p[TCP].dport) in unfinished:
ticketdata, size = unfinished.pop((p[IP].src, p[IP].dst, p[TCP].dport))
ticketdata += payload
#print("cont: %i %i" % (len(ticketdata), size))
if len(ticketdata) == size:
kploads.append(ticketdata)
elif len(ticketdata) < size:
unfinished[(p[IP].src, p[IP].dst, p[TCP].dport)] = (ticketdata, size)
else:
# OH NO! Oversized!
print('Too much data received! Source: %s Dest: %s DPort %i' % (p[IP].src, p[IP].dst, p[TCP].dport))
return kploads
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Find TGS_REP packets in a pcap file and write them for use cracking')
parser.add_argument('-f', '--pcap', dest='pcaps', action='append', required=True,
metavar='PCAPFILE', #type=file, #argparse.FileType('r'),
help='a file to search for Kerberos TGS_REP packets')
parser.add_argument('-w', '--outputfile', dest='outfile', action='store', required=False,
metavar='OUTPUTFILE', type=argparse.FileType('w'),
help='the output file')
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', default=False,
help='display verbose messages')
args = parser.parse_args()
kploads = []
for f in args.pcaps:
packets = rdpcap(f)
kploads += findkerbpayloads(packets, args.verbose)
if len(kploads) == 0:
print('no payloads found')
sys.exit(0)
if args.outfile:
print('writing %i hex encoded payloads to %s' % (len(kploads), args.outfile.name))
decode_hex = codecs.getdecoder("hex_codec")
for p in kploads:
encticket = decoder.decode(p)[0][4][3][2].asOctets().hex()
out = "$krb5tgs$23$*user$realm$test/spn*$" + encticket[:32] + "$" + encticket[32:]
if args.outfile:
args.outfile.write(out + '\n')
else:
print(out)
sys.exit()
|
Python
| 0.000001
|
@@ -761,27 +761,8 @@
ue%0A%0A
-%09%09%09print(payload)%0A%0A
%09%09%09i
@@ -3071,17 +3071,4 @@
out)
-%0A%09%09sys.exit()
|
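The TCP branch's bookkeeping exists because Kerberos-over-TCP prefixes each message with a 4-byte big-endian length, and a message can span packets; partial payloads are parked per (src, dst, dport) flow until the declared size arrives. The length-prefix logic in isolation, as a runnable sketch:

import struct

def frame(payload: bytes) -> bytes:
    return struct.pack(">I", len(payload)) + payload

def deframe(stream: bytes):
    # Yield complete messages from a size-prefixed byte stream.
    offset = 0
    while offset + 4 <= len(stream):
        (size,) = struct.unpack(">I", stream[offset:offset + 4])
        if offset + 4 + size > len(stream):
            break  # incomplete: the real code buffers this per flow
        yield stream[offset + 4: offset + 4 + size]
        offset += 4 + size

stream = frame(b"ticket-1") + frame(b"ticket-2")
print(list(deframe(stream)))  # [b'ticket-1', b'ticket-2']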
1786702388abc4fe737ee73d64ef5864f42f0c3d
|
Fix missing offset for Query
|
chat/query.py
|
chat/query.py
|
# Copyright 2017 Oursky Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .predicate import Predicate
class Query:
def __init__(self, record_type,
predicate=None, count=False,
limit=50, offset=None, include=[]):
self.record_type = record_type
if predicate is None:
predicate = Predicate()
self.predicate = predicate
self.count = count
self.sort = []
self.limit = limit
self.offset = None
self.include = include
def add_order(self, key, order):
self.sort.append([{'$type': 'keypath', '$val': key}, order])
return self
|
Python
| 0.00002
|
@@ -991,20 +991,22 @@
ffset =
-None
+offset
%0A
|
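The one-line diff fixes a classic constructor slip: `offset` is accepted as a parameter but the attribute is assigned the literal None, so the argument is silently dropped. A minimal reproduction plus the fix:

class Query:
    def __init__(self, offset=None):
        self.offset = None      # bug: drops the argument
        # self.offset = offset  # fix applied by the diff

print(Query(offset=25).offset)  # None, not 25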
c637eb216e9dc148a588019d22bc96db3565b3fc
|
Correct breakpoints
|
cmsplugin_bs4forcascade/bootstrap4/settings.py
|
cmsplugin_bs4forcascade/bootstrap4/settings.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import ugettext_lazy as _
from cmsplugin_cascade.extra_fields.config import PluginExtraFieldsConfig
CASCADE_PLUGINS = getattr(settings, 'BS4_CASCADE_PLUGINS',
['buttons', 'carousel', 'accordion', 'container', 'image', 'picture','card',
'tabs', 'gallery', 'jumbotron'],)
if 'cmsplugin_bs4forcascade' in settings.INSTALLED_APPS:
CASCADE_PLUGINS.append('secondary_menu')
def set_defaults(config):
config.setdefault('bootstrap4', {})
config['bootstrap4'].setdefault(
'breakpoints', (
('xs', (0, 'mobile', _("mobile phones"), 0, 542)),
('sm', (576, 'phablet', _("phablets"), 544, 767)),
('md', (768, 'tablet', _("tablets"), 768, 991)),
('lg', (992, 'laptop', _("laptops"), 992, 1199)),
('xl', (1200, 'desktop', _("large desktops"), 1200, 1980)),))
for tpl in config['bootstrap4']['breakpoints']:
if len(tpl[1]) != 5:
msg = "The configuration directive CMSPLUGIN_CASCADE['bootstrap4']['bootstrap4']['{}'] requires 5 parameters"
raise ImproperlyConfigured(msg.format(tpl[0]))
config['bootstrap4'].setdefault('gutter', 30)
config['plugins_with_extra_fields'].setdefault('Bootstrap4ButtonPlugin', PluginExtraFieldsConfig())
config['plugins_with_extra_fields'].setdefault('Bootstrap4RowPlugin', PluginExtraFieldsConfig())
config['plugins_with_extra_fields'].setdefault('BootstrapJumbotronPlugin', PluginExtraFieldsConfig(
inline_styles={
'extra_fields:Paddings': ['margin-top', 'margin-bottom', 'padding-top', 'padding-bottom'],
'extra_units:Paddings': 'px,em'
}
))
config['plugins_with_extra_render_templates'].setdefault('BootstrapSecondaryMenuPlugin', (
('cascade/bootstrap4/secmenu-list-group.html', _("List Group")),
('cascade/bootstrap4/secmenu-unstyled-list.html', _("Unstyled List")),))
if os.getenv('DJANGO_CLIENT_FRAMEWORK', '').startswith('angular'):
config['bootstrap4']['template_basedir'] = 'angular-ui'
|
Python
| 0.000193
|
@@ -722,17 +722,19 @@
('xs', (
-0
+575
, 'mobil
@@ -761,14 +761,16 @@
%22),
+56
0, 5
-42
+75
)),%0A
@@ -825,10 +825,10 @@
), 5
-44
+76
, 76
@@ -1029,16 +1029,17 @@
80)),))%0A
+%0A
for
|
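For reference, the first two breakpoint rows as they read once the hunks are applied, together with the arity check that set_defaults() enforces; the ugettext wrappers are dropped here to keep the sketch self-contained:

breakpoints = (
    ('xs', (575, 'mobile', "mobile phones", 560, 575)),
    ('sm', (576, 'phablet', "phablets", 576, 767)),
)
# set_defaults() raises ImproperlyConfigured unless every tuple carries
# exactly 5 fields: (width, device key, label, lower bound, upper bound).
for name, params in breakpoints:
    assert len(params) == 5, name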
342f523fc92834e454eef29dfb1bab825d6a552f
|
correct the output log message
|
btsprice/exchanges.py
|
btsprice/exchanges.py
|
# -*- coding: utf-8 -*-
import requests
import json
import logging
from btsprice.misc import get_median
from btsprice.yahoo import Yahoo
class Exchanges():
# ------------------------------------------------------------------------
# Init
# ------------------------------------------------------------------------
def __init__(self):
self.header = {'content-type': 'application/json',
'User-Agent': 'Mozilla/5.0 Gecko/20100101 Firefox/22.0'}
self.log = logging.getLogger('bts')
self.order_types = ["bids", "asks"]
self.yahoo = Yahoo()
# ------------------------------------------------------------------------
# Fetch data
# ------------------------------------------------------------------------
#
def fetch_from_btc38(self, quote="cny", base="bts"):
try:
url = "http://api.btc38.com/v1/depth.php"
params = {'c': base, 'mk_type': quote}
response = requests.get(
url=url, params=params, headers=self.header, timeout=3)
result = json.loads(vars(response)['_content'].decode("utf-8-sig"))
for order_type in self.order_types:
for order in result[order_type]:
order[0] = float(order[0])
order[1] = float(order[1])
order_book_ask = sorted(result["asks"])
order_book_bid = sorted(result["bids"], reverse=True)
return {"bids": order_book_bid, "asks": order_book_ask}
except:
self.log.error("Error fetching results from btc38!")
return
def fetch_from_bter(self, quote="cny", base="bts"):
try:
url = "http://data.bter.com/api/1/depth/%s_%s" % (base, quote)
result = requests.get(
url=url, headers=self.header, timeout=3).json()
for order_type in self.order_types:
for order in result[order_type]:
order[0] = float(order[0])
order[1] = float(order[1])
order_book_ask = sorted(result["asks"])
order_book_bid = sorted(result["bids"], reverse=True)
return {"bids": order_book_bid, "asks": order_book_ask}
except:
self.log.error("Error fetching results from bter!")
return
def fetch_from_yunbi(self, quote="cny", base="bts"):
try:
url = "https://yunbi.com/api/v2/depth.json"
params = {'market': base+quote}
result = requests.get(
url=url, params=params, headers=self.header, timeout=3).json()
for order_type in self.order_types:
for order in result[order_type]:
order[0] = float(order[0])
order[1] = float(order[1])
order_book_ask = sorted(result["asks"])
order_book_bid = sorted(result["bids"], reverse=True)
return {"bids": order_book_bid, "asks": order_book_ask}
except:
self.log.error("Error fetching results from yunbi!")
return
def fetch_from_poloniex(self, quote="btc", base="bts"):
try:
quote = quote.upper()
base = base.upper()
url = "http://poloniex.com/public?command=\
returnOrderBook¤cyPair=%s_%s" % (quote, base)
result = requests.get(
url=url, headers=self.header, timeout=3).json()
for order_type in self.order_types:
for order in result[order_type]:
order[0] = float(order[0])
order[1] = float(order[1])
order_book_ask = sorted(result["asks"])
order_book_bid = sorted(result["bids"], reverse=True)
return {"bids": order_book_bid, "asks": order_book_ask}
except:
self.log.error("Error fetching results from bter!")
return
def fetch_from_yahoo(self, assets=None):
return(self.yahoo.fetch_price())
def get_btcprice_in_cny(self):
price_queue = []
_order_book = self.fetch_from_btc38("cny", "btc")
if _order_book:
_price_btc = (
_order_book["bids"][0][0] + _order_book["asks"][0][0]) / 2.0
price_queue.append(_price_btc)
try:
url = "https://data.btcchina.com/data/ticker?market=btccny"
result = requests.get(
url=url, headers=self.header, timeout=3).json()
price_queue.append(float(result["ticker"]["last"]))
except:
self.log.error("Error fetching results from btcchina!")
try:
url = "http://api.huobi.com/staticmarket/ticker_btc_json.js"
result = requests.get(
url=url, headers=self.header, timeout=3).json()
price_queue.append(float(result["ticker"]["last"]))
except:
self.log.error("Error fetching results from huobi!")
try:
url = "https://www.okcoin.cn/api/ticker.do?symbol=btc_cny"
result = requests.get(
url=url, headers=self.header, timeout=3).json()
price_queue.append(float(result["ticker"]["last"]))
except:
self.log.error("Error fetching results from okcoin!")
if price_queue:
return get_median(price_queue)
else:
return None
def get_btcprice_in_usd(self):
price_queue = []
_order_book = self.fetch_from_poloniex("USDT", "btc")
if _order_book:
_price_btc = (
_order_book["bids"][0][0] + _order_book["asks"][0][0]) / 2.0
price_queue.append(_price_btc)
try:
url = "https://www.okcoin.com/api/v1/ticker.do?symbol=btc_usd"
result = requests.get(
url=url, headers=self.header, timeout=3).json()
price_queue.append(float(result["ticker"]["last"]))
except:
self.log.error("Error fetching results from okcoin!")
if price_queue:
return get_median(price_queue)
else:
return None
if __name__ == "__main__":
exchanges = Exchanges()
exchanges.get_btcprice_in_usd()
exchanges.get_btcprice_in_cny()
|
Python
| 0.999999
|
@@ -3894,36 +3894,40 @@
ng results from
-bter
+poloniex
!%22)%0A
|
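Every fetch_* helper in this record reduces an order book to the midpoint of the best bid and best ask before get_median() aggregates across venues. The same computation as a standalone sketch, with made-up numbers:

# order_book entries are [price, volume] pairs, bids descending, asks ascending
order_book = {"bids": [[0.031, 120.0]], "asks": [[0.033, 80.0]]}
mid_price = (order_book["bids"][0][0] + order_book["asks"][0][0]) / 2.0
assert abs(mid_price - 0.032) < 1e-9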
2683219a40382687f2bd09f14aab763afe6d97a6
|
Update bb-runtime branch for GNAT Community 2019
|
scripts/install_dependencies.py
|
scripts/install_dependencies.py
|
#! /usr/bin/env python2
import argparse
import os
import os.path
import subprocess
import sys
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '../'))
def run_program(argv, cwd=os.path.dirname(os.path.realpath(__file__))):
print "$ %s" % " ".join(argv)
p = subprocess.Popen(
argv,
cwd=cwd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
stdout, stderr = p.communicate()
try:
stdout = stdout.decode('ascii')
except UnicodeError:
return 'stdout is not ASCII'
try:
stderr = stderr.decode('ascii')
except UnicodeError:
return 'stderr is not ASCII'
return (p.returncode, stdout, stderr)
def git_clone(repo_url, branch, dst, recursive=False):
extra_args = []
if recursive:
extra_args = extra_args + ["--recursive"]
# Clone the repo
returncode, stdout, stderr = run_program(
['git', 'clone', repo_url, dst] + extra_args)
print stdout
if returncode:
print 'git clone error (returned {}):\n{}'.format(
returncode, stderr
)
return returncode
if branch:
# Clone the repo
returncode, stdout, stderr = run_program(
'git', '-C', dst, 'checkout', '-b', branch, "origin/" + branch)
print stdout
if returncode:
print 'git branch checkout error (returned {}):\n{}'.format(
returncode, stderr
)
return returncode
# Git repositories
# - Git repository URL
# - Git branch (if not master)
# - Destination directory
# - Recursive clone?
# - install command (if any)
git_repos = [("https://github.com/AdaCore/bb-runtimes",
None,
"bb-runtimes",
False,
["python", ROOT_DIR + "/bb-runtimes/install.py", "--arch=arm-eabi"]),
]
parser = argparse.ArgumentParser('Download and install dependencies')
parser.add_argument(
'pattern', nargs='*',
help='List of pattern to filter the set of dependencies to install'
)
def main(args):
at_least_one_error = False
print "ROOT_DIR :" + ROOT_DIR
ret = 0
for repo, branch, dest, recursive, build_cmd in git_repos:
if args.pattern and not any(pat in repo for pat in args.pattern):
continue
dest = os.path.join(ROOT_DIR, dest)
if not os.path.exists(dest):
ret = git_clone(repo, branch, dest, recursive)
else:
print "%s already cloned" % dest
if ret:
at_least_one_error = True
if build_cmd:
print "Running build command:"
ret, stdout, stderr = run_program(build_cmd, dest)
print stdout
if ret:
print 'Dependency install command error' +\
' (returned {}):\n{}'.format(ret, stderr)
if at_least_one_error:
sys.exit(1)
if __name__ == '__main__':
main(parser.parse_args())
|
Python
| 0
|
@@ -1215,32 +1215,33 @@
r = run_program(
+%5B
%0A 'gi
@@ -1299,16 +1299,17 @@
+ branch
+%5D
)%0A
@@ -1724,12 +1724,24 @@
-None
+%22community-2019%22
,%0A
|
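The first two hunks of this diff wrap the checkout command in a list: the old call handed run_program() seven positional arguments, although its signature accepts a single argv list plus an optional cwd, so the branch checkout could never run. The third hunk pins the bb-runtimes branch to community-2019. A sketch of the corrected call shape, with values taken from the diff:

argv = ['git', '-C', 'bb-runtimes', 'checkout', '-b',
        'community-2019', 'origin/community-2019']
# run_program(argv) now hands this list straight to subprocess.Popen.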