| commit | subject | old_file | new_file | old_contents | lang | proba | diff |
|---|---|---|---|---|---|---|---|
e1d5cf8dd8c143cad0229b62e9aeb0c3b06ca3e7
|
Fix unittest assertion
|
driftbase/auth/tests/test_steam.py
|
driftbase/auth/tests/test_steam.py
|
import unittest
import mock
from json import loads, dumps
import requests
from werkzeug.exceptions import Unauthorized, ServiceUnavailable
from driftbase.auth.steam import run_ticket_validation
patcher = None
def setUpModule():
    original_get = requests.get

    def requests_get_mock(url, *args, **kw):
        class Response(object):
            def json(self):
                return loads(self.content)

        response = Response()
        response.status_code = 200

        if url == 'key url':
            response.content = 'secret key'
        elif 'AuthenticateUserTicket' in url:
            ret = {
                "response": {
                    "params": {
                        "result": "OK",
                        "steamid": "76561198026053155",
                        "ownersteamid": "76561198026053155",
                        "vacbanned": False,
                        "publisherbanned": False
                    }
                }
            }
            response.content = dumps(ret)
        elif 'CheckAppOwnership' in url:
            ret = {
                "appownership": {
                    "ownsapp": True,
                    "permanent": False,
                    "timestamp": "2016-07-04T08:01:08Z",
                    "ownersteamid": "76561198026053155",
                    "result": "OK"
                }
            }
            response.content = dumps(ret)
        else:
            return original_get(url, *args, **kw)

        return response

    global patcher
    patcher = mock.patch('requests.get', requests_get_mock)
    patcher.start()


def tearDownModule():
    global patcher
    patcher.stop()


class SteamCase(unittest.TestCase):
    steamid = '76561198026053155'
    ticket = "140000003DED863BEB5F462E23D6EB0301001001C78E8457180000000100000002000000B2470DD200000000E081FC0"\
             "111000000B2000000320000000400000023D6EB0301001001E0010000B2470DD2CA00010A00000000604C8457E0FB9F5701"\
             "00000000000000000036237172F0213710820FA4E76E26FCD11C7A2A1EC868680D7AF51DAEB7859BACEB85D4972E0E2DDB0"\
             "4D9D8EC2E24392C4981F1588930285424F4B4B15F545AD2B1E06482163A9E91BF2EE5BF0A270C3B287FFE7F532AF0A0448D"\
             "11381EEE1CA2652FA914C2C833A362761B394D7D8489F9CC5886839AA8F0053547ACE7582C3A"

    def test_missing_fields(self):
        # Verify missing field check. The exception will list out all missing fields, so by removing
        # a single field, we should only be notified of that one missing.
        with self.assertRaises(Unauthorized) as context:
            run_ticket_validation({})
        self.assertIn("The token is missing required fields: ticket, appid.", context.exception.description)

    def test_broken_url(self):
        # Verify that broken key url is caught
        with self.assertRaises(ServiceUnavailable) as context:
            run_ticket_validation({'ticket': self.ticket, 'appid': 123}, key_url='http://localhost:1/')
        self.assertIn("The server is temporarily unable", context.exception.description)

    def test_steam(self):
        # Can't really test this. Just mock success cases from api.steampowered.com.
        steamid = run_ticket_validation({'steamid': self.steamid, 'ticket': self.ticket, 'appid': 123}, key_url='key url')
        self.assertTrue(steamid == self.steamid)
        # TODO: mock error cases as well. it isn't that hard.


if __name__ == "__main__":
    import logging
    logging.basicConfig(level='INFO')
    unittest.main()
|
Python
| 0.000224
|
@@ -2580,16 +2580,66 @@
 ion({})
+        # The order of missing fields isn't fixed
@@ -2694,23 +2694,133 @@
 lds:
- 
+", context.exception.description)
+        self.assertIn("
 ticket
+"
 ,
+context.exception.description)
+        self.assertIn("
 appid
-.
 ", c
|
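Decoded, the patched test_missing_fields would read roughly as follows (a sketch reconstructed from old_contents plus the diff; exact whitespace is not recoverable from the encoding):

    def test_missing_fields(self):
        with self.assertRaises(Unauthorized) as context:
            run_ticket_validation({})
        # The order of missing fields isn't fixed
        self.assertIn("The token is missing required fields:", context.exception.description)
        self.assertIn("ticket", context.exception.description)
        self.assertIn("appid", context.exception.description)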
17d12027929365e8ebcc69c32642068cc6208678
|
Decode stdout in shell.run_cmd
|
powerline/lib/shell.py
|
powerline/lib/shell.py
|
# vim:fileencoding=utf-8:noet
from subprocess import Popen, PIPE
def run_cmd(pl, cmd, stdin=None):
	try:
		p = Popen(cmd, stdout=PIPE, stdin=PIPE)
	except OSError as e:
		pl.exception('Could not execute command ({0}): {1}', e, cmd)
		return None
	else:
		stdout, err = p.communicate(stdin)
		return stdout.strip()


def asrun(pl, ascript):
	'''Run the given AppleScript and return the standard output and error.'''
	return run_cmd(pl, ['osascript', '-'], ascript)
|
Python
| 0.999042
|
@@ -59,16 +59,76 @@
 n, PIPE
+from locale import getlocale, getdefaultlocale, LC_MESSAGES
 
 
 def ru
@@ -346,16 +346,126 @@
 (stdin)
+		encoding = getlocale(LC_MESSAGES)[1] or getdefaultlocale()[1] or 'utf-8'
+		stdout = stdout.decode(encoding)
 	return
|
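Decoded, the patch adds a locale import and decodes stdout before stripping; the resulting run_cmd reads roughly like this (a sketch reconstructed from old_contents plus the diff):

from locale import getlocale, getdefaultlocale, LC_MESSAGES

def run_cmd(pl, cmd, stdin=None):
	try:
		p = Popen(cmd, stdout=PIPE, stdin=PIPE)
	except OSError as e:
		pl.exception('Could not execute command ({0}): {1}', e, cmd)
		return None
	else:
		stdout, err = p.communicate(stdin)
		# Decode using the message locale, falling back to UTF-8.
		encoding = getlocale(LC_MESSAGES)[1] or getdefaultlocale()[1] or 'utf-8'
		stdout = stdout.decode(encoding)
		return stdout.strip()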
fb47031a32f30e04762f73dcf51a4864353fb74c
|
Add TODO
|
predicate/predicate.py
|
predicate/predicate.py
|
import re
from django.db.models.query_utils import Q
LOOKUP_SEP = '__'
QUERY_TERMS = set([
    'exact', 'iexact', 'contains', 'icontains', 'gt', 'gte', 'lt', 'lte', 'in',
    'startswith', 'istartswith', 'endswith', 'iendswith', 'range', 'year',
    'month', 'day', 'week_day', 'isnull', 'search', 'regex', 'iregex',
])


def eval_wrapper(children):
    """
    generator to yield child nodes, or to wrap filter expressions
    """
    for child in children:
        if isinstance(child, P):
            yield child
        elif isinstance(child, tuple) and len(child) == 2:
            yield LookupExpression(child)


class P(Q):
    """
    A Django 'predicate' construct

    This is a variation on Q objects, but instead of being used to generate
    SQL, they are used to test a model instance against a set of conditions.
    """

    # allow the use of the 'in' operator for membership testing
    def __contains__(self, obj):
        return self.eval(obj)

    def eval(self, instance):
        """
        Returns true if the model instance matches this predicate
        """
        evaluators = {"AND": all, "OR": any}
        evaluator = evaluators[self.connector]
        ret = evaluator(c.eval(instance) for c in eval_wrapper(self.children))
        if self.negated:
            return not ret
        else:
            return ret

    def to_identifier(self):
        s = ""
        for c in sorted(self.children):
            if isinstance(c, type(self)):
                s += c.to_identifier()
            else:
                s += ''.join([str(val) for val in c])
        return s.replace('_','')


class LookupExpression(object):
    """
    A thin wrapper around a filter expression tuple of (lookup-type, value) to
    provide an eval method
    """

    def __init__(self, expr):
        self.lookup, self.value = expr
        self.field = None

    def get_field(self, instance):
        lookup_type = 'exact'  # Default lookup type
        parts = self.lookup.split(LOOKUP_SEP)
        num_parts = len(parts)
        if (len(parts) > 1 and parts[-1] in QUERY_TERMS):
            # Traverse the lookup query to distinguish related fields from
            # lookup types.
            lookup_model = instance
            for counter, field_name in enumerate(parts):
                try:
                    lookup_field = getattr(lookup_model, field_name)
                except AttributeError:
                    # Not a field. Bail out.
                    lookup_type = parts.pop()
                    return (lookup_model, lookup_field, lookup_type)
                # Unless we're at the end of the list of lookups, let's attempt
                # to continue traversing relations.
                if (counter + 1) < num_parts:
                    try:
                        dummy = lookup_model._meta.get_field(field_name).rel.to
                        lookup_model = lookup_field
                        # print lookup_model
                    except AttributeError:
                        # # Not a related field. Bail out.
                        lookup_type = parts.pop()
                        return (lookup_model, lookup_field, lookup_type)
        else:
            return (instance, getattr(instance, parts[0]), lookup_type)

    def eval(self, instance):
        """
        return true if the instance matches the expression
        """
        lookup_model, lookup_field, lookup_type = self.get_field(instance)
        comparison_func = getattr(self, '_' + lookup_type, None)
        if comparison_func:
            return comparison_func(lookup_model, lookup_field)
        raise ValueError("invalid lookup: {}".format(self.lookup))

    # Comparison functions

    def _exact(self, lookup_model, lookup_field):
        return lookup_field == self.value

    def _iexact(self, lookup_model, lookup_field):
        return lookup_field.lower() == self.value.lower()

    def _contains(self, lookup_model, lookup_field):
        return self.value in lookup_field

    def _icontains(self, lookup_model, lookup_field):
        return self.value.lower() in lookup_field.lower()

    def _gt(self, lookup_model, lookup_field):
        return lookup_field > self.value

    def _gte(self, lookup_model, lookup_field):
        return lookup_field >= self.value

    def _lt(self, lookup_model, lookup_field):
        return lookup_field < self.value

    def _lte(self, lookup_model, lookup_field):
        return lookup_field <= self.value

    def _startswith(self, lookup_model, lookup_field):
        return lookup_field.startswith(self.value)

    def _istartswith(self, lookup_model, lookup_field):
        return lookup_field.lower().startswith(self.value.lower())

    def _endswith(self, lookup_model, lookup_field):
        return lookup_field.endswith(self.value)

    def _iendswith(self, lookup_model, lookup_field):
        return lookup_field.lower().endswith(self.value.lower())

    def _in(self, lookup_model, lookup_field):
        return lookup_field in self.value

    def _range(self, lookup_model, lookup_field):
        # TODO could be more between like
        return self.value[0] < lookup_field < self.value[1]

    def _year(self, lookup_model, lookup_field):
        return lookup_field.year == self.value

    def _month(self, lookup_model, lookup_field):
        return lookup_field.month == self.value

    def _day(self, lookup_model, lookup_field):
        return lookup_field.day == self.value

    def _week_day(self, lookup_model, lookup_field):
        return lookup_field.weekday() == self.value

    def _isnull(self, lookup_model, lookup_field):
        if self.value:
            return lookup_field == None
        else:
            return lookup_field != None

    def _search(self, lookup_model, lookup_field):
        return self._contains(lookup_model, lookup_field)

    def _regex(self, lookup_model, lookup_field):
        """
        Note that for queries - this can be DB specific syntax
        here we just use Python
        """
        return bool(re.search(self.value, lookup_field))

    def _iregex(self, lookup_model, lookup_field):
        return bool(re.search(self.value, lookup_field, flags=re.I))
|
Python
| 0.000002
|
@@ -928,16 +928,144 @@
 , obj):
+        # TODO: This overrides Q's __contains__ method. It should only have
+        # the custom behavior for non-Node objects.
|
12b806a0c68ceb146eed3b4a9406f36e9f930ba6
|
Fix bug with closing socket without creating it again.
|
rl-rc-car/sensor_client.py
|
rl-rc-car/sensor_client.py
|
"""
This is used to gather our readings from the remote sensor server.
http://ilab.cs.byu.edu/python/socket/echoclient.html
"""
import socket
import numpy as np
import time
class SensorClient:
    def __init__(self, host='192.168.2.9', port=8888, size=1024):
        self.host = host
        self.port = port
        self.size = size
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    def get_readings(self):
        self.s.connect((self.host, self.port))
        readings = self.s.recv(self.size)
        self.s.close()

        # Turn our weird stringed list into an actual list.
        readings = readings.decode('utf-8')
        readings = readings[1:-1]
        readings = readings.split(', ')
        readings = [float(i) for i in readings]

        # Numpy it.
        return np.array([readings])


if __name__ == '__main__':
    # Testing it out.
    from becho import becho, bechonet

    network = bechonet.BechoNet(
        num_actions=6, num_inputs=3,
        nodes_1=256, nodes_2=256, verbose=True,
        load_weights=True,
        weights_file='saved-models/sonar-and-ir-9750.h5')
    pb = becho.ProjectBecho(
        network, num_actions=6, num_inputs=3,
        verbose=True, enable_training=False)

    sensors = SensorClient()

    while True:
        # Get the reading.
        readings = sensors.get_readings()
        print(readings)

        # Get the action.
        action = pb.get_action(readings)
        print("Doing action %d" % action)

        time.sleep(0.5)
|
Python
| 0
|
@@ -335,25 +335,48 @@
 ze
 
-        self.
+    def get_readings(self):
+        
 s = sock
@@ -425,50 +425,16 @@
 AM)
-
-    def get_readings(self):
-        self.
+        
 s.co
@@ -482,21 +482,16 @@
 dings = 
-self.
 s.recv(s
@@ -508,21 +508,16 @@
 
-self.
 s.close(
|
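Reconstructed from the diff, the fix moves socket creation into get_readings so a fresh socket is built on every call (a closed socket cannot be reconnected); a sketch of the patched method:

    def get_readings(self):
        # Create a new socket per call, so the connection can be
        # reopened after close().
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((self.host, self.port))
        readings = s.recv(self.size)
        s.close()
        # ... parsing of `readings` continues unchanged.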
3880093f70312d2e044fcabe06190329aa845c13
|
normalize action sampler
|
rlvision/exps/pg_16_exp.py
|
rlvision/exps/pg_16_exp.py
|
"""Policy Gradient for Grid 16x16.
It's Keras 2!
Author: Yuhuang Hu
Email : duguyue100@gmail.com
"""
from __future__ import print_function
import os
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Flatten
from keras.layers import Conv2D, AveragePooling2D
from keras.layers import Activation
import rlvision
from rlvision import grid
def discount_rewards(r):
    """Calculate discount rewards."""
    discounted_r = np.zeros_like(r)
    running_add = 0
    for t in reversed(xrange(0, r.size)):
        if r[t] != 0:
            running_add = 0
        running_add = running_add * gamma + r[t]
        discounted_r[t] = running_add
    return discounted_r


# load data
data, value, start_tot, traj_tot, goal_tot, imsize = grid.load_train_grid16()
data = np.asarray(data, dtype="float32")
value = np.asarray(value, dtype="float32")
print ("[MESSAGE] Data Loaded.")

# training 4000 samples, testing 1000 samples
num_train = 4000
num_test = 1000

# script parameters
input_dim = imsize[0]*imsize[1]
gamma = 0.99
update_freq = 1
learning_rate = 0.001
resume = False
network_type = "conv"
data_format = "channels_first"
num_output = 8
model_file = "pg16_model.h5"
model_path = os.path.join(rlvision.RLVISION_MODEL, model_file)

# define model
model = Sequential()
if network_type == "conv":
    model.add(Conv2D(32, (3, 3), padding="same",
                     input_shape=(3, imsize[0], imsize[1]),
                     data_format=data_format))
    model.add(Activation("relu"))
    model.add(Conv2D(32, (3, 3), padding="same",
                     input_shape=(3, imsize[0], imsize[1]),
                     data_format=data_format))
    model.add(Activation("relu"))
    model.add(AveragePooling2D(2, 2))
    model.add(Conv2D(32, (3, 3), padding="same",
                     input_shape=(3, imsize[0], imsize[1]),
                     data_format=data_format))
    model.add(Activation("relu"))
    model.add(Flatten())
model.add(Dense(num_output, activation="softmax"))

# print model
model.summary()

model.compile(loss="categorical_crossentropy", optimizer="adam")
if resume is True:
    model.load_weights(model_path)
print ("[MESSAGE] Model built.")

# training schedule
reward_sum = 0
running_reward = None
episode_number = 0
xs, dlogps, drs, probs = [], [], [], []
train_X, train_Y = [], []

# go through entire game space
for game_idx in xrange(num_test):
    for start_pos in start_tot[game_idx]:
        game = grid.Grid(data[game_idx], value[game_idx], imsize,
                         start_pos)
        # until the game is failed
        while True:
            # compute probability
            aprob = model.predict(game.get_state()).flatten()
            # sample feature
            xs.append(game.get_state())
            probs.append(aprob)
            # sample decision
            aprob = aprob/np.sum(aprob)
            action_sampler = aprob.copy()
            while True:
                action = np.random.choice(num_output, 1, p=action_sampler)[0]
                # check if the action is valid
                if game.is_pos_valid(game.action2pos(action)) is True:
                    break
                else:
                    # make sure the same action won't be sampled again
                    action_sampler[action] = 0.
            y = np.zeros((num_output,))
            y[action] = 1

            # update game and get feedback
            game.update_state_from_action(action)

            # if the game finished then train the model
            dlogps.append(np.array(y).astype("float32")-aprob)
            reward, state = game.get_state_reward()
            reward_sum += reward
            drs.append(reward)

            if state in [1, -1]:
                episode_number += 1
                exp = np.vstack(xs)
                epdlogp = np.vstack(dlogps)
                epr = np.vstack(drs)
                discounted_epr = discount_rewards(epr)
                discounted_epr -= np.mean(discounted_epr)
                discounted_epr /= np.std(discounted_epr)
                epdlogp *= discounted_epr

                # prepare training batch
                train_X.append(xs)
                train_Y.append(epdlogp)
                xs, dlogps, drs = [], [], []

                if episode_number % update_freq == 0:
                    y_train = probs + learning_rate*np.squeeze(
                        np.vstack(train_Y))
                    model.train_on_batch(np.squeeze(np.vstack(train_X)),
                                         y_train)
                    train_X, train_Y, probs = [], [], []
                    os.remove(model_path) \
                        if os.path.exists(model_path) else None
                    model.save_weights(model_path)

                running_reward = reward_sum if running_reward is None \
                    else running_reward*0.99+reward_sum*0.01
                print ("Environment reset imminent. Total Episode "
                       "Reward: %f. Running Mean: %f"
                       % (reward_sum, running_reward))
                reward_sum = 0

                print ("Episode %d Result: " % (episode_number) +
                       ("Defeat!" if state == -1 else "Victory!"))

                # to next game
                break
|
Python
| 0.000004
|
@@ -2834,48 +2834,8 @@
 ion
-            aprob = aprob/np.sum(aprob)
@@ -3259,16 +3259,91 @@
 n] = 0.
+                    action_sampler = action_sampler/np.sum(action_sampler)
|
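The fix drops the one-time normalization of aprob and instead renormalizes the sampler every time an invalid action is zeroed out, keeping it a valid probability vector for np.random.choice; the patched sampling loop, roughly (a sketch decoded from the diff):

            action_sampler = aprob.copy()
            while True:
                action = np.random.choice(num_output, 1, p=action_sampler)[0]
                # check if the action is valid
                if game.is_pos_valid(game.action2pos(action)) is True:
                    break
                else:
                    # make sure the same action won't be sampled again
                    action_sampler[action] = 0.
                    # renormalize so the probabilities sum to 1 again
                    action_sampler = action_sampler/np.sum(action_sampler)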
8dcb778c62c3c6722e2f6dabfd97f6f75c349e62
|
Set celery max tasks child to 1
|
celery_cgi.py
|
celery_cgi.py
|
import os
import logging
from celery import Celery
from temp_config.set_environment import DeployEnv
runtime_env = DeployEnv()
runtime_env.load_deployment_environment()
redis_server = os.environ.get('REDIS_HOSTNAME')
redis_port = os.environ.get('REDIS_PORT')
celery_tasks = [
    'hms_flask.modules.hms_controller',
    'pram_flask.tasks'
]

redis = 'redis://' + redis_server + ':' + redis_port + '/0'
logging.info("Celery connecting to redis server: " + redis)

celery = Celery('flask_qed', broker=redis, backend=redis, include=celery_tasks)
celery.conf.update(
    CELERY_ACCEPT_CONTENT=['json'],
    CELERY_TASK_SERIALIZER='json',
    CELERY_RESULT_SERIALIZER='json',
    CELERY_IGNORE_RESULT=True,
    CELERY_TRACK_STARTED=True,
    worker_max_memory_per_child = 50000
)
|
Python
| 0.999855
|
@@ -726,24 +726,60 @@
 ARTED=True,
+    worker_max_tasks_per_child = 1,
     worker_m
|
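Decoded, the patch adds a single setting to the conf block; the result, roughly:

celery.conf.update(
    CELERY_ACCEPT_CONTENT=['json'],
    CELERY_TASK_SERIALIZER='json',
    CELERY_RESULT_SERIALIZER='json',
    CELERY_IGNORE_RESULT=True,
    CELERY_TRACK_STARTED=True,
    worker_max_tasks_per_child = 1,
    worker_max_memory_per_child = 50000
)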
c82219ea0a651b01d7b9dd91286450e8ecab8fef
|
Make sure we clean up correctly after failed thrift connection
|
elasticsearch/connection/thrift.py
|
elasticsearch/connection/thrift.py
|
from __future__ import absolute_import
import time
try:
    from .esthrift import Rest
    from .esthrift.ttypes import Method, RestRequest

    from thrift.transport import TTransport, TSocket, TSSLSocket
    from thrift.protocol import TBinaryProtocol
    from thrift.Thrift import TException
    THRIFT_AVAILABLE = True
except ImportError:
    THRIFT_AVAILABLE = False

from ..exceptions import ConnectionError, ImproperlyConfigured
from .pooling import PoolingConnection


class ThriftConnection(PoolingConnection):
    """
    Connection using the `thrift` protocol to communicate with elasticsearch.

    See https://github.com/elasticsearch/elasticsearch-transport-thrift for additional info.
    """
    transport_schema = 'thrift'

    def __init__(self, host='localhost', port=9500, framed_transport=False, use_ssl=False, **kwargs):
        """
        :arg framed_transport: use `TTransport.TFramedTransport` instead of
            `TTransport.TBufferedTransport`
        """
        if not THRIFT_AVAILABLE:
            raise ImproperlyConfigured("Thrift is not available.")

        super(ThriftConnection, self).__init__(host=host, port=port, **kwargs)
        self._framed_transport = framed_transport
        self._tsocket_class = TSocket.TSocket
        if use_ssl:
            self._tsocket_class = TSSLSocket.TSSLSocket
        self._tsocket_args = (host, port)

    def _make_connection(self):
        socket = self._tsocket_class(*self._tsocket_args)
        socket.setTimeout(self.timeout * 1000.0)
        if self._framed_transport:
            transport = TTransport.TFramedTransport(socket)
        else:
            transport = TTransport.TBufferedTransport(socket)

        protocol = TBinaryProtocol.TBinaryProtocolAccelerated(transport)
        client = Rest.Client(protocol)
        transport.open()
        return client

    def perform_request(self, method, url, params=None, body=None, timeout=None, ignore=()):
        request = RestRequest(method=Method._NAMES_TO_VALUES[method.upper()], uri=url,
                              parameters=params, body=body)

        start = time.time()
        try:
            tclient = self._get_connection()
            response = tclient.execute(request)
            duration = time.time() - start
        except TException as e:
            self.log_request_fail(method, url, body, time.time() - start, exception=e)
            raise ConnectionError('N/A', str(e), e)
        finally:
            self._release_connection(tclient)

        if not (200 <= response.status < 300) and response.status not in ignore:
            self.log_request_fail(method, url, body, duration, response.status)
            self._raise_error(response.status, response.body)

        self.log_request_success(method, url, url, body, response.status,
                                 response.body, duration)

        return response.status, response.headers or {}, response.body
|
Python
| 0
|
@@ -2106,16 +2106,39 @@
 .time()
+        tclient = None
@@ -2466,16 +2466,44 @@
 inally:
+            if tclient:
|
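The fix initializes tclient before the try block and guards the finally clause, so a failure inside _get_connection() no longer raises UnboundLocalError while releasing the connection; reconstructed from the diff:

        start = time.time()
        tclient = None
        try:
            tclient = self._get_connection()
            response = tclient.execute(request)
            duration = time.time() - start
        except TException as e:
            self.log_request_fail(method, url, body, time.time() - start, exception=e)
            raise ConnectionError('N/A', str(e), e)
        finally:
            if tclient:
                self._release_connection(tclient)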
3d6fcb5c5ef05224f0129caf58507b555d17f35d
|
Fix indentation error in Flask
|
episode-2/flask/src/translation.py
|
episode-2/flask/src/translation.py
|
# -*- coding: utf-8 -*-
# Copyright 2016 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from watson_developer_cloud import LanguageTranslationV2 as LanguageTranslationService
def getTranslationService():
    return LanguageTranslationService(username='<your username key for the Watson language translation service>',
                                      password='<your password key for the service>')


def identifyLanguage(app, data):
    txt = data.encode("utf-8", "replace")
    language_translation = getTranslationService()
    langsdetected = language_translation.identify(txt)
    app.logger.info(json.dumps(langsdetected, indent=2))
    primarylang = langsdetected["languages"][0]
    retData = {key: primarylang[key] for key in ('language', 'confidence')}
    app.logger.info(json.dumps(retData, indent=2))
    return retData


def checkForTranslation(app, fromlang, tolang):
    supportedModels = []
    lt = getTranslationService()
    models = lt.list_models()
    modelList = models.get("models")
    supportedModels = [model['model_id'] for model in modelList
                       if fromlang == model['source']
                       and tolang == model['target']]
    return supportedModels


def performTranslation(app, txt, primarylang, targetlang):
    lt = getTranslationService()
    translation = lt.translate(txt, source=primarylang, target=targetlang)
      theTranslation = None
      if translation and ("translations" in translation):
          theTranslation = translation['translations'][0]['translation']
      return theTranslation
|
Python
| 0.000002
|
@@ -1878,18 +1878,16 @@
 etlang)
-  
 theTra
@@ -1902,18 +1902,16 @@
 = None
-  
 if tra
@@ -1960,18 +1960,16 @@
 n):
-  
 theTrans
@@ -2023,18 +2023,16 @@
 ation']
-  
 return
|
4026d575cac94d98f8fa5467674020b18442359d
|
Update h-index.py
|
Python/h-index.py
|
Python/h-index.py
|
# Time: O(nlogn)
# Space: O(1)
# Given an array of citations (each citation is a non-negative integer)
# of a researcher, write a function to compute the researcher's h-index.
#
# According to the definition of h-index on Wikipedia:
# "A scientist has index h if h of his/her N papers have
# at least h citations each, and the other N − h papers have
# no more than h citations each."
#
# For example, given citations = [3, 0, 6, 1, 5],
# which means the researcher has 5 papers in total
# and each of them had received 3, 0, 6, 1, 5 citations respectively.
# Since the researcher has 3 papers with at least 3 citations each and
# the remaining two with no more than 3 citations each, his h-index is 3.
#
# Note: If there are several possible values for h, the maximum one is taken as the h-index.
#
class Solution(object):
    def hIndex(self, citations):
        """
        :type citations: List[int]
        :rtype: int
        """
        citations.sort(reverse=True)
        h = 0
        for i, x in enumerate(citations):
            if x >= i + 1:
                h += 1
            else:
                break
        return h


# Time:  O(nlogn)
# Space: O(n)
class Solution2(object):
    def hIndex(self, citations):
        """
        :type citations: List[int]
        :rtype: int
        """
        sorted(citations, reverse=True)
        h = 0
        return sum(1 if x >= i + 1 else 0 for i, x in enumerate(sorted(citations, reverse=True)))
|
Python
| 0.000002
|
@@ -1312,62 +1312,8 @@
 """
-        sorted(citations, reverse=True)
-        h = 0
|
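The diff deletes two leftover lines (a no-op sorted() call whose result is discarded, and an unused h = 0), leaving Solution2 as a one-liner; a sketch of the result:

class Solution2(object):
    def hIndex(self, citations):
        return sum(1 if x >= i + 1 else 0
                   for i, x in enumerate(sorted(citations, reverse=True)))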
8deb3e45511950cc1a5d317f79f30bf59ed4821a
|
Update Changedate
|
changedate.py
|
changedate.py
|
""" Calcular Data a partir de uma quantidade de minutos """
MINUTOSENTRADA = 4000
OPERADOR = "+"
DATA_E, HORA_E = "31/12/2016 23:35".split(" ", 2)
DIA_E, MES_E, ANO_E = DATA_E.split("/", 3)
HR_E, MINU_E = HORA_E.split(":", 2)
# transformar tudo em minutos
# converter horas em minutos
MIN_TOT_E = (int(HR_E) * 60) + int(MINU_E) + MINUTOSENTRADA
print("Total de Minutos ", MIN_TOT_E)
# 5415 / 60 minutos = 90.25 = .25 * 60
TOTAL_HORAS = MIN_TOT_E / 60
# 90h e 15 mine
I, D = divmod(TOTAL_HORAS, 1)
TOTAL_MINUTOS = D * 60
# 90h / 24h = 3.75 3 dias
TOTAL_DIAS = TOTAL_HORAS / 24
I, D = divmod(TOTAL_DIAS, 1)
# 3d 3.75 (0.75 * 24) = 18 h
TOTAL_HORAS2 = D * 24
print(int(I), " Dias", int(TOTAL_HORAS2), " horas", int(TOTAL_MINUTOS), " minutos")
# 3d 18h e 15min
# 4000 min / 60 min = No. de horas 66.66
# 66h e 40 min ... peguei a dízima e multipliquei por 66*60
# Então fica assim...
# 66 h / 24 h = No. de dias
# Agora pego o número de dias
# 2d 2.75 (dizima 0.75 * 24)
# 0,75 * 24 = 18 h
# 2D 18H 40M
|
Python
| 0.000001
|
@@ -57,159 +57,257 @@
 """
-MINUTOSENTRADA = 4000
-OPERADOR = "+"
-DATA_E, HORA_E = "31/12/2016 23:35".split(" ", 2)
-DIA_E, MES_E, ANO_E = DATA_E.split("/", 3)
-HR_E, MINU_E = HORA_E
+
+def alterar_data(data_ent, op, minutos_ent):
+    """ Calcular nova data """
+    spl_Data_ent, spl_Hora_ent = data_ent.split(" ", 2)
+    spl_Dia_ent, spl_Mes_ent, spl_Ano_ent = spl_Data_ent.split("/", 3)
+    spl_Hora_ent, spl_Minu_ent = spl_Hora_ent
 .spl
@@ -318,16 +318,20 @@
 :", 2)
 
+    
 # transf
@@ -352,16 +352,20 @@
 minutos
+    
 # conver
@@ -388,70 +388,100 @@
 utos
-
-
-MIN_TOT_E = (int(HR_E) * 60) + int(MINU_E) + MINUTOSENTRADA
+ totais
+    Minutos_Totais = (int(spl_Hora_ent) * 60) + int(spl_Minu_ent) + minutos_ent
+    
 prin
@@ -508,19 +508,28 @@
 ", M
-IN_TOT_E)
+inutos_Totais)
+    
 # 54
@@ -557,28 +557,83 @@
 25 =
- .25 * 60
-TOTAL_HORA
+> separar inteiro de casas decimais 0.25 * 60 = 15
+    # HORAS_CONV_MINUTO
 S = 
@@ -643,25 +643,33 @@
 _TOT_E / 60
-
+    
+    
 # 90h e 15 m
@@ -674,10 +674,14 @@
 min
-e
 
+    #
 I, D
@@ -694,29 +694,41 @@
 mod(
-TOTAL_HORAS, 1)
-TOTAL
+HORAS_CONV_MINUTOS, 1)
+    #RESTO
 _MIN
@@ -741,17 +741,25 @@
 D * 60
-
+    
+    
 # 90h / 
@@ -773,15 +773,62 @@
 .75 
-3 dias
+=> separar inteiro de casas decimais = 0.75 / 24
+    #
 TOTA
@@ -836,16 +836,21 @@
 _DIAS = 
+QTDE_
 TOTAL_HO
@@ -858,17 +858,26 @@
 AS / 24
-
+    
+    #
 I, D = d
@@ -897,17 +897,25 @@
 IAS, 1)
-
+    
+    
 # 3d 3.7
@@ -936,16 +936,21 @@
 = 18 h
+    #
 TOTAL_HO
@@ -963,17 +963,26 @@
 D * 24
-
+    
+    #
 print(in
@@ -1062,266 +1062,80 @@
 ")
 
-
-# 3d 18h e 15min
-# 4000 min / 60 min = No. de horas 66.66
-# 66h e 40 min ... peguei a dízima e multipliquei por 66*60
-# Então fica assim...
-# 66 h / 24 h = No. de dias
-# Agora pego o número de dias
-# 2d 2.75 (dizima 0.75 * 24)
-# 0,75 * 24 = 18 h
-# 2D 18H 40M
+if __name__ == ("__main__"):
+    alterar_data("31/12/2016 23:35","+", 4000)
 
|
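Decoded, the commit wraps the script in a function and drops the trailing worked-example comments; the new entry points read roughly like this (a sketch decoded from the diff; inner comments stay in Portuguese, as in the source):

def alterar_data(data_ent, op, minutos_ent):
    """ Calcular nova data """
    spl_Data_ent, spl_Hora_ent = data_ent.split(" ", 2)
    spl_Dia_ent, spl_Mes_ent, spl_Ano_ent = spl_Data_ent.split("/", 3)
    spl_Hora_ent, spl_Minu_ent = spl_Hora_ent.split(":", 2)

    # converter horas em minutos totais
    Minutos_Totais = (int(spl_Hora_ent) * 60) + int(spl_Minu_ent) + minutos_ent
    print("Total de Minutos ", Minutos_Totais)
    # ... remaining conversion steps, mostly commented out in the patch ...

if __name__ == ("__main__"):
    alterar_data("31/12/2016 23:35","+", 4000)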
174f0986900fbd56ed3b2af9ca3dacc0de1aa502
|
increase connect timeout for nodes and also added some logs to cli so that it can be asserted in sovrin client (#66)
|
plenum/config.py
|
plenum/config.py
|
import os
import sys
from collections import OrderedDict
from plenum.common.txn import ClientBootStrategy
from plenum.common.types import PLUGIN_TYPE_STATS_CONSUMER
# Each entry in registry is (stack name, ((host, port), verkey, pubkey))
nodeReg = OrderedDict([
    ('Alpha', ('127.0.0.1', 9701)),
    ('Beta', ('127.0.0.1', 9703)),
    ('Gamma', ('127.0.0.1', 9705)),
    ('Delta', ('127.0.0.1', 9707))
])

cliNodeReg = OrderedDict([
    ('AlphaC', ('127.0.0.1', 9702)),
    ('BetaC', ('127.0.0.1', 9704)),
    ('GammaC', ('127.0.0.1', 9706)),
    ('DeltaC', ('127.0.0.1', 9708))
])
baseDir = "~/.plenum/"
keyringsDir = "keyrings"
nodeDataDir = "data/nodes"
clientDataDir = "data/clients"
domainTransactionsFile = "transactions_sandbox"
poolTransactionsFile = "pool_transactions_sandbox"
walletDir = "wallet"
clientBootStrategy = ClientBootStrategy.PoolTxn
hashStore = {
    "type": "file"
}
primaryStorage = None
secondaryStorage = None
OrientDB = {
    "user": "root",
    "password": "password",
    "host": "127.0.0.1",
    "port": 2424
}

DefaultPluginPath = {
    # PLUGIN_BASE_DIR_PATH: "<abs path of plugin directory can be given here,
    # if not given, by default it will pickup plenum/server/plugin path>",
    PLUGIN_TYPE_STATS_CONSUMER: "stats_consumer"
}
PluginsDir = "plugins"
stewardThreshold = 20
# Monitoring configuration
PerfCheckFreq = 10
DELTA = 0.8
LAMBDA = 60
OMEGA = 5
SendMonitorStats = True
ThroughputWindowSize = 30
DashboardUpdateFreq = 5
ThroughputGraphDuration = 240
LatencyWindowSize = 30
LatencyGraphDuration = 240
notifierEventTriggeringConfig = {
    'clusterThroughputSpike': {
        'coefficient': 3,
        'minCnt': 100,
        'freq': 60
    },
    'nodeRequestSpike': {
        'coefficient': 3,
        'minCnt': 100,
        'freq': 60
    }
}
# Stats server configuration
STATS_SERVER_IP = '127.0.0.1'
STATS_SERVER_PORT = 30000
STATS_SERVER_MESSAGE_BUFFER_MAX_SIZE = 1000
RAETLogLevel = "terse"
RAETLogLevelCli = "mute"
RAETLogFilePath = os.path.join(os.path.expanduser(baseDir), "raet.log")
RAETLogFilePathCli = None
RAETMessageTimeout = 60
ViewChangeWindowSize = 60
# Timeout factor after which a node starts requesting consistency proofs if has
# not found enough matching
ConsistencyProofsTimeout = 5
# Timeout factor after which a node starts requesting transactions
CatchupTransactionsTimeout = 5
# Log configuration
logRotationWhen = 'D'
logRotationInterval = 1
logRotationBackupCount = 10
logRotationMaxBytes = 100 * 1024 * 1024
logFormat = '{asctime:s} | {levelname:8s} | {filename:20s} ({lineno:d}) | {funcName:s} | {message:s}'
logFormatStyle='{'
# OPTIONS RELATED TO TESTS
# Expected time for one stack to get connected to another
ExpectedConnectTime = 3.3 if sys.platform == 'win32' else 1.1
# After ordering every `CHK_FREQ` requests, replica sends a CHECKPOINT
CHK_FREQ = 100
# Difference between low water mark and high water mark
LOG_SIZE = 3*CHK_FREQ
CLIENT_REQACK_TIMEOUT = 5
CLIENT_REPLY_TIMEOUT = 10
CLIENT_MAX_RETRY_ACK = 5
CLIENT_MAX_RETRY_REPLY = 5
# The client when learns of new nodes or any change in configuration of
# other nodes, updates the genesis pool transaction file if this option is set
# to True. This option is overwritten by default for tests to keep multiple
# clients from reading an updated pool transaction file, this helps us
# emulate clients on different machines.
UpdateGenesisPoolTxnFile = True
# Since the ledger is stored in a flat file, this makes the ledger do
# an fsync on every write. Making it True can significantly slow
# down writes as shown in a test `test_file_store_perf.py` in the ledger
# repository
EnsureLedgerDurability = True
|
Python
| 0.000002
|
@@ -2774,17 +2774,17 @@
 else 1.
-1
+4
 
 # Afte
|
680d0ab93247b499b62c502b39ace6bdf9b19ea3
|
use the same type tag for all reference types
|
plic/Rapicorn.py
|
plic/Rapicorn.py
|
#!/usr/bin/env python
# plic - Pluggable IDL Compiler -*-mode:python-*-
# Copyright (C) 2008 Tim Janik
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""PLIC-Rapicorn - Rapicorn type generator for PLIC
More details at http://www.testbit.eu/.
"""
import Decls
def encode_int (int):
  if int < 1000:
    return "#%03u" % int
  assert int < 268435456
  chars = (128 + (int >> 21),
           128 + ((int >> 14) & 0x7f),
           128 + ((int >> 7) & 0x7f),
           128 + (int & 0x7f))
  return "%c%c%c%c" % chars

def encode_string (string):
  return encode_int (len (string)) + string

def strcquote (string):
  result = ''
  for c in string:
    oc = ord (c)
    ec = { 92 : r'\\',
           7  : r'\a',
           8  : r'\b',
           9  : r'\t',
           10 : r'\n',
           11 : r'\v',
           12 : r'\f',
           13 : r'\r',
           12 : r'\f'
         }.get (oc, '')
    if ec:
      result += ec
      continue
    if oc <= 31 or oc >= 127:
      result += '\\' + oct (oc)[-3:]
    elif c == '"':
      result += r'\"'
    else:
      result += c
  return '"' + result + '"'

class Generator:
  def aux_strings (self, auxlist):
    result = encode_int (len (auxlist))
    for ad in auxlist:
      result += encode_string (ad)
    return result
  def type_key (self, type_info):
    s = { Decls.NUM       : '___i',
          Decls.REAL      : '___f',
          Decls.STRING    : '___s',
          Decls.ENUM      : '___E',
          Decls.SEQUENCE  : '___Q',
          Decls.RECORD    : '___R',
          Decls.INTERFACE : '___C',
        }[type_info.storage]
    return s
  def type_info (self, type_name, type_info):
    tp = type_info
    aux = []
    s = self.type_key (tp)
    s += encode_string (type_name)
    s += self.aux_strings (aux)
    if tp.storage in (Decls.ENUM, Decls.SEQUENCE,
                      Decls.RECORD, Decls.INTERFACE):
      s += encode_string (tp.full_name())
    return encode_int (len (s)) + s
  def type_decl_key (self, type_info):
    s = { Decls.ENUM      : '___e',
          Decls.SEQUENCE  : '___q',
          Decls.RECORD    : '___r',
          Decls.INTERFACE : '___c',
        }.get (type_info.storage, self.type_key (type_info))
    return s
  def type_decl (self, type_info):
    tp = type_info
    s = self.type_decl_key (type_info)
    s += encode_string (type_info.name)
    aux = []
    s += self.aux_strings (aux)
    if tp.storage == Decls.SEQUENCE:
      s += self.type_info (tp.elements[0], tp.elements[1])
    elif tp.storage == Decls.RECORD:
      s += encode_int (len (tp.fields))
      for fl in tp.fields:
        s += self.type_info (fl[0], fl[1])
    elif tp.storage == Decls.ENUM:
      s += encode_int (len (tp.options))
      for op in tp.options:
        s += encode_string (op[0]) # ident
        s += encode_string (op[1]) # label
        s += encode_string (op[2]) # blurb
    elif tp.storage == Decls.INTERFACE:
      tp.prerequisites = []
      s += encode_int (len (tp.prerequisites))
      for pr in prerequisites:
        s += encode_string (pr)
    return encode_int (len (s)) + s
  def namespace_string (self, namespace):
    s = '__NS'
    s += encode_string (namespace.name)
    t = ''
    for tp in namespace.types():
      t += self.type_decl (tp)
    s += encode_int (len (t))
    s += t
    return s
  def generate_pack (self, namespace_list):
    s = 'Rapicorn\r\n ' # magic
    s += '_TP0'
    t = ''
    for ns in namespace_list:
      t += self.namespace_string (ns)
    s += encode_int (len (t))
    s += t
    return s

def generate (namespace_list, *args):
  gg = Generator()
  print "Type-Package:"
  print strcquote (gg.generate_pack (namespace_list))

# control module exports
__all__ = ['generate']
|
Python
| 0
|
@@ -2056,17 +2056,17 @@
  : '___
-E
+e
 ',
@@ -2092,17 +2092,17 @@
  : '___
-Q
+q
 ',
@@ -2128,17 +2128,17 @@
  : '___
-R
+r
 ',
@@ -2164,17 +2164,17 @@
 E : '___
-C
+c
 ',
@@ -2204,32 +2204,112 @@
 e]
   return s
+  reference_types = (Decls.ENUM, Decls.SEQUENCE, Decls.RECORD, Decls.INTERFACE)
 def type_info
@@ -2366,24 +2366,96 @@
 aux = []
+    if tp.storage in self.reference_types:
+      s = '___V'
+    else:
+      
 s = self
@@ -2722,267 +2722,8 @@
  + s
-
-  def type_decl_key (self, type_info):
-    s = { Decls.ENUM      : '___e',
-          Decls.SEQUENCE  : '___q',
-          Decls.RECORD    : '___r',
-          Decls.INTERFACE : '___c',
-        }.get (type_info.storage, self.type_key (type_info))
-    return s
 de
@@ -2794,13 +2794,8 @@
 ype_
-decl_
 key
|
ed34c43b6ce270ada7d523ef07e9b2087d779cb9
|
remove explicit random.seed()
|
plugins/games.py
|
plugins/games.py
|
"""
games.py: Create a bot that provides game functionality (dice, 8ball, etc).
"""
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import random
import urllib.request
import urllib.error
from xml.etree import ElementTree
import utils
from log import log
import world
gameclient = utils.registerService("Games", manipulatable=True)
reply = gameclient.reply # TODO find a better syntax for ServiceBot.reply()
# commands
def dice(irc, source, args):
    """<num>d<sides>

    Rolls a die with <sides> sides <num> times.
    """
    if not args:
        reply(irc, "No string given.")
        return

    try:
        # Split num and sides and convert them to int.
        num, sides = map(int, args[0].split('d', 1))
    except ValueError:
        # Invalid syntax. Show the command help.
        gameclient.help(irc, source, ['dice'])
        return

    assert 1 < sides <= 100, "Invalid side count (must be 2-100)."
    assert 1 <= num <= 100, "Cannot roll more than 100 dice at once."

    results = []
    for _ in range(num):
        results.append(random.randint(1, sides))

    # Convert results to strings, join them, format, and reply.
    s = 'You rolled %s: %s (total: %s)' % (args[0], ' '.join([str(x) for x in results]), sum(results))
    reply(irc, s)

gameclient.add_cmd(dice, 'd')
gameclient.add_cmd(dice)

eightball_responses = ["It is certain.",
                       "It is decidedly so.",
                       "Without a doubt.",
                       "Yes, definitely.",
                       "You may rely on it.",
                       "As I see it, yes.",
                       "Most likely.",
                       "Outlook good.",
                       "Yes.",
                       "Signs point to yes.",
                       "Reply hazy, try again.",
                       "Ask again later.",
                       "Better not tell you now.",
                       "Cannot predict now.",
                       "Concentrate and ask again.",
                       "Don't count on it.",
                       "My reply is no.",
                       "My sources say no.",
                       "Outlook not so good.",
                       "Very doubtful."]

def eightball(irc, source, args):
    """[<question>]

    Asks the Magic 8-ball a question.
    """
    reply(irc, random.choice(eightball_responses))

gameclient.add_cmd(eightball)
gameclient.add_cmd(eightball, '8ball')
gameclient.add_cmd(eightball, '8b')

def fml(irc, source, args):
    """[<id>]

    Displays an entry from fmylife.com. If <id>
    is not given, fetch a random entry from the API."""
    try:
        query = args[0]
    except IndexError:
        # Get a random FML from the API.
        query = 'random'

    # TODO: configurable language?
    url = ('http://api.betacie.com/view/%s/nocomment'
           '?key=4be9c43fc03fe&language=en' % query)
    try:
        data = urllib.request.urlopen(url).read()
    except urllib.error as e:
        reply(irc, 'Error: %s' % e)
        return

    tree = ElementTree.fromstring(data.decode('utf-8'))
    tree = tree.find('items/item')

    try:
        category = tree.find('category').text
        text = tree.find('text').text
        fmlid = tree.attrib['id']
        url = tree.find('short_url').text
    except AttributeError as e:
        log.debug("games.FML: Error fetching FML %s from URL %s: %s",
                  query, url, e)
        reply(irc, "Error: That FML does not exist or there was an error "
                   "fetching data from the API.")
        return

    if not fmlid:
        reply(irc, "Error: That FML does not exist.")
        return

    # TODO: customizable formatting
    votes = "\x02[Agreed: %s / Deserved: %s]\x02" % \
        (tree.find('agree').text, tree.find('deserved').text)
    s = '\x02#%s [%s]\x02: %s - %s \x02<\x0311%s\x03>\x02' % \
        (fmlid, category, text, votes, url)
    reply(irc, s)

gameclient.add_cmd(fml)

# loading
def main(irc=None):
    """Main function, called during plugin loading at start."""

    # seed the random
    random.seed()

def die(irc):
    utils.unregisterService('games')
|
Python
| 0
|
@@ -3806,144 +3806,8 @@
 l)
 
-# loading
-def main(irc=None):
-    """Main function, called during plugin loading at start."""
-
-    # seed the random
-    random.seed()
-
 def
|
62a2b5ab62a5c1080cdc30e3334cc62f4a51d6a9
|
Make job mode API update change.
|
eurekaclinical/analytics/client.py
|
eurekaclinical/analytics/client.py
|
from eurekaclinical import APISession, API, Struct, construct_api_session_context_manager
class Job(Struct):
    def __init__(self):
        super(Job, self).__init__()
        self.sourceConfigId = None
        self.destinationId = None
        self.dateRangePhenotypeKey = None
        self.earliestDate = None
        self.earliestDateSide = 'START'
        self.latestDate = None
        self.latestDateSide = 'START'
        self.updateData = False
        self.prompts = None
        self.propositionIds = []
        self.name = None


class Users(API):
    def __init__(self, *args, **kwargs):
        super(Users, self).__init__('/users/', *args, **kwargs)

    def me(self):
        return self._get(self.rest_endpoint + "me")


class Phenotypes(API):
    def __init__(self, *args, **kwargs):
        super(Phenotypes, self).__init__('/phenotypes/', *args, **kwargs)


class Concepts(API):
    def __init__(self, *args, **kwargs):
        super(Concepts, self).__init__('/concepts/', *args, **kwargs)

    def get(self, key, summarize=False):
        return self._get(self.rest_endpoint + key + "?summarize=" + str(summarize))


class Jobs(API):
    def __init__(self, *args, **kwargs):
        super(Jobs, self).__init__('/jobs/', *args, **kwargs)

    def submit(self, job):
        return self._post(self.rest_endpoint, job)


class AnalyticsSession(APISession):
    def __init__(self, cas_session,
                 api_url='https://localhost:8000/eureka-webapp', verify_api_cert=True):
        super(AnalyticsSession, self).__init__(cas_session, api_url=api_url, verify_api_cert=verify_api_cert)
        self.__api_args = (cas_session, verify_api_cert, api_url)

    @property
    def users(self):
        return Users(*self.__api_args)

    @property
    def phenotypes(self):
        return Phenotypes(*self.__api_args)

    @property
    def concepts(self):
        return Concepts(*self.__api_args)

    @property
    def jobs(self):
        return Jobs(*self.__api_args)


get_session = construct_api_session_context_manager(AnalyticsSession)
|
Python
| 0
|
@@ -433,26 +433,26 @@
 elf.
-updateData = False
+jobMode = 'UPDATE'
 
|
12223dfbc26b72c400b5436e53099ade27699086
|
fix #796
|
examples/ImageNetModels/alexnet.py
|
examples/ImageNetModels/alexnet.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: alexnet.py
import argparse
import os
import cv2
import numpy as np
import tensorflow as tf
from tensorpack import *
from tensorpack.dataflow import imgaug
from tensorpack.tfutils import argscope
from tensorpack.utils.gpu import get_num_gpu
from imagenet_utils import ImageNetModel, get_imagenet_dataflow
def visualize_conv1_weights(filters):
    ctx = get_current_tower_context()
    if not ctx.is_main_training_tower:
        return
    with tf.name_scope('visualize_conv1'):
        filters = tf.reshape(filters, [11, 11, 3, 8, 12])
        filters = tf.transpose(filters, [3, 0, 4, 1, 2])  # 8,11,12,11,3
        filters = tf.reshape(filters, [1, 88, 132, 3])
        tf.summary.image('visualize_conv1', filters, max_outputs=1, collections=['AAA'])


class Model(ImageNetModel):
    weight_decay = 5e-4
    data_format = 'NHWC'  # LRN only supports NHWC

    def get_logits(self, image):
        gauss_init = tf.random_normal_initializer(stddev=0.01)
        with argscope(Conv2D,
                      kernel_initializer=tf.variance_scaling_initializer(scale=2.)), \
                argscope([Conv2D, FullyConnected], activation=tf.nn.relu), \
                argscope([Conv2D, MaxPooling], data_format='channels_last'):
            # necessary padding to get 55x55 after conv1
            image = tf.pad(image, [[0, 0], [2, 2], [2, 2], [0, 0]])
            l = Conv2D('conv1', image, filters=96, kernel_size=11, strides=4, padding='VALID')
            # size: 55
            visualize_conv1_weights(l.variables.W)
            l = tf.nn.lrn(l, 2, bias=1.0, alpha=2e-5, beta=0.75, name='norm1')
            l = MaxPooling('pool1', l, 3, strides=2, padding='VALID')
            # 27
            l = Conv2D('conv2', l, filters=256, kernel_size=5, split=2)
            l = tf.nn.lrn(l, 2, bias=1.0, alpha=2e-5, beta=0.75, name='norm2')
            l = MaxPooling('pool2', l, 3, strides=2, padding='VALID')
            # 13
            l = Conv2D('conv3', l, filters=384, kernel_size=3)
            l = Conv2D('conv4', l, filters=384, kernel_size=3, split=2)
            l = Conv2D('conv5', l, filters=256, kernel_size=3, split=2)
            l = MaxPooling('pool3', l, 3, strides=2, padding='VALID')

            l = FullyConnected('fc6', l, 4096,
                               kernel_initializer=gauss_init,
                               bias_initializer=tf.ones_initializer())
            l = Dropout(l, rate=0.5)
            l = FullyConnected('fc7', l, 4096, kernel_initializer=gauss_init)
            l = Dropout(l, rate=0.5)
        logits = FullyConnected('fc8', l, 1000, kernel_initializer=gauss_init)
        return logits


def get_data(name, batch):
    isTrain = name == 'train'

    if isTrain:
        augmentors = [
            imgaug.ResizeShortestEdge(256, cv2.INTER_CUBIC),
            imgaug.RandomCrop(224),
            imgaug.Lighting(0.1,
                            eigval=np.asarray(
                                [0.2175, 0.0188, 0.0045][::-1]) * 255.0,
                            eigvec=np.array(
                                [[-0.5675, 0.7192, 0.4009],
                                 [-0.5808, -0.0045, -0.8140],
                                 [-0.5836, -0.6948, 0.4203]],
                                dtype='float32')[::-1, ::-1]),
            imgaug.Flip(horiz=True)]
    else:
        augmentors = [
            imgaug.ResizeShortestEdge(256, cv2.INTER_CUBIC),
            imgaug.CenterCrop((224, 224))]
    return get_imagenet_dataflow(args.data, name, batch, augmentors)


def get_config():
    nr_tower = max(get_nr_gpu(), 1)
    batch = args.batch
    total_batch = batch * nr_tower

    if total_batch != 128:
        logger.warn("AlexNet needs to be trained with a total batch size of 128.")
    BASE_LR = 0.01 * (total_batch / 128.)
    logger.info("Running on {} towers. Batch size per tower: {}".format(nr_tower, batch))

    dataset_train = get_data('train', batch)
    dataset_val = get_data('val', batch)

    infs = [ClassificationError('wrong-top1', 'val-error-top1'),
            ClassificationError('wrong-top5', 'val-error-top5')]
    callbacks = [
        ModelSaver(),
        GPUUtilizationTracker(),
        EstimatedTimeLeft(),
        ScheduledHyperParamSetter(
            'learning_rate',
            [(30, BASE_LR * 1e-1), (60, BASE_LR * 1e-2), (80, BASE_LR * 1e-3)]),
        DataParallelInferenceRunner(
            dataset_val, infs, list(range(nr_tower))),
    ]
    return TrainConfig(
        model=Model(),
        data=StagingInput(QueueInput(dataset_train)),
        callbacks=callbacks,
        steps_per_epoch=1281167 // total_batch,
        max_epoch=100,
    )


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
    parser.add_argument('--data', help='ILSVRC dataset dir')
    parser.add_argument('--batch', type=int, default=32, help='batch per GPU')
    args = parser.parse_args()

    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    logger.set_logger_dir(os.path.join('train_log', 'AlexNet'))
    config = get_config()
    nr_tower = max(get_num_gpu(), 1)
    trainer = SyncMultiGPUTrainerReplicated(nr_tower)
    launch_train_with_config(config, trainer)
|
Python
| 0
|
@@ -3612,17 +3612,18 @@
 ax(get_n
-r
+um
 _gpu(), 
@@ -4322,16 +4322,30 @@
 [
+(0, BASE_LR), 
 (30, BAS
|
5294abf9253a077c0a23fe0e573174afd3728ac7
|
fix caching in test
|
corehq/ex-submodules/casexml/apps/stock/tests/test_logistics_consumption.py
|
corehq/ex-submodules/casexml/apps/stock/tests/test_logistics_consumption.py
|
import uuid
from decimal import Decimal
from django.test import TestCase
from casexml.apps.case.models import CommCareCase
from casexml.apps.stock.models import StockReport, StockTransaction
from casexml.apps.stock.tests import ago
from casexml.apps.stock import const
from corehq.apps.commtrack.models import CommtrackConfig, ConsumptionConfig
from corehq.apps.domain.models import Domain
from corehq.apps.products.models import SQLProduct
class LogisticsConsumptionTest(TestCase):
    @classmethod
    def setUpClass(cls):
        cls.domain = Domain(name='test')
        cls.domain.save()
        cls.case_id = uuid.uuid4().hex
        CommCareCase(
            _id=cls.case_id,
            domain='fakedomain',
        ).save()
        cls.product_id = uuid.uuid4().hex
        SQLProduct(product_id=cls.product_id).save()

    def create_transactions(self, domain=None):
        report = StockReport.objects.create(
            form_id=uuid.uuid4().hex,
            date=ago(2),
            type=const.REPORT_TYPE_BALANCE,
            domain=domain
        )
        txn = StockTransaction(
            report=report,
            section_id=const.SECTION_TYPE_STOCK,
            type=const.TRANSACTION_TYPE_STOCKONHAND,
            case_id=self.case_id,
            product_id=self.product_id,
            stock_on_hand=Decimal(10),
        )
        txn.save()

        report2 = StockReport.objects.create(
            form_id=uuid.uuid4().hex,
            date=ago(1),
            type=const.REPORT_TYPE_BALANCE,
            domain=domain
        )
        txn2 = StockTransaction(
            report=report2,
            section_id=const.SECTION_TYPE_STOCK,
            type=const.TRANSACTION_TYPE_STOCKONHAND,
            case_id=self.case_id,
            product_id=self.product_id,
            stock_on_hand=Decimal(30),
        )
        txn2.save()

    def setUp(self):
        StockTransaction.objects.all().delete()
        StockReport.objects.all().delete()

    def test_report_without_config(self):
        self.create_transactions(self.domain.name)
        self.assertEqual(StockTransaction.objects.all().count(), 3)
        receipts = StockTransaction.objects.filter(type='receipts')
        self.assertEqual(receipts.count(), 1)
        self.assertEqual(receipts[0].quantity, 20)

    def test_report_without_domain(self):
        self.create_transactions()
        self.assertEqual(StockTransaction.objects.all().count(), 3)
        receipts = StockTransaction.objects.filter(type='receipts')
        self.assertEqual(receipts.count(), 1)
        self.assertEqual(receipts[0].quantity, 20)

    def test_report_with_exclude_disabled(self):
        commtrack_config = CommtrackConfig(domain=self.domain.name)
        commtrack_config.consumption_config = ConsumptionConfig()
        commtrack_config.save()
        self.create_transactions(self.domain.name)
        self.assertEqual(StockTransaction.objects.all().count(), 3)
        self.assertEqual(StockTransaction.objects.filter(type='receipts').count(), 1)
        commtrack_config.delete()

    def test_report_with_exclude_enabled(self):
        commtrack_config = CommtrackConfig(domain=self.domain.name)
        commtrack_config.consumption_config = ConsumptionConfig(exclude_invalid_periods=True)
        commtrack_config.save()
        self.create_transactions(self.domain.name)
        self.assertEqual(StockTransaction.objects.all().count(), 2)
        self.assertEqual(StockTransaction.objects.filter(type='receipts').count(), 0)
        commtrack_config.delete()
|
Python
| 0.000001
|
@@ -262,16 +262,93 @@
 t const
+from corehq.apps.commtrack.consumption import should_exclude_invalid_periods
 from cor
@@ -2046,32 +2046,171 @@
 all().delete()
 
+    def tearDown(self):
+        should_exclude_invalid_periods.clear(self.domain.name)
+        should_exclude_invalid_periods.clear(None)
+
     def test_rep
|
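Decoded, the fix imports the memoized helper and clears its cache between tests. A sketch of the two additions: at module level,

from corehq.apps.commtrack.consumption import should_exclude_invalid_periods

and, inside LogisticsConsumptionTest,

    def tearDown(self):
        should_exclude_invalid_periods.clear(self.domain.name)
        should_exclude_invalid_periods.clear(None)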
91076c30d16d27ecfe9a8c347a9bf74b9560c5ce
|
Send updated items to webhooks. (#1680)
|
pogom/webhook.py
|
pogom/webhook.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
import requests
from .utils import get_args
from requests.packages.urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter
log = logging.getLogger(__name__)
def send_to_webhook(message_type, message):
    args = get_args()

    if not args.webhooks:
        # What are you even doing here...
        log.warning('Called send_to_webhook() without webhooks.')
        return

    # Config / arg parser
    num_retries = args.wh_retries
    req_timeout = args.wh_timeout
    backoff_factor = args.wh_backoff_factor

    # Use requests & urllib3 to auto-retry.
    # If the backoff_factor is 0.1, then sleep() will sleep for [0.1s, 0.2s,
    # 0.4s, ...] between retries. It will also force a retry if the status
    # code returned is 500, 502, 503 or 504.
    session = requests.Session()

    # If any regular response is generated, no retry is done. Without using
    # the status_forcelist, even a response with status 500 will not be
    # retried.
    retries = Retry(total=num_retries, backoff_factor=backoff_factor,
                    status_forcelist=[500, 502, 503, 504])

    # Mount handler on both HTTP & HTTPS.
    session.mount('http://', HTTPAdapter(max_retries=retries))
    session.mount('https://', HTTPAdapter(max_retries=retries))

    data = {
        'type': message_type,
        'message': message
    }

    for w in args.webhooks:
        try:
            session.post(w, json=data, timeout=(None, req_timeout))
        except requests.exceptions.ReadTimeout:
            log.exception('Response timeout on webhook endpoint %s.', w)
        except requests.exceptions.RequestException as e:
            log.exception(e)


def wh_updater(args, queue, key_cache):
    # The forever loop.
    while True:
        try:
            # Loop the queue.
            whtype, message = queue.get()

            # Extract the proper identifier.
            ident_fields = {
                'pokestop': 'pokestop_id',
                'pokemon': 'encounter_id',
                'gym': 'gym_id'
            }
            ident = message.get(ident_fields.get(whtype))

            # Only send if identifier isn't already in cache.
            if ident is None:
                log.warning(
                    'Trying to send webhook item of invalid type: %s.', whtype)
            elif ident not in key_cache:
                key_cache[ident] = 1
                log.debug('Sending %s to webhook: %s.', whtype, ident)
                send_to_webhook(whtype, message)
            else:
                # Make sure to call key_cache[ident] so it updates the LFU
                # usage count. We just use it as a count for now, can come in
                # useful for stats/debugging later.
                key_cache[ident] = key_cache[ident] + 1
                log.debug('Not resending %s to webhook: %s.', whtype, ident)

            # Webhook queue moving too slow.
            if queue.qsize() > 50:
                log.warning(
                    'Webhook queue is > 50 (@%d); try increasing --wh-threads.', queue.qsize())

            queue.task_done()
        except Exception as e:
            log.exception('Exception in wh_updater: %s.', e)
|
Python
| 0
|
@@ -2427,17 +2427,23 @@
 dent] = 
-1
+message
@@ -2630,13 +2630,47 @@
 nt] 
-so it
+in all branches so it
+                # 
 upd
@@ -2681,16 +2681,30 @@
 the LFU
+ usage count.
 
@@ -2718,176 +2718,414 @@
 #
-usage count. We just use i
+If the objec
 t
+ h
 as
- a count for now, can come in
-                # useful for stats/debugging later.
-                key_cache[ident] = key_cache[ident] + 1
+ changed in an important way, send new data
+                # to webhooks.
+                if __wh_object_changed(whtype, key_cache[ident], message):
+                    key_cache[ident] = message
+                    send_to_webhook(whtype, message)
+                    log.debug('Sending updated %s to webhook: %s.',
+                              whtype, ident)
+                else:
@@ -3173,32 +3173,62 @@
 o webhook: %s.',
+
                  whtype, ident)
@@ -3556,8 +3556,1159 @@
 s.', e)
+
+
+# Helpers
+
+# Determine if a webhook object has changed in any important way (and
+# requires a resend).
+def __wh_object_changed(whtype, old, new):
+    # Only test for important fields: don't trust last_modified fields.
+    if whtype == 'pokestop':
+        # lure_expiration is a UTC timestamp so it's good (Y).
+        fields = ['enabled', 'latitude', 'longitude',
+                  'lure_expiration', 'active_fort_modifier']
+    elif whtype == 'pokemon':
+        fields = ['spawnpoint_id', 'pokemon_id', 'latitude', 'longitude', 'disappear_time',
+                  'move_1', 'move_2', 'individual_stamina', 'individual_defense', 'individual_attack']
+    elif whtype == 'gym':
+        fields = ['team_id', 'guard_pokemon_id',
+                  'gym_points', 'enabled', 'latitude', 'longitude']
+    else:
+        log.critical('Received an object of unknown type %s.', whtype)
+        return False
+
+    return not __dict_fields_equal(fields, old, new)
+
+
+# Determine if two dicts have equal values for all keys in a list.
+def __dict_fields_equal(keys, a, b):
+    for k in keys:
+        if a.get(k) != b.get(k):
+            return False
+
+    return True
|
11cc0c5f8aae526eddb372fbe339f649f2c654eb
|
Update pattern for inline comments to allow anything after '#'
|
poyo/patterns.py
|
poyo/patterns.py
|
# -*- coding: utf-8 -*-
INDENT = r"(?P<indent>^ *)"
VARIABLE = r"(?P<variable>.+):"
VALUE = r"(?P<value>((?P<q2>['\"]).*?(?P=q2))|[^#]+?)"
NEWLINE = r"$\n"
BLANK = r" +"
INLINE_COMMENT = r"( +#\w*)?"
COMMENT = r"^ *#.*" + NEWLINE
BLANK_LINE = r"^[ \t]*" + NEWLINE
SECTION = INDENT + VARIABLE + INLINE_COMMENT + NEWLINE
SIMPLE = INDENT + VARIABLE + BLANK + VALUE + INLINE_COMMENT + NEWLINE
NULL = r"null|Null|NULL|~"
TRUE = r"true|True|TRUE"
FALSE = r"false|False|FALSE"
INT = r"[-+]?[0-9]+"
FLOAT = r"([-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?)"
STR = r"(?P<quotes>['\"]?).*(?P=quotes)"
|
Python
| 0
|
@@ -191,10 +191,9 @@
 ( +#
-\w
+.
 *)?"
|
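Decoded, the change relaxes the inline-comment pattern from \w* to .*, so anything may follow the '#':

INLINE_COMMENT = r"( +#.*)?"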
0f9b5bdba841d707e236bb8ed8df5ba4aa7806c2
|
Allow a None value for os_config_path.
|
praw/settings.py
|
praw/settings.py
|
# This file is part of PRAW.
#
# PRAW is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# PRAW is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# PRAW. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
from praw.compat import configparser # pylint: disable-msg=E0611
def _load_configuration():
    config = configparser.RawConfigParser()
    module_dir = os.path.dirname(sys.modules[__name__].__file__)
    if 'APPDATA' in os.environ:  # Windows
        os_config_path = os.environ['APPDATA']
    elif 'XDG_CONFIG_HOME' in os.environ:  # Modern Linux
        os_config_path = os.environ['XDG_CONFIG_HOME']
    else:  # Legacy Linux
        os_config_path = os.path.join(os.environ['HOME'], '.config')
    locations = [os.path.join(module_dir, 'praw.ini'),
                 os.path.join(os_config_path, 'praw.ini'),
                 'praw.ini']
    if not config.read(locations):
        raise Exception('Could not find config file in any of: %s' % locations)
    return config


CONFIG = _load_configuration()
|
Python
| 0
|
@@ -1072,12 +1072,32 @@
 el
-se:
+if 'HOME' in os.environ:
  # L
@@ -1177,16 +1177,56 @@
 onfig')
+    else:
+        os_config_path = None
 loca
@@ -1263,32 +1263,33 @@
 ir, 'praw.ini'),
+ 
 
@@ -1293,67 +1293,110 @@
-                 os.path.join(os_config_path, 'praw.ini'),
+'praw.ini']
+    if os_config_path is not None:
+        locations.insert(1,os.path.join(os_config_path,
 'pr
@@ -1394,33 +1394,34 @@
 path, 'praw.ini'
-]
+))
 
 if not conf
|
28c3f4e17a1f7a003d75353005cd96c854bd30d9
|
replace lb:no_nsfw
|
cogs/nekos.py
|
cogs/nekos.py
|
"""
The MIT License (MIT)
Copyright (c) 2018 tilda
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
# noinspection PyPackageRequirements
import discord
import json
# noinspection PyPackageRequirements
from discord.ext import commands
# noinspection PyPackageRequirements
import utils.errors
from cogs.utils.plainreq import get_req
from cogs.utils.endpoints import nekos
class Animemes:
def __init__(self, bot):
self.bot = bot
self.config = json.load(open('config.json'))
@commands.command()
async def neko(self, ctx):
"""Shows a neko"""
async with get_req(ctx.bot.session, nekos['sfw']) as neko:
if neko.status == 200:
img = await neko.json()
neko_em = discord.Embed(colour=0x690E8)
neko_em.set_image(url=img['neko'])
neko_em.set_footer(text='source: nekos.life')
await ctx.send(embed=neko_em)
else:
raise utils.errors.ServiceError(f'dude rip (http {neko.status})')
@commands.command()
async def lneko(self, ctx):
"""NSFW: Shows a random lewd neko pic
Disable this command by putting "[lb:no_nsfw]" in your channel topic.
"""
if ctx.channel.is_nsfw():
if '[lb:no_nsfw]' in ctx.channel.topic:
raise utils.errors.NSFWException()
else:
async with get_req(ctx.bot.session, nekos['nsfw']) as lneko:
if lneko.status == 200:
img = await lneko.json()
# noinspection PyPep8Naming
lneko_em = discord.Embed(colour=0x690E8)
lneko_em.set_image(url=img['neko'])
lneko_em.set_footer(text='source: nekos.life')
await ctx.send(embed=lneko_em)
else:
raise utils.errors.ServiceError(f'dude rip (http {lneko.status})')
else:
            raise utils.errors.NSFWException('you really think you can do this '
                                             'in a non nsfw channel? lol')
def setup(bot):
bot.add_cog(Animemes(bot))
|
Python
| 0.002582
|
@@ -2150,19 +2150,16 @@
tting %22%5B
-lb:
no_nsfw%5D
@@ -2250,11 +2250,8 @@
f '%5B
-lb:
no_n
|
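
The diff above only shortens the opt-out marker from [lb:no_nsfw] to [no_nsfw]. One caveat when reusing this topic-tag pattern: a channel topic can be unset (None in discord.py), which would make a bare `in` test raise TypeError. A library-free sketch with a None-safe check; the helper name is made up:

def nsfw_opted_out(topic, marker='[no_nsfw]'):
    """True when the opt-out marker appears in the channel topic.

    `topic or ''` guards against an unset topic (None), which would
    otherwise crash the membership test.
    """
    return marker in (topic or '')

assert nsfw_opted_out('[no_nsfw] keep it clean')
assert not nsfw_opted_out(None)
assert not nsfw_opted_out('anything goes')
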
a307b6a059e1da4fc415296016280e5149bbd061
|
Update radio.py
|
cogs/radio.py
|
cogs/radio.py
|
from .utils import config, checks, formats
import discord
from discord.ext import commands
import discord.utils
from .utils.api.pycopy import Copy
import random, json, asyncio
from urllib.parse import unquote
class Radio:
"""The radio-bot related commands."""
def __init__(self, bot):
self.bot = bot
if not discord.opus.is_loaded():
discord.opus.load_opus('/usr/local/lib/libopus.so') #FreeBSD path
self.player = None
self.stopped = True
self.q = asyncio.Queue()
self.play_next_song = asyncio.Event()
self.current_song = None
copy_creds = self.load_copy_creds()
self.copycom = Copy(copy_creds['login'], copy_creds['passwd'])
self.songs = []
self.update_song_list()
def load_copy_creds(self):
with open('copy_creds.json') as f:
return json.load(f)
@property
def is_playing(self):
return self.player is not None and self.player.is_playing() and not self.stopped
def toggle_next_song(self):
if not self.stopped:
self.bot.loop.call_soon_threadsafe(self.play_next_song.set)
def update_song_list(self):
self.songs = self.copycom.list_files('radio/')
@commands.command()
async def join(self, *, channel : discord.Channel = None):
"""Join voice channel.
"""
if channel is None or channel.type is not discord.ChannelType.voice:
await self.bot.say('Cannot find a voice channel by that name. {0}'.format(channel.type))
return
await self.bot.join_voice_channel(channel)
@commands.command(pass_context=True)
    async def leave(self, ctx):
"""Leave voice channel.
"""
await ctx.invoke(self.stop)
await self.bot.voice.disconnect()
@commands.command()
async def pause(self):
"""Pause.
"""
if self.player is not None:
self.player.pause()
@commands.command()
async def resume(self):
"""Resume playing.
"""
        if self.player is not None and not self.is_playing:
self.player.resume()
@commands.command()
async def skip(self):
"""Skip song and play next.
"""
        if self.player is not None and self.is_playing:
self.player.stop()
self.toggle_next_song()
@commands.command()
    async def stop(self):
"""Stop playing song.
"""
        if self.is_playing:
self.stopped = True
self.player.stop()
@commands.command(pass_context=True)
async def play(self, ctx):
"""Start playing song from queue.
"""
if self.player is not None:
            if not self.is_playing:
await ctx.invoke(self.resume)
return
else:
await self.bot.say('Already playing a song')
return
while True:
if not self.bot.is_voice_connected():
await ctx.invoke(self.join, channel=ctx.message.author.voice_channel)
continue
if self.q.empty():
await self.q.put(random.choice(self.songs))
self.play_next_song.clear()
self.current = await self.q.get()
self.player = self.bot.voice.create_ffmpeg_player(
self.copycom.direct_link('radio/' + self.current),
after=self.toggle_next_song,
#options="-loglevel debug -report",
headers = dict(self.copycom.session.headers))
self.stopped = False
self.player.start()
fmt = 'Playing song "{0}"'
song_name = unquote(self.current.split('/')[-1])
await bot.say(fmt.format(song_name))
self.bot.change_status(discord.Game(name=song_name))
await self.play_next_song.wait()
def setup(bot):
bot.add_cog(Radio(bot))
|
Python
| 0.000001
|
@@ -3972,16 +3972,21 @@
await
+self.
bot.say(
|
d84a4efcf880bb668b2721af3f4ce18220e8baab
|
Use np.genfromtxt to handle missing values
|
xvistaprof/reader.py
|
xvistaprof/reader.py
|
#!/usr/bin/env python
# encoding: utf-8
"""
Reader for XVISTA .prof tables.
"""
import numpy as np
from astropy.table import Table
from astropy.io import registry
def xvista_table_reader(filename):
dt = [('R', np.float), ('SB', np.float), ('SB_err', np.float),
('ELL', np.float), ('PA', np.float), ('EMAG', np.float),
('ELLMAG', np.float), ('ELLMAG_err', np.float), ('XC', np.float),
('YC', np.float), ('FRACONT', np.float), ('A1', np.float),
('A2', np.float), ('A4', np.float), ('CIRCMAG', np.float)]
data = np.loadtxt(filename, dtype=np.dtype(dt), skiprows=15)
return Table(data)
registry.register_reader('xvistaprof', Table, xvista_table_reader)
|
Python
| 0.000022
|
@@ -267,26 +267,24 @@
,%0A
-
('ELL', np.f
@@ -320,34 +320,32 @@
AG', np.float),%0A
-
('ELLM
@@ -396,34 +396,32 @@
XC', np.float),%0A
-
('YC',
@@ -483,18 +483,16 @@
-
('A2', n
@@ -560,12 +560,15 @@
np.
-load
+genfrom
txt(
@@ -608,16 +608,84 @@
prows=15
+,%0A missing_values='*', filling_values=np.nan
)%0A re
|
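
np.genfromtxt differs from np.loadtxt in that it can recognize placeholder tokens and substitute fill values, which is what this fix relies on for the '*' entries in .prof tables. A self-contained illustration; the '*' marker matches the commit, the sample numbers are invented:

import io
import numpy as np

sample = io.StringIO("1.0 2.0\n3.0 *\n* 6.0\n")

# '*' cells become NaN instead of aborting the parse, as loadtxt would
data = np.genfromtxt(sample, missing_values='*', filling_values=np.nan)
print(data)   # the '*' cells come back as nan
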
0dde9454d05a6d5533454fbac8996c560d007c67
|
make a proper hook/task split in cython.
|
yaku/tools/cython.py
|
yaku/tools/cython.py
|
import os
import sys
from yaku.task_manager \
import \
extension, get_extension_hook
from yaku.task \
import \
Task
from yaku.compiled_fun \
import \
compile_fun
from yaku.utils \
import \
ensure_dir, find_program
import yaku.errors
@extension(".pyx")
def cython_task(self, node):
out = node.change_ext(".c")
target = node.parent.declare(out.name)
ensure_dir(target.name)
task = Task("cython", inputs=[node], outputs=[target])
task.gen = self
task.env_vars = []
task.env = self.env
self.env["CYTHON_INCPATH"] = ["-I%s" % p for p in
self.env["CYTHON_CPPPATH"]]
task.func = compile_fun("cython", "cython ${SRC} -o ${TGT} ${CYTHON_INCPATH}",
False)[0]
return [task]
def configure(ctx):
sys.stderr.write("Looking for cython... ")
if detect(ctx):
sys.stderr.write("yes\n")
else:
sys.stderr.write("no!\n")
raise yaku.errors.ToolNotFound()
ctx.env["CYTHON_CPPPATH"] = []
def detect(ctx):
if find_program("cython") is None:
return False
else:
return True
|
Python
| 0
|
@@ -295,16 +295,128 @@
%22.pyx%22)%0A
+def cython_hook(self, node):%0A self.sources.append(node.change_ext(%22.c%22))%0A return cython_task(self, node)%0A%0A
def cyth
@@ -535,24 +535,25 @@
arget.name)%0A
+%0A
task = T
|
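
The commit splits the .pyx handling into a hook (registered for the extension, recording the generated .c source on the builder) and a task factory it delegates to. The registry-by-extension pattern behind yaku's @extension decorator is easy to show in isolation; this is a generic sketch, not yaku's actual classes:

import os

# minimal extension-hook registry in the spirit of @extension(".pyx")
HOOKS = {}

def extension(ext):
    def register(func):
        HOOKS[ext] = func
        return func
    return register

@extension('.pyx')
def cython_hook(node):
    # the hook records the derived artifact, then hands off to the task step
    c_file = os.path.splitext(node)[0] + '.c'
    return ('cython', node, c_file)

def process(node):
    return HOOKS[os.path.splitext(node)[1]](node)

print(process('module.pyx'))   # ('cython', 'module.pyx', 'module.c')
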
6e1544b0a6c9ad62b53e57102e84456041437443
|
Update triggerwatcher.py
|
st2common/st2common/services/triggerwatcher.py
|
st2common/st2common/services/triggerwatcher.py
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=assignment-from-none
import eventlet
from kombu.mixins import ConsumerMixin
from kombu import Connection
from st2common import log as logging
from st2common.persistence.trigger import Trigger
from st2common.transport import reactor, publishers
from st2common.transport import utils as transport_utils
import st2common.util.queues as queue_utils
LOG = logging.getLogger(__name__)
class TriggerWatcher(ConsumerMixin):
sleep_interval = 0 # sleep to co-operatively yield after processing each message
def __init__(self, create_handler, update_handler, delete_handler,
trigger_types=None, queue_suffix=None, exclusive=False):
"""
:param create_handler: Function which is called on TriggerDB create event.
:type create_handler: ``callable``
:param update_handler: Function which is called on TriggerDB update event.
:type update_handler: ``callable``
:param delete_handler: Function which is called on TriggerDB delete event.
:type delete_handler: ``callable``
:param trigger_types: If provided, handler function will only be called
if the trigger in the message payload is included
in this list.
:type trigger_types: ``list``
        :param exclusive: If True, the queue is exclusive to the single
                          connection created by TriggerWatcher; when that
                          connection breaks, the queue is removed by the
                          message broker.
:type exclusive: ``bool``
"""
# TODO: Handle trigger type filtering using routing key
self._create_handler = create_handler
self._update_handler = update_handler
self._delete_handler = delete_handler
self._trigger_types = trigger_types
self._trigger_watch_q = self._get_queue(queue_suffix, exclusive=exclusive)
self.connection = None
self._load_thread = None
self._updates_thread = None
self._handlers = {
publishers.CREATE_RK: create_handler,
publishers.UPDATE_RK: update_handler,
publishers.DELETE_RK: delete_handler
}
def get_consumers(self, Consumer, channel):
return [Consumer(queues=[self._trigger_watch_q],
accept=['pickle'],
callbacks=[self.process_task])]
def process_task(self, body, message):
LOG.debug('process_task')
LOG.debug(' body: %s', body)
LOG.debug(' message.properties: %s', message.properties)
LOG.debug(' message.delivery_info: %s', message.delivery_info)
routing_key = message.delivery_info.get('routing_key', '')
handler = self._handlers.get(routing_key, None)
try:
if not handler:
LOG.debug('Skipping message %s as no handler was found.', message)
return
trigger_type = getattr(body, 'type', None)
if self._trigger_types and trigger_type not in self._trigger_types:
LOG.debug('Skipping message %s since\'t trigger_type doesn\'t match (type=%s)',
message, trigger_type)
return
try:
handler(body)
except Exception as e:
LOG.exception('Handling failed. Message body: %s. Exception: %s',
body, e.message)
finally:
message.ack()
eventlet.sleep(self.sleep_interval)
def start(self):
try:
self.connection = Connection(transport_utils.get_messaging_urls())
self._updates_thread = eventlet.spawn(self.run)
self._load_thread = eventlet.spawn(self._load_triggers_from_db)
except:
LOG.exception('Failed to start watcher.')
self.connection.release()
def stop(self):
try:
self._updates_thread = eventlet.kill(self._updates_thread)
self._load_thread = eventlet.kill(self._load_thread)
finally:
self.connection.release()
# Note: We sleep after we consume a message so we give a chance to other
# green threads to run. If we don't do that, ConsumerMixin will block on
# waiting for a message on the queue.
def on_consume_end(self, connection, channel):
super(TriggerWatcher, self).on_consume_end(connection=connection,
channel=channel)
eventlet.sleep(seconds=self.sleep_interval)
def on_iteration(self):
super(TriggerWatcher, self).on_iteration()
eventlet.sleep(seconds=self.sleep_interval)
def _load_triggers_from_db(self):
for trigger_type in self._trigger_types:
for trigger in Trigger.query(type=trigger_type):
LOG.debug('Found existing trigger: %s in db.' % trigger)
self._handlers[publishers.CREATE_RK](trigger)
@staticmethod
def _get_queue(queue_suffix, exclusive):
queue_name = queue_utils.get_queue_name(queue_name_base='st2.trigger.watch',
queue_name_suffix=queue_suffix,
add_random_uuid_to_suffix=True
)
return reactor.get_trigger_cud_queue(queue_name, routing_key='#', exclusive=exclusive)
|
Python
| 0
|
@@ -3924,19 +3924,16 @@
%25s since
-%5C't
trigger
|
43a3278149f067bb26589b312c052a3117b62870
|
update status
|
Cogs/Remind.py
|
Cogs/Remind.py
|
import asyncio
import discord
import time
import parsedatetime
from datetime import datetime
from operator import itemgetter
from discord.ext import commands
from Cogs import ReadableTime
from Cogs import DisplayName
from Cogs import Nullify
# This is the Remind module. It sends a pm to a user after a specified amount of time
# Reminder = { "End" : timeToEnd, "Message" : whatToSay }
class Remind:
# Init with the bot reference, and a reference to the settings var
def __init__(self, bot, settings):
self.bot = bot
self.settings = settings
async def onready(self):
# Check all reminders - and start timers
for server in self.bot.servers:
for member in server.members:
reminders = self.settings.getUserStat(member, server, "Reminders")
if len(reminders):
# We have a list
for reminder in reminders:
self.bot.loop.create_task(self.checkRemind(member, reminder))
async def checkRemind(self, member, reminder):
# Start our countdown
countDown = int(reminder['End'])-int(time.time())
if countDown > 0:
# We have a positive countdown - let's wait
await asyncio.sleep(countDown)
# Check if member is online - if so - remind them
if not str(member.status).lower() == "offline":
# Well, they're not Offline...
reminders = self.settings.getUserStat(member, member.server, "Reminders")
# Verify reminder is still valid
if not reminder in reminders:
return
server = reminder['Server']
message = reminder['Message']
if not message:
message = 'You wanted me to remind you of something...'
msg = 'In *{}*, you wanted me to remind you:\n\n{}'.format(server, message)
await self.bot.send_message(member, msg)
reminders.remove(reminder)
self.settings.setUserStat(member, member.server, "Reminders", reminders)
async def status(self, member):
# Check the user's status - and if they have any reminders
# If so - pm them - if not, ignore
if not str(member.status).lower() == "offline":
# They're not offline
currentTime = int(time.time())
reminders = self.settings.getUserStat(member, member.server, "Reminders")
removeList = []
if len(reminders):
# We have a list
for reminder in reminders:
timeLeft = int(reminder['End'])-currentTime
if timeLeft <= 0:
# Out of time - PM
message = reminder['Message']
server = reminder['Server']
if not message:
message = 'You wanted me to remind you of something...'
msg = 'In *{}*, you wanted me to remind you:\n\n{}'.format(server, message)
await self.bot.send_message(member, msg)
removeList.append(reminder)
if len(removeList):
# We have spent reminders
for reminder in removeList:
reminders.remove(reminder)
self.settings.setUserStat(member, member.server, "Reminders", reminders)
@commands.command(pass_context=True)
async def remindme(self, ctx, message : str = None, *, endtime : str = None):
"""Set a reminder."""
if not endtime or not message:
msg = 'Usage: `{}remindme "[message]" [endtime]`'.format(ctx.prefix)
await self.bot.send_message(ctx.message.channel, msg)
return
# Get current time - and end time
currentTime = int(time.time())
cal = parsedatetime.Calendar()
time_struct, parse_status = cal.parse(endtime)
start = datetime(*time_struct[:6])
end = time.mktime(start.timetuple())
# Get the time from now to end time
timeFromNow = end-currentTime
# Get our readable time
readableTime = ReadableTime.getReadableTimeBetween(int(currentTime), int(end))
# Add reminder
reminders = self.settings.getUserStat(ctx.message.author, ctx.message.server, "Reminders")
reminder = { 'End' : end, 'Message' : message, 'Server' : ctx.message.server.name }
reminders.append(reminder)
self.settings.setUserStat(ctx.message.author, ctx.message.server, "Reminders", reminders)
# Start timer for reminder
self.bot.loop.create_task(self.checkRemind(ctx.message.author, reminder))
# Confirm the reminder
msg = 'Okay *{}*, I\'ll remind you in *{}*.'.format(DisplayName.name(ctx.message.author), readableTime)
await self.bot.send_message(ctx.message.channel, msg)
@commands.command(pass_context=True)
async def reminders(self, ctx):
"""List up to 10 pending reminders."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.server, "SuppressMentions").lower() == "yes":
suppress = True
else:
suppress = False
member = ctx.message.author
myReminders = self.settings.getUserStat(member, member.server, "Reminders")
msg = 'You don\'t currently have any reminders set. You can add some with the `{}remindme "[message]" [time]` command.'.format(ctx.prefix)
if not len(myReminders):
# No reminders
await self.bot.send_message(ctx.message.channel, msg)
return
mySorted = sorted(myReminders, key=lambda x:int(x['End']))
currentTime = int(time.time())
total = 10 # Max number to list
remain = 0
if len(mySorted) < 10:
# Less than 10 - set the total
total = len(mySorted)
else:
# More than 10 - let's find out how many remain after
remain = len(mySorted)-10
if len(mySorted):
# We have at least 1 item
msg = '***{}\'s*** **Remaining Reminders:**\n'.format(DisplayName.name(member))
for i in range(0, total):
endTime = int(mySorted[i]['End'])
# Get our readable time
readableTime = ReadableTime.getReadableTimeBetween(currentTime, endTime)
msg = '{}\n{}. {} - in *{}*'.format(msg, i+1, mySorted[i]['Message'], readableTime)
if remain == 1:
msg = '{}\n\nYou have *{}* additional reminder.'.format(msg, remain)
elif remain > 1:
msg = '{}\n\nYou have *{}* additional reminders.'.format(msg, remain)
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await self.bot.send_message(ctx.message.channel, msg)
@commands.command(pass_context=True)
async def clearmind(self, ctx):
"""Clear all reminders."""
member = ctx.message.author
self.settings.setUserStat(member, member.server, "Reminders", [])
msg = 'Alright *{}*, your calendar has been cleared of reminders!'.format(DisplayName.name(ctx.message.author))
await self.bot.send_message(ctx.message.channel, msg)
|
Python
| 0.000001
|
@@ -1831,24 +1831,39 @@
status(self,
+ member_before,
member):%0A%09%09
@@ -6257,28 +6257,29 @@
ge(ctx.message.channel, msg)
+%0A
|
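
The first hunk widens the status hook from (self, member) to (self, member_before, member) — the (before, after) pair that discord.py passes to member-update events, letting the cog react to transitions rather than absolute states. A library-free sketch of that calling convention; the class and state strings are illustrative:

class ReminderHook:
    """Receives previous and current state, mirroring an
    on_member_update(before, after) style callback."""

    def status(self, member_before, member):
        # only act on an offline -> online transition
        if member_before == 'offline' and member != 'offline':
            return 'deliver pending reminders'
        return 'nothing to do'

hook = ReminderHook()
print(hook.status('offline', 'online'))   # deliver pending reminders
print(hook.status('online', 'online'))    # nothing to do
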
af96c316f485ebed2ad342aa2ea720d8b699f649
|
bump version
|
ydcommon/__init__.py
|
ydcommon/__init__.py
|
"""
YD Technology common libraries
"""
VERSION = (0, 1, 1)
__version__ = '.'.join((str(each) for each in VERSION[:4]))
def get_version():
"""
Returns shorter version (digit parts only) as string.
"""
version = '.'.join((str(each) for each in VERSION[:3]))
if len(VERSION) > 3:
version += str(VERSION[3])
return version
|
Python
| 0
|
@@ -50,17 +50,17 @@
(0, 1,
-1
+2
)%0A%0A__ver
|
de48a47cf813177e824026a994a5a814f5cc1a2d
|
fix socket.TCP_KEEPIDLE error on Mac OS
|
routeros_api/api_socket.py
|
routeros_api/api_socket.py
|
import socket
from routeros_api import exceptions
try:
import errno
except ImportError:
errno = None
EINTR = getattr(errno, 'EINTR', 4)
def get_socket(hostname, port, timeout=15.0):
api_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
api_socket.settimeout(timeout)
while True:
try:
api_socket.connect((hostname, port))
except socket.error as e:
if e.args[0] != EINTR:
raise exceptions.RouterOsApiConnectionError(e)
else:
break
set_keepalive(api_socket, after_idle_sec=10)
return SocketWrapper(api_socket)
# http://stackoverflow.com/a/14855726
def set_keepalive(sock, after_idle_sec=1, interval_sec=3, max_fails=5):
"""Set TCP keepalive on an open socket.
It activates after 1 second (after_idle_sec) of idleness,
then sends a keepalive ping once every 3 seconds (interval_sec),
and closes the connection after 5 failed ping (max_fails), or 15 seconds
"""
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, after_idle_sec)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, interval_sec)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, max_fails)
class DummySocket(object):
def close(self):
pass
def settimeout(self, timeout):
pass
class SocketWrapper(object):
def __init__(self, socket):
self.socket = socket
def send(self, bytes):
return self.socket.sendall(bytes)
def receive(self, length):
while True:
try:
return self._receive_and_check_connection(length)
except socket.error as e:
if e.args[0] == EINTR:
continue
else:
raise
def _receive_and_check_connection(self, length):
bytes = self.socket.recv(length)
if bytes:
return bytes
else:
raise exceptions.RouterOsApiConnectionClosedError
def close(self):
return self.socket.close()
def settimeout(self, timeout):
self.socket.settimeout(timeout)
|
Python
| 0
|
@@ -987,16 +987,60 @@
%22%22%22%0A
+ if hasattr(socket, %22SO_KEEPALIVE%22):%0A
sock
@@ -1094,16 +1094,60 @@
IVE, 1)%0A
+ if hasattr(socket, %22TCP_KEEPIDLE%22):%0A
sock
@@ -1211,24 +1211,69 @@
r_idle_sec)%0A
+ if hasattr(socket, %22TCP_KEEPINTVL%22):%0A
sock.set
@@ -1332,24 +1332,67 @@
terval_sec)%0A
+ if hasattr(socket, %22TCP_KEEPCNT%22):%0A
sock.set
|
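
The fix wraps every setsockopt call in a hasattr guard because TCP_KEEPIDLE and friends are Linux-only constants; macOS and Windows expose different subsets, so the bare attribute access raised AttributeError. The same guard pattern reduced to its core, runnable on any platform:

import socket

def set_keepalive_portably(sock, idle=10, interval=3, fails=5):
    """Enable whichever TCP keepalive knobs this platform exposes."""
    options = [
        ('SO_KEEPALIVE', socket.SOL_SOCKET, 1),
        ('TCP_KEEPIDLE', socket.IPPROTO_TCP, idle),       # Linux only
        ('TCP_KEEPINTVL', socket.IPPROTO_TCP, interval),
        ('TCP_KEEPCNT', socket.IPPROTO_TCP, fails),
    ]
    for name, level, value in options:
        # skip options the local socket module does not define
        if hasattr(socket, name):
            sock.setsockopt(level, getattr(socket, name), value)

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
set_keepalive_portably(s)
s.close()
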
b957a84ea368f681e4a37c3259305116447dc1c8
|
Fix incorrect test import.
|
statsmodels/tsa/statespace/tests/test_tools.py
|
statsmodels/tsa/statespace/tests/test_tools.py
|
"""
Tests for tools
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
from dismalpy.ssm import tools
# from .results import results_sarimax
from numpy.testing import (
assert_equal, assert_array_equal, assert_almost_equal, assert_raises,
assert_raises_regex
)
class TestCompanionMatrix(object):
cases = [
(2, np.array([[0,1],[0,0]])),
([1,-1,-2], np.array([[1,1],[2,0]])),
([1,-1,-2,-3], np.array([[1,1,0],[2,0,1],[3,0,0]]))
]
def test_cases(self):
for polynomial, result in self.cases:
assert_equal(tools.companion_matrix(polynomial), result)
class TestDiff(object):
x = np.arange(10)
cases = [
# diff = 1
([1,2,3], 1, None, 1, [1, 1]),
# diff = 2
(x, 2, None, 1, [0]*8),
# diff = 1, seasonal_diff=1, k_seasons=4
(x, 1, 1, 4, [0]*5),
(x**2, 1, 1, 4, [8]*5),
(x**3, 1, 1, 4, [60, 84, 108, 132, 156]),
# diff = 1, seasonal_diff=2, k_seasons=2
(x, 1, 2, 2, [0]*5),
(x**2, 1, 2, 2, [0]*5),
(x**3, 1, 2, 2, [24]*5),
(x**4, 1, 2, 2, [240, 336, 432, 528, 624]),
]
def test_cases(self):
# Basic cases
for series, diff, seasonal_diff, k_seasons, result in self.cases:
# Test numpy array
x = tools.diff(series, diff, seasonal_diff, k_seasons)
assert_almost_equal(x, result)
# Test as Pandas Series
series = pd.Series(series)
# Rewrite to test as n-dimensional array
series = np.c_[series, series]
result = np.c_[result, result]
# Test Numpy array
x = tools.diff(series, diff, seasonal_diff, k_seasons)
assert_almost_equal(x, result)
# Test as Pandas Dataframe
series = pd.DataFrame(series)
x = tools.diff(series, diff, seasonal_diff, k_seasons)
assert_almost_equal(x, result)
class TestIsInvertible(object):
cases = [
([1, -0.5], True),
([1, 1-1e-9], True),
([1, 1], False),
([1, 0.9,0.1], True),
(np.array([1,0.9,0.1]), True),
(pd.Series([1,0.9,0.1]), True)
]
def test_cases(self):
for polynomial, invertible in self.cases:
assert_equal(tools.is_invertible(polynomial), invertible)
class TestConstrainStationaryUnivariate(object):
cases = [
(np.array([2.]), -2./((1+2.**2)**0.5))
]
def test_cases(self):
for unconstrained, constrained in self.cases:
result = tools.constrain_stationary_univariate(unconstrained)
assert_equal(result, constrained)
class TestValidateMatrixShape(object):
# name, shape, nrows, ncols, nobs
valid = [
('TEST', (5,2), 5, 2, None),
('TEST', (5,2), 5, 2, 10),
('TEST', (5,2,10), 5, 2, 10),
]
invalid = [
('TEST', (5,), 5, None, None),
('TEST', (5,1,1,1), 5, 1, None),
('TEST', (5,2), 10, 2, None),
('TEST', (5,2), 5, 1, None),
('TEST', (5,2,10), 5, 2, None),
('TEST', (5,2,10), 5, 2, 5),
]
def test_valid_cases(self):
for args in self.valid:
# Just testing that no exception is raised
tools.validate_matrix_shape(*args)
def test_invalid_cases(self):
for args in self.invalid:
assert_raises_regex(
ValueError, args[0], tools.validate_matrix_shape, *args
)
class TestValidateVectorShape(object):
# name, shape, nrows, ncols, nobs
valid = [
('TEST', (5,), 5, None),
('TEST', (5,), 5, 10),
('TEST', (5,10), 5, 10),
]
invalid = [
('TEST', (5,2,10), 5, 10),
('TEST', (5,), 10, None),
('TEST', (5,10), 5, None),
('TEST', (5,10), 5, 5),
]
def test_valid_cases(self):
for args in self.valid:
# Just testing that no exception is raised
tools.validate_vector_shape(*args)
def test_invalid_cases(self):
for args in self.invalid:
assert_raises_regex(
ValueError, args[0], tools.validate_vector_shape, *args
)
|
Python
| 0
|
@@ -177,20 +177,34 @@
rom
-dismalpy.ssm
+statsmodels.tsa.statespace
imp
|
85ac83b7e1ff6830e135b9f2c8643e00aaf93e99
|
Add comment
|
yunohost_firewall.py
|
yunohost_firewall.py
|
# -*- coding: utf-8 -*-
import os
import sys
try:
import miniupnpc
except ImportError:
sys.stderr.write('Error: Yunohost CLI Require miniupnpc lib\n')
sys.exit(1)
try:
import yaml
except ImportError:
sys.stderr.write('Error: Yunohost CLI Require yaml lib\n')
sys.stderr.write('apt-get install python-yaml\n')
sys.exit(1)
from yunohost import YunoHostError, win_msg
def firewall_allow(protocol=None, port=None, ipv6=None, upnp=False):
"""
Allow port in iptables
Keyword arguments:
protocol -- Protocol used
port -- Port to open
ipv6 -- Boolean ipv6
upnp -- Boolean upnp
Return
Dict
"""
port = int(port)
if (upnp):
add_portmapping(protocol, upnp, ipv6)
if 0 < port < 65536:
if protocol == "Both":
update_yml(port, 'TCP', 'a', ipv6)
update_yml(port, 'UDP', 'a', ipv6)
else:
update_yml(port, protocol, 'a', ipv6)
win_msg(_("Port successfully openned"))
else:
raise YunoHostError(22, _("Port not between 1 and 65535 : ")+ str(port))
return firewall_reload(upnp)
def firewall_disallow(protocol=None, port=None, ipv6=None, upnp=False):
"""
Disallow port in iptables
Keyword arguments:
protocol -- Protocol used
port -- Port to open
ipv6 -- Boolean ipv6
        upnp -- Boolean upnp
Return
Dict
"""
port = int(port)
if protocol == "Both":
update_yml(port, 'TCP', 'r', ipv6)
update_yml(port, 'UDP', 'r', ipv6)
else:
update_yml(port, protocol, 'r', ipv6)
win_msg(_("Port successfully closed"))
return firewall_reload(upnp)
def firewall_list():
"""
Allow port in iptables
Keyword arguments:
None
Return
Dict
"""
with open ('firewall.yml') as f:
firewall = yaml.load(f)
return firewall
def firewall_reload(upnp=False):
'''
Reload iptables configuration
Keyword arguments:
upnp -- Boolean upnp
Return
Dict
'''
with open('firewall.yml', 'r') as f:
firewall = yaml.load(f)
os.system ("iptables -P INPUT ACCEPT")
os.system ("iptables -F")
os.system ("iptables -X")
if 22 not in firewall['ipv4']['TCP']:
update_yml(22, 'TCP', 'a', False)
os.system ("ip6tables -P INPUT ACCEPT")
os.system ("ip6tables -F")
os.system ("ip6tables -X")
if 22 not in firewall['ipv6']['TCP']:
        update_yml(22, 'TCP', 'a', True)
add_portmapping('TCP', upnp, False);
add_portmapping('UDP', upnp, False);
add_portmapping('TCP', upnp, True);
add_portmapping('UDP', upnp, True);
os.system ("iptables -P INPUT DROP")
os.system ("ip6tables -P INPUT DROP")
os.system("service fail2ban restart")
win_msg(_("Firewall successfully reloaded"))
return firewall_list()
def update_yml(port=None, protocol=None, mode=None, ipv6=None):
"""
Update firewall.yml
Keyword arguments:
protocol -- Protocol used
port -- Port to open
mode -- a=append r=remove
ipv6 -- Boolean ipv6
Return
None
"""
if ipv6: ip = 'ipv6'
else: ip = 'ipv4'
with open('firewall.yml', 'r') as f:
firewall = yaml.load(f)
if mode == 'a':
if port not in firewall[ip][protocol]:
firewall[ip][protocol].append(port)
else:
raise YunoHostError(22,_("Port already openned :")+ str(port))
else:
if port in firewall[ip][protocol]:
firewall[ip][protocol].remove(port)
else:
raise YunoHostError(22,_("Port already closed :")+ str(port))
firewall[ip][protocol].sort()
os.system("mv firewall.yml firewall.yml.old")
with open('firewall.yml', 'w') as f:
yaml.dump(firewall, f)
def add_portmapping(protocol=None, upnp=False, ipv6=None):
"""
Send a port mapping rules to igd device
Keyword arguments:
protocol -- Protocol used
port -- Port to open
Return
None
"""
os.system ("iptables -P INPUT ACCEPT")
if upnp:
upnp = miniupnpc.UPnP()
upnp.discoverdelay = 200
nbigd = upnp.discover()
if nbigd:
try:
upnp.selectigd()
except:
firewall_reload(False)
raise YunoHostError(167,_("No upnp devices found"))
else:
firewall_reload(False)
raise YunoHostError(22,_("Can't connect to the igd device"))
# list the redirections :
for i in xrange(100):
p = upnp.getgenericportmapping(i)
if p is None: break
upnp.deleteportmapping(p[0], p[1])
if ipv6: ip = 'ipv6'
else: ip = 'ipv4'
with open('firewall.yml', 'r') as f:
firewall = yaml.load(f)
for i,port in enumerate (firewall[ip][protocol]):
os.system ("iptables -A INPUT -p "+ protocol +" -i eth0 --dport "+ str(port) +" -j ACCEPT")
if upnp:
upnp.addportmapping(port, protocol, upnp.lanaddr, port, 'yunohost firewall : port %u' % port, '')
os.system ("iptables -P INPUT DROP")
def firewall_installupnp():
"""
Add upnp cron
Keyword arguments:
None
Return
None
"""
os.system("touch /etc/cron.d/yunohost-firewall")
os.system("echo '*/50 * * * * root yunohost firewall reload -u>>/dev/null'>/etc/cron.d/yunohost-firewall")
win_msg(_("UPNP cron installed"))
def firewall_removeupnp():
try:
os.remove("/etc/cron.d/yunohost-firewall")
except:
raise YunoHostError(167,_("UPNP cron was not installed!"))
win_msg(_("UPNP cron removed"))
def firewall_stop():
os.system ("iptables -P INPUT ACCEPT")
os.system ("iptables -F")
os.system ("iptables -X")
os.system ("ip6tables -P INPUT ACCEPT")
os.system ("ip6tables -F")
os.system ("ip6tables -X")
firewall_removeupnp()
|
Python
| 0
|
@@ -5558,24 +5558,122 @@
moveupnp():%0A
+ %22%22%22%0A Remove upnp cron%0A Keyword arguments:%0A None%0A Return%0A None%0A %22%22%22%0A%0A
try:%0A
@@ -5866,16 +5866,110 @@
_stop():
+%0A %22%22%22%0A Stop firewall%0A Keyword arguments:%0A None%0A Return%0A None%0A %22%22%22
%0A%0A os
|
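
Beyond the added docstrings, the module above shows the whole miniupnpc flow it depends on: discover, select the gateway, clear old redirections, re-add mappings. A condensed sketch using only calls that already appear in the file; untested here, and it needs a network with a real IGD device (the 8080 port is an example value):

import miniupnpc

upnp = miniupnpc.UPnP()
upnp.discoverdelay = 200
if upnp.discover():                       # number of IGD devices that answered
    upnp.selectigd()
    # drop existing redirections; deleting index 0 repeatedly avoids the
    # index shifting that in-order deletion causes (bounded like the module)
    for _ in range(100):
        mapping = upnp.getgenericportmapping(0)
        if mapping is None:
            break
        upnp.deleteportmapping(mapping[0], mapping[1])
    upnp.addportmapping(8080, 'TCP', upnp.lanaddr, 8080, 'example mapping', '')
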
d3f49e2d75106d9af23dcf961f8d16b45eee0c8c
|
Fix determining line numbers.
|
z80/_disassembler.py
|
z80/_disassembler.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Z80 CPU Emulator.
# https://github.com/kosarev/z80
#
# Copyright (C) 2017-2020 Ivan Kosarev.
# ivan@kosarev.info
#
# Published under the MIT license.
import bisect
from ._error import Error
class _SourcePos(object):
def __init__(self, offset, source_file):
self.__offset = offset
self.__source_file = source_file
def __repr__(self):
file = self.__source_file
line, line_no, column_no = file.get_coordinates(self.__offset)
return '%s\n%s^\n%s:%s:%s' % (line, ' ' * column_no, file,
line_no, column_no)
class _SourceFile(object):
def __init__(self, filename):
self.__filename = filename
with open(self.__filename) as f:
self.__image = f.read()
self.__line_breaks = tuple(
i for i, c in enumerate(self.__image) if c == '\n')
def __repr__(self):
return self.__filename
def get_image(self):
return self.__image
def get_coordinates(self, offset):
i = bisect.bisect_left(self.__line_breaks, offset)
line_start = self.__line_breaks[i - 1] + 1 if i > 0 else 0
line_end = (self.__line_breaks[i] if i < len(self.__line_breaks)
else len(self.__image))
line = self.__image[line_start:line_end]
line_no = i
column_no = offset - line_start
return line, line_no, column_no
class _Literal(object):
def __init__(self, text, source_pos):
self.__text = text
self.__source_pos = source_pos
def __repr__(self):
return '%r: %r' % (self.__source_pos, self.__text)
def get_text(self):
return self.__text
def get_source_pos(self):
return self.__source_pos
class _SourceError(Error):
def __init__(self, subject, message):
super().__init__('%r: %s' % (subject, message))
class _SourceParser(object):
def __init__(self, source_file):
self.__source_file = source_file
self.__image = source_file.get_image()
self.__offset = 0
def get_current_pos(self):
return _SourcePos(self.__offset, self.__source_file)
def __get_front(self):
return self.__image[self.__offset:]
def __peek_char(self):
return self.__get_front()[:1]
def __follows_with(self, s):
return self.__get_front().startswith(s)
def __skip(self, n):
if n == 0:
return None
lit = _Literal(self.__get_front()[:n], self.get_current_pos())
self.__offset += n
return lit
def __skip_to_offset(self, offset):
assert self.__offset <= offset
return self.__skip(offset - self.__offset)
def __skip_to_end(self):
return self.__skip_to_offset(len(self.__image))
def skip_to(self, delims):
pos = len(self.__image)
for delim in delims:
i = self.__image.find(delim, self.__offset)
if i >= 0 and pos > i:
pos = i
if pos is None:
return self.__skip_to_end()
return self.__skip_to_offset(pos)
def __skip_while(self, allowed):
end = self.__offset
while self.__image[end:].startswith(allowed):
end += len(allowed)
return self.__skip_to_offset(end)
def skip_spaces(self):
return self.__skip_while(' ')
def eat(self, s):
if not self.__follows_with(s):
return None
return self.__skip(len(s))
def skip_to_and_eat(self, s):
self.skip_to((s,))
return self.eat(s)
def skip_rest_of_line(self):
return self.skip_to('\n')
class _ProfileTag(object):
def __init__(self, kind, addr, comment):
self._kind = kind
self._addr = addr
self._comment = comment
def get_kind(self):
return self._kind
def get_addr(self):
return self._addr
def get_comment(self):
return self._comment
def __repr__(self):
comment = self.get_comment()
comment = '' if not comment else ' : %s' % comment
return '@@ 0x%04x %s%s' % (self.get_addr(), self.get_kind(), comment)
class _InstrTag(_ProfileTag):
ID = 'instr'
def __init__(self, addr, comment):
super().__init__(self.ID, addr, comment)
class _TagParser(_SourceParser):
_DELIMITERS = ' :'
def __init__(self, source_file):
super().__init__(source_file)
def __parse_token(self):
return self.skip_to(self._DELIMITERS)
def __parse_address(self):
tok = self.__parse_token()
if not tok:
raise _SourceError(self.get_current_pos(),
'Tag address expected.')
try:
return int(tok.get_text(), base=0)
except ValueError:
raise _SourceError(tok, 'Malformed tag address.')
def __parse_name(self):
tok = self.__parse_token()
if not tok:
raise _SourceError(self.get_current_pos(),
'Tag name expected.')
return tok
def __parse_optional_comment(self):
if self.eat(':'):
self.skip_spaces()
return self.skip_rest_of_line().get_text()
tok = self.skip_rest_of_line()
if not tok:
return None
raise _SourceError(tok, 'End of line or a comment expected.')
def __parse_instr_tag(self, addr, name):
comment = self.__parse_optional_comment()
return _InstrTag(addr, comment)
_TAG_PARSERS = {
_InstrTag.ID: __parse_instr_tag,
}
# Parses and returns a subsequent tag.
def __iter__(self):
if self.skip_to_and_eat('@@'):
self.skip_spaces()
addr = self.__parse_address()
self.skip_spaces()
name = self.__parse_name()
self.skip_spaces()
parser = self._TAG_PARSERS.get(name.get_text(), None)
if not parser:
raise _SourceError(name, 'Unknown tag.')
yield parser(self, addr, name)
class _Profile(object):
__tags = dict()
def load_if_exists(self, filename):
try:
parser = _TagParser(_SourceFile(filename))
for tag in parser:
if tag:
self.__tags.setdefault(tag.get_addr(), []).append(tag)
except FileNotFoundError:
pass
class _Disassembler(object):
def __init__(self, image):
self.__image = image
def disassemble(self, profile):
self._tags_to_process = []
assert 0, profile._Profile__tags
|
Python
| 0.000039
|
@@ -1384,16 +1384,20 @@
e_no = i
+ + 1
%0A
|
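
The one-character fix turns the bisect index into a 1-based line number. The underlying technique — precompute newline offsets once, then map any character offset to (line, column) with a binary search — is worth seeing on its own; a standalone version with a quick check:

import bisect

def coordinates(text, offset):
    """Map a character offset to (1-based line, 0-based column)."""
    line_breaks = [i for i, c in enumerate(text) if c == '\n']
    i = bisect.bisect_left(line_breaks, offset)
    line_start = line_breaks[i - 1] + 1 if i > 0 else 0
    return i + 1, offset - line_start     # the '+ 1' is the commit's fix

text = 'abc\ndef\nghi'
assert coordinates(text, 0) == (1, 0)     # 'a' sits on line 1
assert coordinates(text, 4) == (2, 0)     # 'd' opens line 2
assert coordinates(text, 9) == (3, 1)     # 'h' is column 1 of line 3
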
5a31ed001626937772a30ab46b94fe2b4bb5cfb8
|
allow 2013 candidates
|
fecreader/summary_data/management/commands/add_candidates.py
|
fecreader/summary_data/management/commands/add_candidates.py
|
from django.core.management.base import BaseCommand, CommandError
from ftpdata.models import Candidate
from summary_data.models import Candidate_Overlay
from summary_data.utils.overlay_utils import make_candidate_overlay_from_masterfile
election_year = 2014
cycle = str(election_year)
class Command(BaseCommand):
help = "Add new candidates"
requires_model_validation = False
def handle(self, *args, **options):
candidates = Candidate.objects.filter(cycle=cycle, cand_election_year__in=[2013,2014])
# We'll miss folks who put the wrong election year in their filing, but...
for candidate in candidates:
# will doublecheck that it doesn't already exist before creating it
make_candidate_overlay_from_masterfile(candidate.cand_id, cand_election_year=candidate.cand_election_year)
|
Python
| 0.000005
|
@@ -799,21 +799,16 @@
and_id,
-cand_
election
|
f5b8611e785fc5e833d2b257dd05231893aa71b5
|
Fix our usage of the new WebHelpers API
|
fedoracommunity/mokshaapps/packages/controllers/downloads.py
|
fedoracommunity/mokshaapps/packages/controllers/downloads.py
|
# This file is part of Fedora Community.
# Copyright (C) 2008-2010 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from tg import expose, tmpl_context
from tw.api import Widget
from tw.forms import SingleSelectField
try:
from webhelpers.number import format_byte_size
except ImportError:
from webhelpers.rails.number import number_to_human_size as format_byte_size
from moksha.lib.base import Controller
from moksha.lib.helpers import Category, MokshaApp, defaultdict
from moksha.api.connectors import get_connector
from helpers import PackagesDashboardContainer
from fedoracommunity.lib.utils import architectures
log = logging.getLogger(__name__)
class ReleaseDownloadsFilter(SingleSelectField):
options = []
attrs = {'onchange': """
moksha.html_load(moksha.url('/apps/fedoracommunity.packages/package/downloads/downloads_table'), {
'package': package_name,
'release': $('#downloads_widget_releases').val(),
}, function(r) {
var $stripped = moksha.filter_resources(r);
$('div.package_downloads').html($stripped);
}, $('#' + uid + ' .overlay'));
"""}
class DownloadsDashboard(PackagesDashboardContainer):
layout = [Category('content-col-apps',[
MokshaApp(None,
'fedoracommunity.packages/package/downloads/downloads_table',
params={'package': '', 'release': 'dist-rawhide'},
css_class='package_downloads'),
])]
downloads_dashboard = DownloadsDashboard('downloads_dashboard')
class SourceDashboard(PackagesDashboardContainer):
layout = [Category('content-col-apps',[
MokshaApp(None,
'fedoracommunity.packages/package/downloads/source_table',
params={'package': ''}),
])]
source_dashboard = SourceDashboard('source_dashboard')
class DownloadsWidget(Widget):
template = "mako:fedoracommunity.mokshaapps.packages.templates.downloads_widget"
params = ['id', 'package', 'release', 'latest_spec', 'latest_srpm',
'arches', 'releases', 'architectures']
children = [ReleaseDownloadsFilter('releases')]
architectures = architectures
def update_params(self, d):
super(DownloadsWidget, self).update_params(d)
koji = get_connector('koji')
rpms = koji._koji_client.getLatestRPMS(d.release, package=d.package)
arches = defaultdict(list)
for download in rpms[0]:
download['size'] = format_byte_size(download['size'])
download['nvr'] = '%s-%s-%s.%s.rpm' % (download['name'],
download['version'], download['release'],
download['arch'])
download['url'] = \
'http://kojipkgs.fedoraproject.org/packages/%s/%s/%s/%s/%s' % (
d.package, download['version'], download['release'],
download['arch'], download['nvr'])
arches[download['arch']].append(download)
if len(arches['src']):
d.latest_srpm = arches['src'][0]
del(arches['src'])
# Determine the branch name from the release.
pkgdb = get_connector('pkgdb')
collections = pkgdb.get_collection_table(active_only=True)
collection = pkgdb.get_collection_by_koji_name(d.release)
if collection:
branch = collection['branchname']
else:
raise Exception("Cannot find branchname for %s" % d.release)
d.latest_spec = 'http://cvs.fedoraproject.org/viewvc/rpms/%s/%s/%s.spec?view=markup' % (d.package, branch, d.package)
d.arches = arches
d.releases = pkgdb.get_fedora_releases()
downloads_widget = DownloadsWidget('downloads_widget')
class SourceDownloadsWidget(Widget):
template = "mako:fedoracommunity.mokshaapps.packages.templates.source_downloads_widget"
params = ['id', 'package', 'sources']
def update_params(self, d):
super(SourceDownloadsWidget, self).update_params(d)
sources = []
releases = []
dist_tags = {}
koji = get_connector('koji')._koji_client
koji.multicall = True
pkgdb = get_connector('pkgdb')
collections = pkgdb.get_collection_table(active_only=True)
for id, collection in collections.items():
if collection['name'] == 'Fedora':
tag = collection['koji_name']
releases.append(tag)
name = collection['name']
version = collection['version']
if name == 'Fedora' and version == 'devel':
name = 'Rawhide'
version = ''
dist_tags[tag] = '%s %s' % (name,
version)
if 'rawhide' not in tag:
tag += '-updates'
koji.getLatestRPMS(tag, package=d.package, arch='src')
results = koji.multiCall()
koji.multicall = False
for i, result in enumerate(results):
if 'faultCode' in result:
log.warning('Skipping koji result: %s' % result['faultString'])
continue
if not result[0][0]:
log.warning('Skipping koji result for %s' % releases[i])
continue
build = result[0][0][0]
build['nvr'] = '%s-%s-%s.%s.rpm' % (build['name'],
build['version'], build['release'], build['arch'])
sources.append({
'release': dist_tags[releases[i]],
'released_version': '%s-%s' % (build['version'],
build['release']),
'size': number_to_human_size(build['size']),
'nvr': build['nvr'],
'url': 'http://kojipkgs.fedoraproject.org/packages/%s/%s/%s/%s/%s' % (build['name'], build['version'], build['release'], build['arch'], build['nvr']),
})
d.sources = sources
source_downloads_widget = SourceDownloadsWidget('source_downloads_widget')
class DownloadsController(Controller):
@expose('mako:moksha.templates.widget')
def index(self, package):
tmpl_context.widget = downloads_dashboard
return dict(options={'package': package})
@expose('mako:moksha.templates.widget')
def source(self, package):
tmpl_context.widget = source_dashboard
return dict(options={'package': package})
@expose('mako:moksha.templates.widget')
def downloads_table(self, package, release, *args, **kw):
tmpl_context.widget = downloads_widget
return dict(options={'package': package, 'release': release})
@expose('mako:moksha.templates.widget')
def source_table(self, package, *args, **kw):
tmpl_context.widget = source_downloads_widget
return dict(options={'package': package})
|
Python
| 0.00004
|
@@ -6375,31 +6375,27 @@
'size':
-number_to_human
+format_byte
_size(bu
|
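
The compatibility trick at the top of the file is a try/except import with an alias, so the module body uses one name regardless of which WebHelpers generation is installed — and the bug fixed here was a later call site still using the un-aliased spelling. The same pattern with a stdlib/backport pair, as a neutral runnable example:

# Prefer the modern location, fall back to the old one under a single alias.
try:
    import importlib.metadata as metadata     # stdlib on Python 3.8+
except ImportError:
    import importlib_metadata as metadata     # the PyPI backport

# Callers only ever see the alias, so there is no second spelling left
# to drift out of sync — exactly the failure mode this diff repairs.
print(metadata.version('pip'))                # assumes pip is installed
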
0abb461f65bb21b9eb6347347852e3faf238475e
|
Add degenerated face test.
|
Core/Repair.py
|
Core/Repair.py
|
# -*- coding:utf-8 -*-
#
# External dependencies
#
from .Mesh import Mesh
from numpy import array, isfinite, zeros
#
# Check several parameters of a given mesh
#
def CheckMesh( mesh ) :
# Initialisation
log_message = ''
# Vertex number
if len(mesh.vertices) < 3 :
log_message += 'Not enough vertices\n'
# Face number
if len(mesh.faces) < 1 :
log_message += 'Not enough faces\n'
# Face normal number
if len(mesh.face_normals) and (len(mesh.face_normals) != len(mesh.faces)) :
log_message += 'Face normal number doesn\'t match face number\n'
# Vertex normal number
if len(mesh.vertex_normals) and (len(mesh.vertex_normals) != len(mesh.vertices)) :
log_message += 'Vertex normal number doesn\'t match vertex number\n'
# Color number
if len(mesh.colors) and (len(mesh.colors) != len(mesh.vertices)) :
log_message += 'Color number doesn\'t match vertex number\n'
# Texture coordinate number
if len(mesh.textures) and (len(mesh.textures) != len(mesh.vertices)) :
log_message += 'Texture coordinate number doesn\'t match vertex number\n'
# Texture filename
if len(mesh.textures) and not mesh.texture_name :
log_message += 'Empty texture filename\n'
# Face indices
if (mesh.faces < 0).any() or (mesh.faces >= len(mesh.vertices)).any() :
log_message += 'Bad face indices\n'
# Vertex coordinates
if not isfinite(mesh.vertices).all() :
log_message += 'Bad vertex coordinates\n'
# Face normals
if not isfinite(mesh.face_normals).all() :
log_message += 'Bad face normals\n'
# Vertex normals
if not isfinite(mesh.vertex_normals).all() :
log_message += 'Bad vertex normals\n'
# Colors
if (mesh.colors < 0).any() or (mesh.colors > 1).any() :
log_message += 'Bad color values\n'
# Texture coordinates
if (mesh.textures < 0).any() or (mesh.textures > 1).any() :
log_message += 'Bad texture coordinates\n'
# Return the log message
return log_message
#
# Check neighborhood parameters of a given mesh
#
def CheckNeighborhood( mesh ) :
# Initialization
log_message = ''
# Check isolated vertices
if (array([len(neighbor) for neighbor in mesh.neighbor_faces]) == 0).any() :
log_message += 'Isolated vertices\n'
# Return the log message
return log_message
#
# Remove isolated vertices in a given mesh
# TODO : process colors and texture coordinates
#
def RemoveIsolatedVertices( mesh ) :
# Register isolated vertices
isolated_vertices = (array([len(neighbor) for neighbor in mesh.neighbor_faces]) == 0)
	#
	# Flag vertices that are not referenced by any face
	#
	border_vertices = zeros( len(mesh.vertices) )
	border_vertices[ mesh.faces[:,0] ] += 1
	border_vertices[ mesh.faces[:,1] ] += 1
	border_vertices[ mesh.faces[:,2] ] += 1
	if any( border_vertices == 0 ) : print( 'Isolated vertices' )
# Do nothing if there is no isolated vertex
if not isolated_vertices.any() : return
# Create the new vertex array and a lookup table
new_vertices = []
index = 0
lut = zeros( len(mesh.vertices), dtype=int )
for ( v, isolated ) in enumerate( isolated_vertices ) :
if isolated : continue
new_vertices.append( mesh.vertices[v] )
lut[v] = index
index += 1
# Create a new face array
new_faces = lut[mesh.faces].reshape( len(mesh.faces), 3 )
# Update the mesh
mesh.vertices = array( new_vertices )
mesh.faces = new_faces
mesh.UpdateNormals()
mesh.UpdateNeighbors()
#
# Invert the orientation of every face in a given mesh
#
def InvertFacesOrientation( mesh ) :
# Swap two vertices in each face
mesh.faces[ :, [0, 1, 2] ] = mesh.faces[ :, [1, 0, 2] ]
# Recompute face and vertex normals
mesh.UpdateNormals()
|
Python
| 0.000009
|
@@ -2237,16 +2237,20 @@
Remove
+the
isolated
@@ -3314,16 +3314,274 @@
ors()%0A%0A%0A
+#%0A# Remove the degenerated faces of a given mesh%0A#%0Adef RemoveDegeneratedFaces( mesh ) :%0A%09%09%0A%09tris = mesh.vertices%5B mesh.faces %5D%0A%09face_normals = cross( tris%5B::,1%5D - tris%5B::,0%5D , tris%5B::,2%5D - tris%5B::,0%5D )%0A%09print (sqrt((face_normals**2).sum(axis=1))%3E0).all()%0A%0A%0A
#%0A# Inve
@@ -3800,28 +3800,30 @@
rmals%0A%09mesh.UpdateNormals()%0A
+%09%0A
|
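
The added check flags degenerate triangles by testing that every face normal — the cross product of two edge vectors — has nonzero length, since a zero-area face yields the zero vector. A self-contained version with one proper and one collapsed (collinear) triangle; the coordinates are invented:

import numpy as np

vertices = np.array([[0.0, 0.0, 0.0],
                     [1.0, 0.0, 0.0],
                     [0.0, 1.0, 0.0],
                     [2.0, 0.0, 0.0]])
faces = np.array([[0, 1, 2],    # proper triangle, area 0.5
                  [0, 1, 3]])   # three collinear points, area 0

tris = vertices[faces]
normals = np.cross(tris[:, 1] - tris[:, 0], tris[:, 2] - tris[:, 0])
areas = np.sqrt((normals ** 2).sum(axis=1)) / 2.0

print(areas)                    # first face 0.5, second 0
print((areas > 0).all())        # False -> at least one degenerate face
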
bf4a197618bf09a164f03a53cd6998bcd6ee8196
|
Fix function name
|
google/cloud/security/common/data_access/violation_format.py
|
google/cloud/security/common/data_access/violation_format.py
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides formatting functions for violations"""
import json
def format_policy_violation(violation):
"""Format the policy violation data into a tuple.
Also flattens the RuleViolation, since it consists of the resource,
rule, and members that don't meet the rule criteria.
Various properties of RuleViolation may also have values that exceed the
declared column length, so truncate as necessary to prevent MySQL errors.
Args:
violation (namedtuple): The Policy RuleViolation. This is a named
            tuple with the following attributes 'resource_type', 'resource_id',
'rule_name', 'violation_type' and 'violation_data'
Yields:
tuple: A tuple of the rule violation properties.
"""
resource_type = violation.resource_type
if resource_type:
resource_type = resource_type[:255]
resource_id = violation.resource_id
if resource_id:
resource_id = str(resource_id)[:255]
rule_name = violation.rule_name
if rule_name:
rule_name = rule_name[:255]
yield (resource_type,
resource_id,
rule_name,
violation.rule_index,
violation.violation_type,
json.dumps(violation.violation_data))
def format_groups_violation(violation):
"""Format the groups violation data into a tuple.
Args:
violation (namedtuple): The groups violation. This is a named tuple
with the following attributes 'member_email','parent.member_email',
'violated_rule_names'
Yields:
tuple: A tuple of the violation properties.
"""
member_email = violation.member_email
if member_email:
member_email = member_email[:255]
group_email = violation.parent.member_email
if group_email:
group_email = group_email[:255]
violated_rule_names = json.dumps(violation.violated_rule_names)
yield (member_email,
group_email,
violated_rule_names)
|
Python
| 0.999896
|
@@ -649,15 +649,8 @@
mat_
-policy_
viol
|
052062a3c267b56d9ee99dd772b34ecdf5b27961
|
add rmtag as alias for removetag
|
Source/Tagging.py
|
Source/Tagging.py
|
import jsonpickle
import tabulate
import BotpySE as bp
# Our own little re wrapper library
import regex as re
class Tag:
def __init__(self, name, regex, user_id, user_name):
normalized_regex = re.compile(regex).pattern
self.name = name
self.regex = normalized_regex
self.user_id = user_id
self.user_name = user_name
self.format = "[tag:" + self.name + "]"
class TagManager:
def __init__(self, filename='./tags.json'):
self.tags = list()
self.filename = filename
try:
with open(filename, 'r') as file_handle:
tags = jsonpickle.decode(file_handle.read())
self.tags = tags
except FileNotFoundError:
pass
def add(self, tag):
self.tags.append(tag)
self.save()
def remove(self, name):
for tag in self.tags:
if tag.name == name:
self.tags.remove(tag)
self.save()
return True
return False
def remove_matching(self, expr):
r = re.compile(expr)
remove = []
for tag in self.tags:
if r.search(tag.regex):
remove.append(tag)
for tag in remove:
self.tags.remove(tag)
self.save()
return remove
def filter_post(self, post):
tags = list()
for tag in self.tags:
if re.search(tag.regex, post):
tags.append(tag.format)
return " ".join(tags) + post
def list(self):
for tag in self.tags:
yield tag
def save(self):
encoded = jsonpickle.encode(self.tags)
with open(self.filename, "w") as file_handle:
file_handle.write(encoded)
class CommandListTags(bp.Command):
@staticmethod
def usage():
return ["listtags", "list tags", "tags", "all tags"]
def run(self):
tag_list = list()
for tag in self.command_manager.tags.list():
tag_list.append([tag.name, tag.regex, tag.user_name])
table = tabulate.tabulate(
tag_list, headers=["Name", "Regex", "Added By"], tablefmt="orgtbl")
self.post(" " + re.sub('\n', '\n ', table), False)
class CommandAddTag(bp.Command):
@staticmethod
def usage():
# Regexes may have a space; thus last part of usage is always "..."
return [
"addtag * ...", "add tag * ...",
"add tag with name * and regex ...",
"add tag * matching ...",
"add tag * for ..."]
def privileges(self):
return 1
def run(self):
user_id = self.message.user.id
user_name = self.message.user.name
tag_name = self.arguments[0]
if len(self.arguments) == 1:
self.reply('Need two arguments: tag and regex')
return
regex = ' '.join(self.arguments[1:])
try:
self.command_manager.tags.add(
Tag(tag_name, regex, user_id, user_name))
self.reply("Added [tag:{0}] for regex {1}".format(tag_name, regex))
except re.error as err:
self.reply("Could not add tag for regex {0}: {1}".format(
regex, err))
class CommandRemoveTag(bp.Command):
@staticmethod
def usage():
return [
"removetag ...", "remove tag ...",
"delete tag ...", "destroy tag ...",
"poof tag ...", "deletetag ..."]
def privileges(self):
return 1
def run(self):
regex = ' '.join(self.arguments)
try:
removed = self.command_manager.tags.remove_matching(regex)
except re.error as re_err:
self.reply("Could not remove tag for regex `{0}`: `{1}`".format(
regex, re_err))
return
if not removed:
self.reply("No tag found with regex `{0}`".format(regex))
return
self.reply("Removed {0} tags matching regex `{1}`.".format(
len(removed), regex))
|
Python
| 0
|
@@ -3365,32 +3365,45 @@
remove tag ...%22,
+ %22rmtag ...%22,
%0A %22de
|
17b9ccbcf940c653c2ee0994eefec87ca2961b75
|
Fix extension scraper on Python 3.x
|
Release/Product/Python/PythonTools/ExtensionScraper.py
|
Release/Product/Python/PythonTools/ExtensionScraper.py
|
# ############################################################################
#
# Copyright (c) Microsoft Corporation.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# vspython@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
# ###########################################################################
import imp
import sys
from os import path
try:
# disable error reporting in our process, bad extension modules can crash us, and we don't
# want a bunch of Watson boxes popping up...
import ctypes
ctypes.windll.kernel32.SetErrorMode(3) # SEM_FAILCRITICALERRORS / SEM_NOGPFAULTERRORBOX
except:
pass
# Expects either:
# scrape [filename] [output_path]
# Scrapes the file and saves the analysis to the specified filename, exits w/ nonzero exit code if anything goes wrong.
if len(sys.argv) == 4:
if sys.argv[1] == 'scrape':
filename = sys.argv[2]
mod_name = path.splitext(path.basename(filename))[0]
try:
module = imp.load_dynamic(mod_name, filename)
except ImportError, e:
print e
sys.exit(1)
import PythonScraper
analysis = PythonScraper.generate_module(module)
PythonScraper.write_analysis(sys.argv[3], analysis)
|
Python
| 0.000001
|
@@ -1483,12 +1483,44 @@
rror
-, e:
+:%0D%0A e = sys.exc_info()%5B1%5D
%0D%0A
|
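
`except ImportError, e` is a syntax error on Python 3, while sys.exc_info()[1] both parses and runs on every version, which is why the fix uses it. A tiny demonstration of the version-neutral capture; the module name probed is deliberately bogus:

import sys

def import_or_report(name):
    try:
        __import__(name)
        return None
    except ImportError:
        # sys.exc_info()[1] is the current exception instance; unlike the
        # py2-only comma form, this spelling is valid source everywhere
        e = sys.exc_info()[1]
        return str(e)

print(import_or_report('no_such_module_xyz'))   # the ImportError message
print(import_or_report('os'))                   # None
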
9780274756ef4bc2966a0f8290ca28bd3c1e8163
|
update dev version after 0.31.1 tag [skip ci]
|
py/desisim/_version.py
|
py/desisim/_version.py
|
__version__ = '0.31.1'
|
Python
| 0
|
@@ -14,10 +14,18 @@
'0.31.1
+.dev1940
'%0A
|
bc905d9d01060fdaff2765f64e810b4eb927820e
|
fix arcseciv typo
|
py/legacypipe/units.py
|
py/legacypipe/units.py
|
def get_units_for_columns(cols, bands=[], extras=None):
deg = 'deg'
degiv = '1/deg^2'
arcsec = 'arcsec'
arcseciv = '1/arcsec^2'
flux = 'nanomaggy'
fluxiv = '1/nanomaggy^2'
pm = 'mas/yr'
pmiv = '1/(mas/yr)^2'
unitmap = dict(
ra=deg, dec=deg, ra_ivar=degiv, dec_ivar=degiv,
ebv='mag',
shape_r=arcsec,
shape_r_ivar=arcsec_iv)
unitmap.update(pmra=pm, pmdec=pm, pmra_ivar=pmiv, pmdec_ivar=pmiv,
parallax='mas', parallax_ivar='1/mas^2')
unitmap.update(gaia_phot_g_mean_mag='mag',
gaia_phot_bp_mean_mag='mag',
gaia_phot_rp_mean_mag='mag')
# units used in forced phot.
unitmap.update(exptime='sec',
flux=flux, flux_ivar=fluxiv,
apflux=flux, apflux_ivar=fluxiv,
psfdepth=fluxiv, galdepth=fluxiv,
sky='nanomaggy/arcsec^2',
psfsize=arcsec,
fwhm='pixels',
ccdrarms=arcsec, ccddecrms=arcsec,
skyrms='counts/sec',
                   dra=arcsec, ddec=arcsec,
dra_ivar=arcsec_iv, ddec_ivar=arcsec_iv)
# Fields that have band suffixes
funits = dict(
flux=flux, flux_ivar=fluxiv,
apflux=flux, apflux_ivar=fluxiv, apflux_resid=flux,
apflux_blobresid=flux,
psfdepth=fluxiv, galdepth=fluxiv, psfsize=arcsec,
fiberflux=flux, fibertotflux=flux,
lc_flux=flux, lc_flux_ivar=fluxiv,
)
for b in bands:
unitmap.update([('%s_%s' % (k, b), v)
for k,v in funits.items()])
if extras is not None:
unitmap.update(extras)
# Create a list of units aligned with 'cols'
units = [unitmap.get(c, '') for c in cols]
return units
|
Python
| 0.018115
|
@@ -372,33 +372,32 @@
pe_r_ivar=arcsec
-_
iv)%0A unitmap.
@@ -1157,17 +1157,16 @@
r=arcsec
-_
iv, ddec
@@ -1177,17 +1177,16 @@
r=arcsec
-_
iv)%0A
|
edc13c1309d550a3acc5b833d0efedaf7be4045e
|
Fix several off-by-one errors in split_tex_string() and add regression tests.
|
pybtex/bibtex/utils.py
|
pybtex/bibtex/utils.py
|
# Copyright (C) 2007, 2008, 2009 Andrey Golovizin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
def bibtex_len(s):
    """Return the number of characters in s, taking TeX's special chars into account.
"""
#FIXME stub
return len(s)
def split_name_list(string):
"""
Split a list of names, separated by ' and '.
>>> split_name_list('Johnson and Peterson')
['Johnson', 'Peterson']
>>> split_name_list('Armand and Peterson')
['Armand', 'Peterson']
>>> split_name_list('Armand and anderssen')
['Armand', 'anderssen']
>>> split_name_list('What a Strange{ }and Bizzare Name! and Peterson')
['What a Strange{ }and Bizzare Name!', 'Peterson']
>>> split_name_list('What a Strange and{ }Bizzare Name! and Peterson')
['What a Strange and{ }Bizzare Name!', 'Peterson']
"""
return split_tex_string(string, ' and ')
def split_tex_string(string, sep=' ', strip=True):
"""Split a string using the given separator, ignoring separators at brace level > 0."""
brace_level = 0
name_start = 0
result = []
end = len(string) - 1
sep_len = len(sep)
for pos, char in enumerate(string):
if char == '{':
brace_level += 1
elif char == '}':
brace_level -= 1
elif (
brace_level == 0 and
string[pos:pos + len(sep)].lower() == sep and
pos > 0 and
pos + len(sep) < end
):
result.append(string[name_start:pos])
name_start = pos + len(sep)
result.append(string[name_start:])
if strip:
return [part.strip() for part in result]
else:
return result
|
Python
| 0
|
@@ -1598,20 +1598,150 @@
vel %3E 0.
+%0A %3E%3E%3E split_tex_string('')%0A %5B%5D%0A %3E%3E%3E split_tex_string('a')%0A %5B'a'%5D%0A %3E%3E%3E split_tex_string('on a')%0A %5B'on', 'a'%5D%0A
%22%22%22%0A
-
%0A bra
@@ -1792,19 +1792,26 @@
%5B%5D%0A
+string_l
en
-d
= len(s
@@ -1820,12 +1820,8 @@
ing)
- - 1
%0A
@@ -1836,24 +1836,36 @@
= len(sep)%0A
+ pos = 0%0A
for pos,
@@ -2159,19 +2159,26 @@
(sep) %3C
+string_l
en
-d
%0A
@@ -2267,24 +2267,60 @@
+ len(sep)%0A
+ if name_start %3C string_len:%0A
result.a
|
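A quick sketch of the brace-level rule the splitter implements, using a value from the doctests above:

split_name_list('What a Strange{ }and Bizzare Name! and Peterson')
# -> ['What a Strange{ }and Bizzare Name!', 'Peterson']
# ' and ' inside {...} sits at brace_level > 0, so it is never a separator.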
d883d551f74bbfa0c0ee0db6b94aa98a3af41fca
|
Expanded NUMERIC_MISSING_VALUES to have 64 values
|
pybufrkit/constants.py
|
pybufrkit/constants.py
|
"""
pybufrkit.constants
~~~~~~~~~~~~~~~~~~~
Various constants used in the module.
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import os
# Set the base directory accordingly for PyInstaller
# noinspection PyUnresolvedReferences,PyProtectedMember
BASE_DIR = sys._MEIPASS if getattr(sys, 'frozen', False) else os.path.dirname(__file__)
# Default directory to read the definition JSON files
DEFAULT_DEFINITIONS_DIR = os.path.join(BASE_DIR, 'definitions')
# Default directory to load the BUFR tables
DEFAULT_TABLES_DIR = os.path.join(BASE_DIR, 'tables')
NBITS_PER_BYTE = 8
MESSAGE_START_SIGNATURE = b'BUFR'
MESSAGE_STOP_SIGNATURE = b'7777'
PARAMETER_TYPE_UNEXPANDED_DESCRIPTORS = 'unexpanded_descriptors'
PARAMETER_TYPE_TEMPLATE_DATA = 'template_data'
BITPOS_START = 'bitpos_start'
# A list of numbers that corresponds to missing values for a number of bits up to 32
NUMERIC_MISSING_VALUES = [2 ** i - 1 for i in range(33)]
# Number of bits for represent number of bits used for difference
NBITS_FOR_NBITS_DIFF = 6
UNITS_STRING = 'CCITT IA5'
UNITS_FLAG_TABLE = 'FLAG TABLE'
UNITS_CODE_TABLE = 'CODE TABLE'
UNITS_COMMON_CODE_TABLE_C1 = 'Common CODE TABLE C-1'
INDENT_CHARS = ' '
|
Python
| 0.999172
|
@@ -915,10 +915,10 @@
to
-32
+64
%0ANUM
@@ -970,10 +970,10 @@
nge(
-33
+65
)%5D%0A%0A
|
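A short illustration of the missing-value convention this commit widens: an i-bit BUFR field encodes "missing" as the all-ones bit pattern 2 ** i - 1, so covering 64-bit fields needs indices 0 through 64 (hence range(65)):

NUMERIC_MISSING_VALUES = [2 ** i - 1 for i in range(65)]
assert NUMERIC_MISSING_VALUES[8] == 0xFF          # 255, all eight bits set
assert NUMERIC_MISSING_VALUES[16] == 0xFFFF
assert NUMERIC_MISSING_VALUES[64] == 2 ** 64 - 1  # only reachable with range(65)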
4438b80d7e4fbbbb3ee097416ffa7c54e2c02fb4
|
Fix more instances of type -> isinstance
|
pyexperiment/Config.py
|
pyexperiment/Config.py
|
"""Provides an easy way to configure a python application. Basically
implements a singleton configuration at module level.
Basic usage: Load the (singleton) configuration with load, access the
values like you would in a dictionary.
Written by Peter Duerr.
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
import os
import configobj
import validate
from collections import MutableMapping
from pyexperiment.utils.Singleton import Singleton
from pyexperiment.utils.printers import print_bold
class Config(Singleton):
"""Represents a singleton configuration object.
"""
CONFIG_SPEC_PATH = 'configspec.ini'
"""Path of the file with the specification for configurations.
""" # pylint:disable=W0105
CONFIG = None
"""The global configuration. Initialized with load. Access it with
get_value and set_value.
""" # pylint:disable=W0105
def __init__(self):
"""Initializer
"""
# Members will be initialized by load
self.config = None
self.read_from_file = None
self.filename = None
super(Config, self).__init__()
def override_with_args(self, config, options=None):
"""Override configuration with command line arguments and validate
against specification.
"""
# Override options with command line arguments
if options is not None:
for key, value in options:
config_level = config
split_key = key.split('.')
if len(split_key) == 1:
if 'basic' not in config:
config['basic'] = configobj.Section(config_level, 1,
config, {})
config['basic'][key] = value
else:
depth = 1
while len(split_key) > 1:
if not split_key[0] in config_level:
config_level[split_key[0]] \
= configobj.Section(
config_level, depth, config, {})
else:
pass
config_level = config_level[split_key[0]]
split_key = split_key[1:]
depth += 1
config_level[split_key[0]] = value
# Validate it
validator = validate.Validator()
result = config.validate(validator, copy=True, preserve_errors=True)
if type(result) != bool:
raise RuntimeError("Configuration does not adhere"
" to the specification: %s" %
configobj.flatten_errors(self.config, result))
else:
if result:
return config
else:
raise RuntimeError("Something strange going on...")
def load(self, filename,
spec_filename=CONFIG_SPEC_PATH,
options=None,
default_spec=None):
"""Loads a configuration from filename (or string). Missing values
will be read from the specification file or string.
"""
# Check if config file exists
read_from_file = os.path.isfile(filename)
if read_from_file:
self.filename = filename
# Create the configuration (overriding the default with user
# specs if necessary)
user_config = configobj.ConfigObj(filename, configspec=spec_filename)
user_config = self.override_with_args(user_config, options)
if default_spec is not None:
default_config = configobj.ConfigObj(filename,
configspec=default_spec)
default_config = self.override_with_args(default_config, options)
default_config.merge(user_config)
self.config = default_config
else:
self.config = user_config
# Add some more info
self.config.read_from_file = read_from_file
def save(self, filename):
"""Write configuration to file
"""
if self.config is None:
raise RuntimeError("Configuration not initialized yet.")
else:
if filename is None:
print("Too few arguments (provide filename for configuration)")
return
with open(filename, 'wb') as outfile:
self.config.write(outfile)
    def __len__(self):
"""Get the number of configuration items
"""
if self.config is not None:
return len(self.config)
else:
return 0
def __iter__(self):
"""Returns an iterator over the configuration
"""
return iter(self.config)
def __getitem__(self, name):
"""Get configuration item. The name should be of the form
section.subsection...item
"""
if self.config is None:
raise RuntimeError("Configuration not loaded yet")
else:
split_name = name.split(".")
level = 0
section = self.config
while level < len(split_name) - 1:
try:
section = section[split_name[level]]
level += 1
except AttributeError as err:
                    raise AttributeError(
                        "Configuration does not contain section '%s',"
                        " (err: '%s')", ".".join(split_name[0:level]), err)
try:
value = section[split_name[level]]
except AttributeError as err:
raise AttributeError(
"Configuration does not contain value '%s', (err: '%s')",
name, err)
return value
def __setitem__(self, name, value):
"""Set configuration item
"""
raise NotImplementedError("Not implemented yet. Cannot set %s -> %s",
name, value)
def __delitem__(self, name):
"""Delete configuration item
"""
        raise NotImplementedError(
            "Not implemented yet. Cannot delete %s", name)
def __repr__(self):
"""Pretty print the configuration
"""
repr_str = ''
if self.read_from_file:
repr_str += "Configuration read from '%s':\n" % self.filename
else:
repr_str += "Configuration created from specs:\n"
def repr_section(dictionary, prefix=""):
"""Print a section of the configuration
"""
repr_str = ''
for key in dictionary.keys():
if type(dictionary[key]) == configobj.Section:
repr_str += prefix + key + '\n'
repr_str += repr_section(dictionary[key], prefix + ' ')
else:
repr_str += (
prefix + key + ' = ' + repr(dictionary[key]) + '\n'
)
return repr_str
repr_str += repr_section(self.config)
return repr_str
|
Python
| 0.000068
|
@@ -2634,19 +2634,29 @@
if
-typ
+not isinstanc
e(result
) !=
@@ -2655,17 +2655,15 @@
sult
-) !=
+,
bool
+)
:%0A
@@ -6815,11 +6815,17 @@
if
-typ
+isinstanc
e(di
@@ -6837,20 +6837,17 @@
ary%5Bkey%5D
-) ==
+,
configo
@@ -6856,16 +6856,17 @@
.Section
+)
:%0A
|
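A minimal sketch of why this commit swaps exact-type comparison for isinstance (generic classes here; configobj.Section is the real case in the code above):

class Base(object):
    pass

class Derived(Base):
    pass

d = Derived()
assert type(d) != Base        # exact-type comparison misses subclasses
assert isinstance(d, Base)    # isinstance matches subclasses as well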
52cc92a7b3c174af9eb287e3444f65718b1de35a
|
version bump
|
pykechain/__about__.py
|
pykechain/__about__.py
|
name = 'pykechain'
description = 'KE-chain Python SDK'
version = '3.0.0-rc14'
author = 'KE-works BV'
email = 'support+pykechain@ke-works.com'
|
Python
| 0.000001
|
@@ -70,13 +70,8 @@
.0.0
--rc14
'%0A%0Aa
|
5dfd10ac73a374797ed53d2318bdccd616036140
|
Add test for mismatching type variables on arguments or return values.
|
sagitta/test/test_arrow.py
|
sagitta/test/test_arrow.py
|
#!/usr/bin/env python
# encoding: utf-8
#
# pylint: disable=C0111,E0611,E1101,R0201
# C0111: Missing docstring
# E0611: No name 'x' in module 'y'
# E1101: Module 'x' has no 'y' member
# R0201: Method could be a function
import math
import pytest
from sagitta.arrow import arrow, signature, typed
from sagitta.cat import Num, Real, Int, Bool, Ord
from sagitta.typevar import A
from sagitta.exceptions import StrictTypeError
from sagitta.test import raises
class TestTypedInit(object):
def test_init(self):
t = arrow(Bool, A, Bool, A=Num)
assert t._fun is Bool
assert t.signature == signature(A, Bool, A=Num)
@pytest.mark.parametrize('types', [
[], [A]
])
def test_less_than_two_types_raises(self, types):
with raises(TypeError, 'Needs at least two types:'):
args = [int] + types
arrow(*args)
def test_uncallable_fun_raises(self):
uncallable = 5
with raises(StrictTypeError, 'is not callable'):
arrow(uncallable, A, A)
class TestTypedCall(object):
"""
It should check argument and return types.
"""
# ---- Functions to be typed in tests ----
@staticmethod
def no(x):
return not(x)
@staticmethod
def add(x, y):
return x + y
@staticmethod
def argcount(*args):
return len(args)
# -----
def test_one_arg_fun(self):
ano = arrow(self.no, A, Bool)
val = True
assert ano(val) is not(val)
assert type(ano(val)) is bool
def test_two_args_fun(self):
asum = arrow(self.add, A, A, Int)
args = (1, 2)
assert asum(*args) == self.add(*args)
assert type(asum(*args)) is int
def test_wrong_arg_count_raises(self):
with raises(StrictTypeError, 'Wrong number of arguments in'):
t = arrow(self.no, A, Bool)
t('one', 'extra') # <-- Expects only one argument
@pytest.mark.parametrize('length', range(1, 4))
def test_wrong_arg_types_raises(self, length):
arglist = [Bool, Int, Real]
misfits = [4, 0.7, 1 + 1j]
with raises(StrictTypeError, 'is of wrong type for signature'):
t = arrow(*[self.argcount] + arglist[:length] + [Int])
t(*misfits[:length])
def test_wrong_return_type_raises(self):
with raises(StrictTypeError, 'is of wrong type for signature'):
t = arrow(self.argcount, A, Bool) # <-- Bool should be Int
t(1)
class TestArrowComposition(object):
"""
Test arrow composition operations
"""
def test_compose(self):
sum_of_squares = lambda x, y: x**2 + y**2
f = arrow(sum_of_squares, Int, Int, Int)
g = arrow(math.sqrt, Int, Real)
assert (f >> g)(3, 4) == 5.0
def test_compose_left(self):
sum_of_squares = lambda x, y: x**2 + y**2
f = arrow(sum_of_squares, Int, Int, Int)
g = arrow(math.sqrt, Int, Real)
assert (g << f)(3, 4) == 5.0
class TestTypedDecorator(object):
"""
It should work as a function decorator.
"""
def test_function_decorator(self):
@typed(A, A, Bool, A=Ord)
def less_than(a, b):
return a < b
assert less_than(3, 7) is True
assert less_than(7, 7) is False
assert less_than(7, 3) is False
assert type(less_than) is arrow
|
Python
| 0
|
@@ -2456,11 +2456,10 @@
# %3C-
-- B
+ b
ool
@@ -2490,16 +2490,383 @@
t(1)%0A%0A
+ def test_mismatching_typevariables_raises(self):%0A with raises(StrictTypeError, 'Expected argument'):%0A asum = arrow(self.add, A, A, A)%0A asum(3, 4.0) # %3C- both should be of the same type%0A with raises(StrictTypeError, 'Expected argument'):%0A afloat = arrow(float, A, A) # %3C- should return float%0A afloat(1)%0A%0A
class Te
|
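A small sketch mirroring TestArrowComposition above (hypothetical lambdas; arrow and Int are the names imported in the test module):

inc = arrow(lambda x: x + 1, Int, Int)
dbl = arrow(lambda x: x * 2, Int, Int)
assert (inc >> dbl)(3) == 8   # left-to-right: dbl(inc(3))
assert (dbl << inc)(3) == 8   # the same pipeline written right-to-left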
a1dd37c9127501ad440c7777d14fb28b1b59b85b
|
Add list guests function
|
characters.py
|
characters.py
|
from adventurelib import Item, Bag
class Man(Item):
subject_pronoun = 'he'
object_pronoun = 'him'
class Woman(Item):
subject_pronoun = 'she'
object_pronoun = 'her'
dr_black = the_victim = Man('Dr. Black', 'Dr Black', 'the victim')
dr_black.def_name = 'the victim'
dr_black.description = """\
Dr. Black was the much beloved host and owner of Tudor Close. His untimely
death has come as a shock and surprise to most of tonight's guests."""
miss_scarlet = Woman('Miss Scarlet')
miss_scarlet.def_name = 'Miss Scarlet'
miss_scarlet.description = """\
Miss Scarlet is well liked by the younger gentlemen at tonight's gathering.
She is mistrusted by some and seems to have quite the salacious reputation."""
col_mustard = Man('Colonel Mustard', 'Col. Mustard', 'Col Mustard')
col_mustard.def_name = 'Colonel Mustard'
col_mustard.description = """\
The Colonel is a stern man who accepts no "nonsense". His long and esteemed
military career has left him with"""
mrs_white = Woman('Mrs. White', 'Mrs White')
mrs_white.def_name = 'Mrs. White'
rev_green = Man(
'Reverend Green', 'Rev. Green', 'Rev Green', 'Mr. Green', 'Mr Green')
rev_green.def_name = 'Reverend Green'
mrs_peacock = Woman('Mrs. Peacock', 'Mrs Peacock')
mrs_peacock.def_name = 'Mrs. Peacock'
prof_plum = Man('Professor Plum', 'Prof. Plum', 'Prof Plum')
prof_plum.def_name = 'Professor Plum'
everyone = Bag([
miss_scarlet, col_mustard, mrs_white, rev_green, mrs_peacock, prof_plum
])
if __name__ == '__main__':
assert prof_plum == everyone.find('Prof. Plum')
assert prof_plum != everyone.find('Plum')
|
Python
| 0
|
@@ -27,16 +27,22 @@
tem, Bag
+, when
%0A%0A%0Aclass
@@ -366,19 +366,23 @@
of
-Tudor Close
+Albermore Manor
. Hi
@@ -1385,24 +1385,22 @@
Plum'%0A%0A
-everyone
+guests
= Bag(%5B
@@ -1481,16 +1481,177 @@
um%0A%5D)%0A%0A%0A
+@when('list guests')%0Adef list_rooms():%0A print(%22A nearby guest list for tonight's gathering has the following names:%22)%0A for c in guests:%0A print(c)%0A%0A%0A
if __nam
@@ -1693,24 +1693,22 @@
plum ==
-everyone
+guests
.find('P
@@ -1747,16 +1747,14 @@
!=
-everyone
+guests
.fin
|
544cd6b962f8763cc11486a162ee3a86e019565f
|
fix typo (#5430)
|
python/mxnet/rnn/io.py
|
python/mxnet/rnn/io.py
|
# coding: utf-8
# pylint: disable=too-many-arguments, too-many-locals
"""Definition of various recurrent neural network cells."""
from __future__ import print_function
import bisect
import random
import numpy as np
from ..io import DataIter, DataBatch
from .. import ndarray
def encode_sentences(sentences, vocab=None, invalid_label=-1, invalid_key='\n', start_label=0):
"""Encode sentences and (optionally) build a mapping
from string tokens to integer indices. Unknown keys
will be added to vocabulary.
Parameters
----------
sentences : list of list of str
A list of sentences to encode. Each sentence
should be a list of string tokens.
vocab : None or dict of str -> int
Optional input Vocabulary
invalid_label : int, default -1
Index for invalid token, like <end-of-sentence>
invalid_key : str, default '\n'
Key for invalid token. Use '\n' for end
of sentence by default.
start_label : int
lowest index.
Returns
-------
result : list of list of int
encoded sentences
vocab : dict of str -> int
result vocabulary
"""
idx = start_label
if vocab is None:
vocab = {invalid_key: invalid_label}
new_vocab = True
else:
new_vocab = False
res = []
for sent in sentences:
coded = []
for word in sent:
if word not in vocab:
assert new_vocab, "Unknow token %s"%word
if idx == invalid_label:
idx += 1
vocab[word] = idx
idx += 1
coded.append(vocab[word])
res.append(coded)
return res, vocab
class BucketSentenceIter(DataIter):
"""Simple bucketing iterator for language model.
Label for each step is constructed from data of
next step.
Parameters
----------
sentences : list of list of int
encoded sentences
batch_size : int
batch_size of data
invalid_label : int, default -1
key for invalid label, e.g. <end-of-sentence>
dtype : str, default 'float32'
data type
buckets : list of int
size of data buckets. Automatically generated if None.
data_name : str, default 'data'
name of data
label_name : str, default 'softmax_label'
name of label
layout : str
format of data and label. 'NT' means (batch_size, length)
and 'TN' means (length, batch_size).
"""
def __init__(self, sentences, batch_size, buckets=None, invalid_label=-1,
data_name='data', label_name='softmax_label', dtype='float32',
layout='NTC'):
super(BucketSentenceIter, self).__init__()
if not buckets:
buckets = [i for i, j in enumerate(np.bincount([len(s) for s in sentences]))
if j >= batch_size]
buckets.sort()
ndiscard = 0
self.data = [[] for _ in buckets]
for i, sent in enumerate(sentences):
buck = bisect.bisect_left(buckets, len(sent))
if buck == len(buckets):
ndiscard += 1
continue
buff = np.full((buckets[buck],), invalid_label, dtype=dtype)
buff[:len(sent)] = sent
self.data[buck].append(buff)
self.data = [np.asarray(i, dtype=dtype) for i in self.data]
print("WARNING: discarded %d sentences longer than the largest bucket."%ndiscard)
self.batch_size = batch_size
self.buckets = buckets
self.data_name = data_name
self.label_name = label_name
self.dtype = dtype
self.invalid_label = invalid_label
self.nddata = []
self.ndlabel = []
self.major_axis = layout.find('N')
self.default_bucket_key = max(buckets)
if self.major_axis == 0:
self.provide_data = [(data_name, (batch_size, self.default_bucket_key))]
self.provide_label = [(label_name, (batch_size, self.default_bucket_key))]
elif self.major_axis == 1:
self.provide_data = [(data_name, (self.default_bucket_key, batch_size))]
self.provide_label = [(label_name, (self.default_bucket_key, batch_size))]
else:
            raise ValueError("Invalid layout %s: Must be NT (batch major) or TN (time major)" % layout)
self.idx = []
for i, buck in enumerate(self.data):
self.idx.extend([(i, j) for j in range(0, len(buck) - batch_size + 1, batch_size)])
self.curr_idx = 0
self.reset()
def reset(self):
self.curr_idx = 0
random.shuffle(self.idx)
for buck in self.data:
np.random.shuffle(buck)
self.nddata = []
self.ndlabel = []
for buck in self.data:
label = np.empty_like(buck)
label[:, :-1] = buck[:, 1:]
label[:, -1] = self.invalid_label
self.nddata.append(ndarray.array(buck, dtype=self.dtype))
self.ndlabel.append(ndarray.array(label, dtype=self.dtype))
def next(self):
if self.curr_idx == len(self.idx):
raise StopIteration
i, j = self.idx[self.curr_idx]
self.curr_idx += 1
if self.major_axis == 1:
data = self.nddata[i][j:j+self.batch_size].T
label = self.ndlabel[i][j:j+self.batch_size].T
else:
data = self.nddata[i][j:j+self.batch_size]
label = self.ndlabel[i][j:j+self.batch_size]
return DataBatch([data], [label],
bucket_key=self.buckets[i],
provide_data=[(self.data_name, data.shape)],
provide_label=[(self.label_name, label.shape)])
|
Python
| 0.000054
|
@@ -1460,16 +1460,17 @@
%22Unknow
+n
token %25
|
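A minimal sketch of encode_sentences with its default arguments (illustrative tokens):

encoded, vocab = encode_sentences([['hello', 'world'], ['hello']])
# Indices are assigned in first-seen order starting at start_label=0,
# and the invalid_key '\n' is pre-mapped to invalid_label=-1:
assert encoded == [[0, 1], [0]]
assert vocab == {'\n': -1, 'hello': 0, 'world': 1}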
4a4cdc839be140dc9486b2cffd8c065365a70404
|
Add /chatgroup tpahere to chatgroups help message
|
chatgroups.py
|
chatgroups.py
|
#pylint: disable = F0401
from helpers import *
from basecommands import simplecommand
from java.util.UUID import fromString as juuid
from traceback import format_exc as trace
groups = open_json_file("chatgroups", {})
cg_defaultkey = ":"
cg_keys = open_json_file("chatgroup_keys", {})
cg_toggle_list = []
def get_key(uuid):
key = cg_keys.get(uuid)
return key if key != None else cg_defaultkey
@hook.command("chatgroup")
def on_chatgroup_command(sender, command, label, args):
plugin_header(sender, "ChatGroups")
sender_id = uid(sender)
if len(args) == 1 and args[0] == "leave":
if sender_id in groups.keys():
groupchat(sender, "left the group", True)
group = groups[sender_id]
del(groups[sender_id])
save_groups()
else:
msg(sender, "&aYou can't leave no group, derp!")
elif len(args) == 1 and args[0] == "info":
if sender_id in groups.keys():
group = groups[sender_id]
msg(sender, "&aCurrent chatgroup: %s" % group)
users = [user.getDisplayName() for user in [server.getPlayer(juuid(uuid)) for uuid, ugroup in groups.iteritems() if ugroup == group] if user]
msg(sender, "&aUsers in this group:")
msg(sender, "&a%s" % ", ".join(users))
else:
msg(sender, "&aYou're in no chatgroup.")
elif len(args) == 2 and args[0] == "join":
groups[sender_id] = args[1]
groupchat(sender, "joined the group", True)
save_groups()
msg(sender, "&aYour chatgroup is set to '%s'" % args[1])
msg(sender, "&aUse chat like '&e%s<message>' to send messages to this group." % get_key(sender_id))
elif len(args) == 1 and args[0] == "key":
msg(sender, "&aYour chatgroup key is currently: '&c%s&a'" % get_key(sender_id))
elif len(args) == 1 and args[0] == "tpahere":
if sender_id in groups.keys():
do_for_chatgroup(groups[sender_id], send_tpa_request, sender)
msg(sender, "&aSent a tpahere request to all users in your chatgroup")
else:
msg(sender, "&cYou have to be in a chatgroup to do that")
else:
msg(sender, "&e/chatgroup join <name>")
msg(sender, "&e/chatgroup leave")
msg(sender, "&e/chatgroup info")
msg(sender, "&e/chatgroup key")
@hook.command("cgt")
def on_cgt_command(sender, command, label, args):
p = uid(sender)
if p in cg_toggle_list:
cg_toggle_list.remove(p)
msg(sender, "&8[&bCG&8] &e&oCG toggle: off")
else:
cg_toggle_list.append(p)
msg(sender, "&8[&bCG&8] &e&oCG toggle: on")
return True
def groupchat(sender, message, ann = False):
group = groups.get(uid(sender))
if group == None:
msg(sender, "&cYou are not in a group!")
return
name = sender.getDisplayName()
if ann:
mesg = "&8[&bCG&8] &e&o%s&e&o %s" % (name, message)
else:
mesg = "&8[&bCG&8] &f%s&f: &6%s" % (name, message)
info("[ChatGroups] %s (%s): %s" % (sender.getDisplayName(), group, message))
do_for_chatgroup(group, msg, mesg)
def do_for_chatgroup(group, func, args):
for receiver in server.getOnlinePlayers():
if groups.get(uid(receiver)) == group:
func(receiver, args)
def send_tpa_request(receiver, sender):
if not receiver == sender:
runas(sender, "/tpahere " + receiver.getName())
def save_groups():
save_json_file("chatgroups", groups)
@hook.event("player.AsyncPlayerChatEvent", "normal")
def on_chat(event):
sender = event.getPlayer()
msge = event.getMessage()
if not event.isCancelled():
sender_id = uid(sender)
key = get_key(sender_id)
keylen = len(key)
if msge[:keylen] == key and sender_id in groups.keys():
groupchat(sender, msge[keylen:])
event.setCancelled(True)
elif sender_id in cg_toggle_list:
groupchat(sender, msge)
event.setCancelled(True)
@simplecommand("chatgroupkey",
aliases = ["cgkey"],
senderLimit = 0,
helpNoargs = True,
helpSubcmd = True,
description = "Sets a key character for chatting to your chatgroup",
usage = "<key>")
def chatgroupkey_command(sender, command, label, args):
key = " ".join(args)
uuid = uid(sender)
if key.lower() == "default" or key == cg_defaultkey:
del cg_keys[uuid]
save_keys()
return "&aYour chatgroup key was set to the default character: '&c%s&a'" % cg_defaultkey
cg_keys[uid(sender)] = key
save_keys()
return "&aYour chatgroup key was set to: '&c%s&a'" % key
def save_keys():
save_json_file("chatgroup_keys", cg_keys)
|
Python
| 0
|
@@ -2358,16 +2358,60 @@
p key%22)%0A
+ msg(sender, %22&e/chatgroup tpahere%22)%0A
%0A%0A@hook.
|
e2741bf38aef8014572feee2f9e8c3e1e10e5b67
|
Add message for `qisrc push` when pushing for review.
|
python/qisrc/review.py
|
python/qisrc/review.py
|
## Copyright (c) 2012 Aldebaran Robotics. All rights reserved.
## Use of this source code is governed by a BSD-style license that can be
## found in the COPYING file.
""" Handling pushing changes to gerrit
"""
import os
import re
import sys
import qibuild.log
import urlparse
from qibuild import ui
import qisrc.git
import qibuild.interact
import qibuild.config
def parse_git_url(url):
""" Parse a git url. Return a tuple: username, server, port
"""
match = re.match(r"""
(ssh://)?
(?P<username>[a-zA-Z0-9\._-]+)
@
(?P<server>[a-zA-Z0-9\._-]+)
(:(?P<port>\d+))?
""", url, re.VERBOSE)
if not match:
return None
groupdict = match.groupdict()
username = groupdict["username"]
server = groupdict["server"]
port = groupdict["port"]
return (username, server, port)
def http_to_ssh(url, project_name, username, gerrit_ssh_port=29418):
""" Return an ssh url from a http gerrit url and a username
"""
# Extract server from url:
# pylint: disable-msg=E1103
netloc = urlparse.urlsplit(url).netloc
server = netloc.split(":")[0]
res = "ssh://%s@%s:%i/%s" % (username, server, gerrit_ssh_port, project_name)
return res
def fetch_gerrit_hook(path, username, server, port):
""" Fetch the ``commit-msg`` hook from gerrit
"""
git_hooks_dir = os.path.join(path, ".git", "hooks")
if sys.platform.startswith("win"):
# scp on git bash does not handle DOS paths:
git_hooks_dir = qibuild.sh.to_posix_path(git_hooks_dir, fix_drive=True)
cmd = ["scp", "-P" , str(port),
"%s@%s:hooks/commit-msg" % (username, server),
git_hooks_dir]
qibuild.command.call(cmd, quiet=True)
def check_gerrit_connection(username, server, gerrit_ssh_port=29418):
""" Check that the user can connect to gerrit with ssh """
cmd = ["ssh", "-p", str(gerrit_ssh_port),
"%s@%s" % (username, server),
"gerrit", "version"]
try:
qibuild.command.call(cmd, quiet=True)
except qibuild.command.CommandFailedException:
return False
return True
def ask_gerrit_username(server, gerrit_ssh_port=29418):
""" Run a wizard to try to configure gerrit access
    If that fails, ask the user for their username
    If that fails, give up and suggest uploading the public key
"""
ui.info(ui.green, "Configuring gerrit ssh access ...")
# works on UNIX and git bash:
username = os.environ.get("USERNAME")
if not username:
username = qibuild.interact.ask_string("Please enter your username")
if not username:
return
ui.info("Checking gerrit connection with %s@%s:%i" %
(username, server, gerrit_ssh_port))
if check_gerrit_connection(username, server, gerrit_ssh_port):
ui.info("Success")
return username
ui.warning("Could not connect to ssh using username", username)
try_other = qibuild.interact.ask_yes_no("Do you want to try with another username?")
if not try_other:
return
username = qibuild.interact.ask_string("Please enter your username")
if not username:
return
if check_gerrit_connection(username, server, gerrit_ssh_port):
return username
def warn_gerrit():
"""Emit a warning telling the user that:
* connection to gerrit has failed
* qisrc push won't work
"""
ui.warning("""Failed to configure gerrit connection
`qisrc push` won't work
When you have resolved this problem, just re-run ``qisrc sync -a``""")
def setup_project(project_path, project_name, review_url, branch):
""" Setup a project for code review.
* Figure out the user name
* Add a remote called 'gerrit'
* Add the hook
    :return: a boolean to tell whether it's worth trying
for other projects
"""
git = qisrc.git.Git(project_path)
# Extract server from url:
# pylint: disable-msg=E1103
netloc = urlparse.urlsplit(review_url).netloc
server = netloc.split(":")[0]
# Get username
qibuild_cfg = qibuild.config.QiBuildConfig()
qibuild_cfg.read(create_if_missing=True)
access = qibuild_cfg.get_server_access(server)
if access:
username = access.username
else:
username = ask_gerrit_username(server)
if not username:
return False
# Add it to config so we ask only once
qibuild_cfg.set_server_access(server, username)
qibuild_cfg.write()
# Set a remote named 'gerrit'
remote_url = http_to_ssh(review_url, project_name, username)
git.set_remote("gerrit", remote_url)
# Configure review.remote in git/config so that
# qisrc push knows what to do:
git.set_config("review.remote", "gerrit")
# Install the hook
commit_hook = os.path.join(project_path, ".git", "hooks", "commit-msg")
if os.path.exists(commit_hook):
return True
ui.info("Configuring project for code review ...", end="")
(username, server, port) = parse_git_url(remote_url)
fetch_gerrit_hook(project_path, username, server, port)
ui.info(ui.green, "[OK]")
return True
def push(project_path, branch, review=True, dry_run=False, reviewers=None):
""" Push the changes for review.
Unless review is False, in this case, simply update
the remote gerrit branch
:param reviewers: A list of reviewers to invite to review
"""
git = qisrc.git.Git(project_path)
review_remote = git.get_config("review.remote")
args = list()
if dry_run:
args.append("--dry-run")
if not review_remote:
# Repository not configured for code review:
# we just follow the normal 'git push' behavior
git.push(*args)
return
args.append(review_remote)
if review:
args.append("%s:refs/for/%s" % (branch, branch))
if reviewers:
receive_pack = "git receive-pack"
for reviewer in reviewers:
receive_pack += " --reviewer=%s" % reviewer
args = ["--receive-pack=%s" % receive_pack] + args
else:
args.append("%s:%s" % (branch, branch))
git.push(*args)
|
Python
| 0
|
@@ -5761,32 +5761,86 @@
%0A if review:%0A
+ ui.info('Pushing code to gerrit for review.')%0A
args.app
|
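A quick sketch of parse_git_url on a gerrit-style ssh url (hypothetical host; note the port comes back as a string from groupdict):

username, server, port = parse_git_url('ssh://john@review.example.com:29418')
assert (username, server, port) == ('john', 'review.example.com', '29418')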
bb430cedf3cf92e4b165f58845bb451f025f6bff
|
print out -h by default
|
cihai/core.py
|
cihai/core.py
|
#!/usr/bin/env python
# -*- coding: utf8 - *-
"""Cihai client object."""
from __future__ import (absolute_import, division, print_function,
unicode_literals, with_statement)
import argparse
import logging
import os
import kaptan
from appdirs import AppDirs
from sqlalchemy import Table, create_engine
from cihai import db, exc
from cihai._compat import string_types
from cihai.util import merge_dict
log = logging.getLogger(__name__)
dirs = AppDirs(
"cihai", # appname
"cihai team" # app author
)
def default_config():
config_reader = kaptan.Kaptan()
default_config_file = os.path.abspath(os.path.join(
os.path.dirname(__file__),
"config.yml",
))
return config_reader.import_config(default_config_file).get()
def expand_config(d):
"""Expand configuration XDG variables.
    *Environment variables* are expanded via :py:func:`os.path.expandvars`.
So ``${PWD}`` would be replaced by the current PWD in the shell,
``${USER}`` would be the user running the app.
*XDG variables* are expanded via :py:meth:`str.format`. These do not have a
dollar sign. They are:
- ``{user_cache_dir}``
- ``{user_config_dir}``
- ``{user_data_dir}``
- ``{user_log_dir}``
- ``{site_config_dir}``
- ``{site_data_dir}``
:param d: dictionary of config info
:type d: dict
"""
context = {
'user_cache_dir': dirs.user_cache_dir,
'user_config_dir': dirs.user_config_dir,
'user_data_dir': dirs.user_data_dir,
'user_log_dir': dirs.user_log_dir,
'site_config_dir': dirs.site_config_dir,
'site_data_dir': dirs.site_data_dir
}
for k, v in d.items():
if isinstance(v, dict):
expand_config(v)
if isinstance(v, string_types):
d[k] = os.path.expanduser(os.path.expandvars(d[k]))
d[k] = d[k].format(**context)
class Storage(object):
"""Mixin generic sqlalchemy yum-yums for relational data."""
def __init__(self, cihai, engine, metadata):
"""Initialize Storage back-end.
:param engine: engine to connect to database with.
        :type engine: :class:`sqlalchemy.engine.Engine`
"""
#: :class:`Cihai` application object.
self.cihai = cihai
#: :class:`sqlalchemy.engine.Engine` instance.
self.engine = engine
#: :class:`sqlalchemy.schema.MetaData` instance.
self.metadata = metadata
def get_table(self, table_name):
"""Return :class:`~sqlalchemy.schema.Table`.
:param table_name: name of sql table
:type table_name: str
:rtype: :class:`sqlalchemy.schema.Table`
"""
return Table(table_name, self.metadata, autoload=True)
def table_exists(self, table_name):
"""Return True if table exists in db."""
return True if table_name in self.metadata.tables else False
def get_datapath(self, filename):
"""Return absolute filepath in relation to :attr:`self.data_path`.
:param filename: file name relative to ``./data``.
:type filename: str
:returns: Absolute path to data file.
:rtype: str
"""
data_path = self.cihai.config['data_path']
return os.path.join(data_path, filename)
class Cihai(object):
"""Cihai query client. May use :meth:`~.get()` to grab 中文.
    Cihai object is inspired by `pypa/warehouse`_ Warehouse application object.
.. _pypa/warehouse: https://github.com/pypa/warehouse
"""
def __init__(self, config, engine=None):
#: configuration dictionary. Available as attributes. ``.config.debug``
self.config = merge_dict(default_config(), config)
#: Expand template variables
expand_config(self.config)
#: absolute path to cihai data files.
if 'data_path' not in self.config:
self.config['data_path'] = os.path.abspath(os.path.join(
os.path.dirname(__file__), 'data/'
))
if engine is None and self.config['database']['url']:
engine = create_engine(self.config['database']['url'])
#: :class:`sqlalchemy.engine.Engine` instance.
self.engine = engine
#: :class:`sqlalchemy.schema.MetaData` instance.
self.metadata = db.metadata
self.metadata.bind = self.engine
@classmethod
def from_file(cls, config_path=None, *args, **kwargs):
"""Create a Cihai instance from a JSON or YAML config.
:param config_path: path to custom config file
        :type config_path: str
:rtype: :class:`Cihai`
"""
config_reader = kaptan.Kaptan()
config = {}
if config_path:
if not os.path.exists(config_path):
raise exc.CihaiException(
'{0} does not exist.'.format(os.path.abspath(config_path)))
if not any(
config_path.endswith(ext) for ext in
('json', 'yml', 'yaml', 'ini')
):
raise exc.CihaiException(
'{0} does not have a yaml,yml,json,ini extension.'
.format(os.path.abspath(config_path))
)
else:
custom_config = config_reader.import_config(config_path).get()
config = merge_dict(config, custom_config)
return cls(config)
@classmethod
def from_cli(cls, argv):
"""Cihai from :py:class:`argparse` / CLI args.
:param argv: list of arguments, i.e. ``['-c', 'dev/config.yml']``.
:type argv: list
:rtype: :class:`Cihai`
"""
parser = argparse.ArgumentParser(prog="cihai")
parser.add_argument("-c", "--config", dest="_config")
args = parser.parse_args(argv)
config = args._config if args._config is not None else None
return cls.from_file(config)
|
Python
| 0.000019
|
@@ -5824,16 +5824,95 @@
s(argv)%0A
+ if not args._config:%0A parser.print_help()%0A else:%0A
@@ -5970,14 +5970,15 @@
lse
-None%0A%0A
+%7B%7D%0A
|
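A small sketch of the two expansion passes in expand_config (illustrative path; ${HOME} goes through os.path.expandvars, {user_data_dir} through str.format):

conf = {'data_path': '${HOME}/cihai/{user_data_dir}'}
expand_config(conf)
# conf['data_path'] now has $HOME substituted from the environment and
# {user_data_dir} filled in from the AppDirs context built at module import.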
02ebf235704ea6c61969a20ff86717c796dd5e06
|
Fix typo: get_destination_queues should be get_queue
|
queue_util/consumer.py
|
queue_util/consumer.py
|
"""Listens to 1 (just one!) queue and consumes messages from it endlessly.
We set up a consumer with two things:
1) The name of the source queue (`source_queue_name`)
2) A callable that will process the data (`handle_data`)
The `handle_data` method must process the data. It can return nothing or a
sequence of `queue_name, data` pairs.
If it returns the latter, then the data will be sent to the given `queue_name`.
e.g.
def handle_data(data):
new_data = do_some_calc(data)
# Forward the new_data to another queue.
#
yield ("next_target", new_data)
"""
import logging
import kombu
class Consumer(object):
def __init__(self, source_queue_name, handle_data, rabbitmq_host, serializer=None, compression=None):
self.serializer = serializer
self.compression = compression
self.queue_cache = {}
# Connect to the source queue.
#
self.broker = kombu.BrokerConnection(rabbitmq_host)
self.source_queue = self.get_queue(source_queue_name, serializer=serializer, compression=compression)
# The handle_data method will be applied to each item in the queue.
#
self.handle_data = handle_data
def get_queue(self, queue_name, serializer=None, compression=None):
kwargs = {}
# Use 'defaults' if no args were supplied for serializer/compression.
#
serializer = serializer or self.serializer
if serializer:
kwargs["serializer"] = serializer
compression = compression or self.compression
if compression:
kwargs["compression"] = compression
# The cache key is the name and connection args.
# This is so that (if needed) a fresh connection can be made with
# different serializer/compression args.
#
cache_key = (queue_name, serializer, compression,)
if cache_key not in self.queue_cache:
self.queue_cache[cache_key] = self.broker.SimpleQueue(queue_name, **kwargs)
return self.queue_cache[cache_key]
def run_forever(self):
"""Keep running (unless we get a Ctrl-C).
"""
while True:
try:
message = self.source_queue.get(block=True)
data = message.payload
new_messages = self.handle_data(data)
except KeyboardInterrupt:
logging.info("Caught Ctrl-C. Byee!")
# Break out of our loop.
#
break
else:
# Queue up the new messages (if any).
#
if new_messages:
for queue_name, data in new_messages:
destination_queue = self.get_destination_queues(queue_name)
destination_queue.put(data)
# We're done with the original message.
#
message.ack()
|
Python
| 0.999999
|
@@ -2705,26 +2705,13 @@
get_
-destination_
queue
-s
(que
|
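A brief sketch of the cache-key behaviour get_queue describes (assumes a Consumer instance named consumer with a reachable broker):

q_json = consumer.get_queue('work', serializer='json')
q_pickle = consumer.get_queue('work', serializer='pickle')
assert q_json is not q_pickle  # different connection args, so a fresh queue
assert consumer.get_queue('work', serializer='json') is q_json  # cache hit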
657696ce42d0e5da781ac5dbf8f635b7ca011b8a
|
sort choices in output of ck_choices.py
|
ck_choices.py
|
ck_choices.py
|
#!/usr/bin/env python
"""
makes modifications to the controlled vocabulary (implemented as
ckanext-scheming "choices")
HvW - 2016-06-07
"""
from pprint import pprint
import argparse
import sys
import json
import os
LOCAL_SCHEMA=("/usr/lib/ckan/default/src/ckanext-eaw_schema/ckanext/" +
"eaw_schema/eaw_schema_dataset.json")
def mkparser():
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
description=
"""
Make modifications to the controlled vocabulary FIELD
(implemented as ckanext-scheming "choices").
""", epilog=
"""
Examples:
ck_choices.py variables "new_var_1,New Variable One" newvar2,"Another One"
adds two new terms to the dataset_field "variables".
ck_choices.py variables --del new_var_1 newvar2
deletes them.
"""
)
parser.add_argument('field', nargs='?', help='the schema field to be modified',
metavar='FIELD')
parser.add_argument('--listfields', action='store_true', help='list fields'+\
' (the rest of the command line is ignored)')
parser.add_argument('--del', action='store_true', help='delete terms '+
'(default is adding terms)')
parser.add_argument('--resource', action='store_true', help='action ' +
'refers to resource field (default is dataset field)')
parser.add_argument('terms', nargs='*', help='the terms to be added '
+'(removed). Have the format "value,label" for adding,' +
' and "value" for removing', metavar='TERM')
return(parser)
def listfields(schema):
fields = schema['dataset_fields']
for f in ('{}, {}\n{}\n'.format(x['field_name'], x['label'],
[c['value'] for c in x['choices']])
for x in fields if 'choices' in x):
print f
def postparse(params, parser):
if params['listfields']:
return
terms = params['terms']
print("terms: {}".format(terms))
if not terms or not params['field']:
parser.print_help()
sys.exit(1)
terms = [tuple(x.split(',')) for x in terms]
print("terms: {}".format(terms))
if params['del'] and not all([len(x) == 1 for x in terms]):
parser.print_help()
sys.exit(1)
elif not params['del'] and not all([len(x) == 2 for x in terms]):
parser.print_help()
sys.exit(1)
terms = [x[0] if len(x) == 1 else x for x in terms]
return(terms)
def load_schema(schemafile):
try:
with open(schemafile) as sf:
schema = json.load(sf)
except ValueError:
raise(SystemExit("Schema file {} doesn't parse into JSON"
.format(schemafile), 1))
except IOError:
        raise(SystemExit("Could not open schema file '{}'".format(schemafile), 1))
return(schema)
def check_unique(field, choices, terms):
for t in [x[0] for x in terms]:
if t in [x['value'] for x in choices]:
raise SystemExit('{} already in {}'.format(t, field))
def update_field(schema, typ, field, remove, terms):
" typ: 'dataset_field' or 'resource_field'"
def _build_choices(terms):
ch = [{'value': t[0], 'label': t[1]} for t in terms]
return ch
def _get_val_index(val, choices):
idx = [x.get('value') for x in choices].index(val)
return(idx)
try:
f = [x for x in schema[typ] if x["field_name"] == field]
except KeyError:
raise SystemExit('Could not find field_type "{}"'.format(typ))
if not f:
raise SystemExit("Could not find field \"{}\" in \"{}\""
.format(field, typ))
assert(len(f) == 1)
c = f[0]['choices']
if not remove:
check_unique(field, c, terms)
c.extend(_build_choices(terms))
else:
try:
rmidx = [_get_val_index(val, c) for val in terms]
except ValueError:
raise SystemExit('Not all terms found in ' +
'field "{}" in "{}"'.format(field, typ))
if len(rmidx) < len(terms):
raise SystemExit('Not all terms found in ' +
'field "{}" in "{}"'.format(field, typ))
cnew = [x[1] for x in enumerate(c) if x[0] not in rmidx]
f[0]['choices'] = cnew
return(schema)
def write_schema(newschema, path):
with open(path, 'w') as f:
json.dump(newschema, f, indent=2)
def main():
schema = load_schema(LOCAL_SCHEMA)
parser = mkparser()
params = vars(parser.parse_args())
terms = postparse(params, parser)
if params['listfields']:
listfields(schema)
sys.exit()
field = params['field']
remove = params['del']
typ = 'resource_fields' if params['resource'] else 'dataset_fields'
newschema = update_field(schema, typ, field, remove, terms)
write_schema(newschema, LOCAL_SCHEMA)
if __name__ == '__main__':
main()
|
Python
| 0.999988
|
@@ -1831,16 +1831,23 @@
+sorted(
%5Bc%5B'valu
@@ -1873,16 +1873,17 @@
ices'%5D%5D)
+)
%0A
|
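A small sketch of the "value,label" term parsing done in postparse, with terms taken from the epilog examples above:

terms = ['new_var_1,New Variable One', 'newvar2,Another One']
parsed = [tuple(x.split(',')) for x in terms]
assert parsed == [('new_var_1', 'New Variable One'), ('newvar2', 'Another One')]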
22ccb20e5f1637059b366da6e5fdf7e33f207e7a
|
improving the classifier
|
classifier.py
|
classifier.py
|
#! /usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Classifier
==========
After Feature Extraction, that returns a data of the format
[(filename, linenum, vote, sentence, feat1, feat2, ...)]
"""
from __future__ import division
from optparse import OptionParser
import json
from pprint import pprint
import random
import math
import nltk
from cPickle import dump
from cPickle import load
import parser
import extractor
def getUserInput():
optionparser = OptionParser()
optionparser.add_option('-i', '--input', dest='inputfile')
(option, args) = optionparser.parse_args()
if not option.inputfile:
return optionparser.error('html file input not provided.\n Usage: --url="path.to.appurl"')
return { 'file' : option.inputfile }
def fileExtractor(f):
fObj = open(f)
data = json.load(fObj)
fObj.close()
return data
def featureAggregator(extract):
outputdata = []
for app in extract:
outputdata.append(featureExtractor(app))
return outputdata
def featureExtractor(app):
featDict = {}
fObj = open('mySentClassifier.pickle')
cl = load(fObj)
fObj.close()
featDict['price'] = getAppPrice(app)
# featList['numrev'] = getNumReviews(app)
featDict['1starrating'] = getOneStarRating(app)
featDict['2starrating'] = getTwoStarRating(app)
featDict['3starRating'] = getThreeStarRating(app)
featDict['4starRating'] = getFourStarRating(app)
featDict['5starRating'] = getFiveStarRating(app)
featDict['hasPrivacy'] = getPrivacyState(app)
featDict['revSent'] = getReviewSentiment(app, cl)
return featDict
def getAppPrice(app):
return app['price']
def getNumReviews(app):
return len(app['reviews'])
def getOneStarRating(app):
for appRatingCount in app['rating']:
if appRatingCount[0] == ' 1 ':
return appRatingCount[1]
def getTwoStarRating(app):
for appRatingCount in app['rating']:
if appRatingCount[0] == ' 2 ':
return appRatingCount[1]
def getThreeStarRating(app):
for appRatingCount in app['rating']:
if appRatingCount[0] == ' 3 ':
return appRatingCount[1]
def getFourStarRating(app):
for appRatingCount in app['rating']:
if appRatingCount[0] == ' 4 ':
return appRatingCount[1]
def getFiveStarRating(app):
for appRatingCount in app['rating']:
if appRatingCount[0] == ' 5 ':
return appRatingCount[1]
def getPrivacyState(app):
if app['devprivacyurl'] == 'N.A.':
return False
else:
return True
def getReviewSentiment(app, classifier):
revAggSentiment = 0
for rev in app['reviews']:
sentList = nltk.tokenize.sent_tokenize(rev[1])
sentAggSentiment = 0
for sent in sentList:
sent = unicode(sent.strip())
# print sent
featdata = extractor.featureExtractor(sent)
# pprint(featdata)
cl= classifier.classify(featdata)
if cl == 'pos':
label = 1
elif cl == 'neutral':
label = 0
else:
label = -1
sentAggSentiment += label
revAggSentiment += sentAggSentiment
print "review Sentiment: ", revAggSentiment
return revAggSentiment
def classifier(extract, fold=10):
labeldata = 'fair'
data = []
for app in extract:
for rev in app['reviews']:
revlower = rev[1].lower()
# print "reviews" , revlower
if revlower.find('fake') != -1:
labeldata = 'unfair'
features = featureExtractor(app)
data.append([labeldata, list(features.values())])
pprint(data)
# for d in data:
# if d[1][1] == False:
# pprint(d)
# random.shuffle(data)
# claccuracy = []
# size = int(math.floor(len(data) / 10.0))
# for i in range(fold):
# test_this_round = data[i*size:][:size]
# train_this_round = data[:i*size] + data[(i+1)*size:]
# acc = myclassifier(train_this_round, test_this_round)
# claccuracy.append(acc)
def myclassifier(train_data, test_data):
classifier = nltk.NaiveBayesClassifier.train(train_data)
# print classifier.show_most_informative_features()
return nltk.classify.accuracy(classifier, test_data)
def main():
userinput = getUserInput()
print userinput['file']
extract = fileExtractor(userinput['file'])
# features = featureAggregator(extract)
classifier(extract)
if __name__ == "__main__":
main()
|
Python
| 0.841188
|
@@ -411,16 +411,39 @@
xtractor
+%0Afrom os import listdir
%0A%0Adef ge
@@ -554,16 +554,77 @@
tfile')%0A
+ optionparser.add_option('-d', '--dir', dest='directory')%0A
%0A%0A (o
@@ -683,25 +683,25 @@
option.
-inputfile
+directory
:%0A
@@ -794,16 +794,16 @@
url%22')%0A%0A
-
retu
@@ -832,16 +832,42 @@
nputfile
+, 'dir' : option.directory
%7D%0A%0A%0A%0Ade
@@ -4549,21 +4549,72 @@
nput
-%5B'file'%5D
+%0A%0A for f in listdir(userinput%5B'dir'%5D):%0A print f
%0A%0A
+ #
ext
@@ -4652,16 +4652,39 @@
'file'%5D)
+%0A%0A # pprint(extract)
%0A # f
@@ -4719,24 +4719,26 @@
extract)%0A
+ #
classifier(
|
919d67e6f46d4f991cc5caa5893beebfe94e0d9e
|
Add hash mock
|
cli/crypto.py
|
cli/crypto.py
|
from ctypes import *
import base64
import os
def generate_hex_sstr():
publicKey64 = "Not implemente"
privateKey64 = "Not implemente"
return (publicKey64,privateKey64)
|
Python
| 0.000004
|
@@ -173,8 +173,51 @@
eKey64)%0A
+%0Adef hash(msg):%0A return %22Not implemente%22
|
2e6485ea0d80426222d0cc84d1d4074d845cd6d1
|
disable kcov test on arm
|
tests/integration_tests/build/test_coverage.py
|
tests/integration_tests/build/test_coverage.py
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests pertaining to line/branch test coverage for the Firecracker code base.
# TODO
- Put the coverage target in `s3://spec.firecracker` and update it automatically.
"""
import os
import platform
import re
import pytest
import framework.utils as utils
import host_tools.cargo_build as host # pylint: disable=import-error
import host_tools.proc as proc
# AMD has a slightly different coverage due to
# the appearance of the brand string. On Intel,
# this contains the frequency while on AMD it does not.
# Checkout the cpuid crate. In the future other
# differences may appear.
COVERAGE_DICT = {"Intel": 84.9, "AMD": 84.08, "ARM": 82.58}
PROC_MODEL = proc.proc_type()
COVERAGE_MAX_DELTA = 0.05
CARGO_KCOV_REL_PATH = os.path.join(host.CARGO_BUILD_REL_PATH, 'kcov')
KCOV_COVERAGE_FILE = 'index.js'
"""kcov will aggregate coverage data in this file."""
KCOV_COVERED_LINES_REGEX = r'"covered_lines":"(\d+)"'
"""Regex for extracting number of total covered lines found by kcov."""
KCOV_TOTAL_LINES_REGEX = r'"total_lines" : "(\d+)"'
"""Regex for extracting number of total executable lines found by kcov."""
@pytest.mark.timeout(120)
@pytest.mark.skipif(
platform.machine() != "x86_64",
reason="no need to test it on multiple platforms"
)
def test_ensure_mod_tests():
"""Check that files containing unit tests have a 'tests' module defined."""
# List all source files containing rust #[test] attribute,
# (excluding generated files and integration test directories).
# Take the list and check each file contains 'mod tests {', output file
# name if it doesn't.
cmd = (
'/bin/bash '
'-c '
'"grep '
'--files-without-match '
'\'mod tests {\' '
'\\$(grep '
'--files-with-matches '
'--recursive '
'--exclude-dir=src/*_gen/* '
'\'\\#\\[test\\]\' ../src/*/src)" '
)
# The outer grep returns 0 even if it finds files without the match, so we
# ignore the return code.
result = utils.run_cmd(cmd, no_shell=False, ignore_return_code=True)
error_msg = (
'Tests found in files without a "tests" module:\n {}'
'To ensure code coverage is reported correctly, please check that '
'your tests are in a module named "tests".'.format(result.stdout)
)
assert not result.stdout, error_msg
@pytest.mark.timeout(400)
def test_coverage(test_session_root_path, test_session_tmp_path):
"""Test line coverage with kcov.
The result is extracted from the $KCOV_COVERAGE_FILE file created by kcov
after a coverage run.
"""
proc_model = [item for item in COVERAGE_DICT if item in PROC_MODEL]
assert len(proc_model) == 1, "Could not get processor model!"
coverage_target_pct = COVERAGE_DICT[proc_model[0]]
exclude_pattern = (
'${CARGO_HOME:-$HOME/.cargo/},'
'build/,'
'tests/,'
'usr/lib/gcc,'
'lib/x86_64-linux-gnu/,'
'test_utils.rs,'
# The following files/directories are auto-generated
'bootparam.rs,'
'elf.rs,'
'mpspec.rs,'
'msr_index.rs,'
'_gen'
)
exclude_region = '\'mod tests {\''
target = "{}-unknown-linux-musl".format(platform.machine())
cmd = (
'RUSTFLAGS="{}" CARGO_TARGET_DIR={} cargo kcov --all '
'--target {} --output {} -- '
'--exclude-pattern={} '
'--exclude-region={} --verify'
).format(
host.get_rustflags(),
os.path.join(test_session_root_path, CARGO_KCOV_REL_PATH),
target,
test_session_tmp_path,
exclude_pattern,
exclude_region
)
# By default, `cargo kcov` passes `--exclude-pattern=$CARGO_HOME --verify`
# to kcov. To pass others arguments, we need to include the defaults.
utils.run_cmd(cmd)
coverage_file = os.path.join(test_session_tmp_path, KCOV_COVERAGE_FILE)
with open(coverage_file) as cov_output:
contents = cov_output.read()
covered_lines = int(re.findall(KCOV_COVERED_LINES_REGEX, contents)[0])
total_lines = int(re.findall(KCOV_TOTAL_LINES_REGEX, contents)[0])
coverage = covered_lines / total_lines * 100
print("Number of executable lines: {}".format(total_lines))
print("Number of covered lines: {}".format(covered_lines))
print("Thus, coverage is: {:.2f}%".format(coverage))
coverage_low_msg = (
'Current code coverage ({:.2f}%) is below the target ({}%).'
.format(coverage, coverage_target_pct)
)
min_coverage = coverage_target_pct - COVERAGE_MAX_DELTA
assert coverage >= min_coverage, coverage_low_msg
# Get the name of the variable that needs updating.
namespace = globals()
cov_target_name = [name for name in namespace if namespace[name]
is COVERAGE_DICT][0]
coverage_high_msg = (
'Current code coverage ({:.2f}%) is above the target ({}%).\n'
'Please update the value of {}.'
.format(coverage, coverage_target_pct, cov_target_name)
)
assert coverage - coverage_target_pct <= COVERAGE_MAX_DELTA,\
coverage_high_msg
|
Python
| 0
|
@@ -2550,16 +2550,124 @@
ut(400)%0A
+@pytest.mark.skipif(%0A platform.machine() != %22x86_64%22,%0A reason=%22unstable on aarch64 as of right now%22%0A)%0A
def test
|
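A minimal sketch of the kcov extraction step above (the counts are made-up sample values):

import re
contents = '"covered_lines":"8123" ... "total_lines" : "9650"'
covered = int(re.findall(r'"covered_lines":"(\d+)"', contents)[0])  # 8123
total = int(re.findall(r'"total_lines" : "(\d+)"', contents)[0])    # 9650
coverage = covered / total * 100                                    # ~84.18%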
d262bcf59c9779a387e9f7d213030c958b85d891
|
fix for the sentences
|
sara_flexbe_states/src/sara_flexbe_states/StoryboardSetStoryKey.py
|
sara_flexbe_states/src/sara_flexbe_states/StoryboardSetStoryKey.py
|
#!/usr/bin/env python
from flexbe_core import EventState, Logger
import rospy
from vizbox.msg import Story
class StoryboardSetStoryFromAction(EventState):
"""
set_story
-- titre string the title
-- actionList string[][] the steps
<= done what's suppose to be written is written
"""
def __init__(self):
"""set the story"""
super(StoryboardSetStoryFromAction, self).__init__(outcomes=['done'], input_keys=['titre', 'actionList'])
self.pub = rospy.Publisher("/story", Story)
def execute(self, userdata):
"""execute what needs to be executed"""
self.msg = Story()
self.msg.title = userdata.titre
story = []
for action in userdata.actionList:
print(action[0].lower())
if action[0].lower() == "move":
story.append("Move to the "+action[1])
elif action[0].lower() == "find":
story.append("Find the " + action[1])
            elif action[0].lower() == "findperson":
story.append("Find " + action[1])
elif action[0].lower() == "guide":
story.append("Guide to " + action[1])
elif action[0].lower() == "pick":
story.append("Pick the " + action[1])
elif action[0].lower() == "give":
story.append("Give to " + action[1])
elif action[0].lower() == "say":
story.append("Say something")
elif action[0].lower() == "ask":
story.append("Ask a question")
elif action[0].lower() == "follow":
story.append("Follow " + action[1])
elif action[0].lower() == "count":
story.append("Count the number of " + action[1])
elif action[0].lower() == "place":
story.append("Place on the " + action[1])
elif action[0].lower() == "answer":
story.append("Answer a question")
else:
print("nothing")
print( str(story))
self.msg.storyline = story
self.pub.publish(self.msg)
        Logger.loginfo('Successfully published the story')
return 'done'
|
Python
| 0.007946
|
@@ -1042,16 +1042,127 @@
erson%22:%0A
+ if action%5B1%5D == %22%22:%0A story.append(%22Find a person%22)%0A else%0A
@@ -1195,32 +1195,32 @@
%22 + action%5B1%5D)%0A
-
elif
@@ -1446,24 +1446,138 @@
== %22give%22:%0A
+ if action%5B1%5D == %22%22:%0A story.append(%22Give to a person%22)%0A else%0A
@@ -1609,32 +1609,32 @@
%22 + action%5B1%5D)%0A
-
elif
@@ -1696,21 +1696,12 @@
nd(%22
-Say something
+Talk
%22)%0A
|
2479537a63f12807d51d490e39b77c258f33c526
|
add django.contrib.sites to installed app for preventing warning message
|
trytalk/settings/default.py
|
trytalk/settings/default.py
|
"""
Django settings for trytalk project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool(os.environ.get('DJANGO_DEBUG', ''))
TEMPLATE_DEBUG = DEBUG
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.gis',
'rest_framework',
'rest_framework.authtoken',
'rest_framework_swagger',
'request',
'accounts',
'fb_accounts',
'twitter_accounts',
'mobile_notifications',
'questions',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'request.middleware.RequestMiddleware',
)
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'accounts.backends.GuestModelBackend',
'fb_accounts.backends.FacebookIDModelBackend',
'twitter_accounts.backends.TwitterIDModelBackend',
)
ROOT_URLCONF = 'trytalk.urls'
WSGI_APPLICATION = 'trytalk.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'zh-TW'
TIME_ZONE = 'Asia/Taipei'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# E-mail settings
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD')
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = []
# REST Framework settings
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 20,
'DATETIME_FORMAT': '%Y-%m-%dT%H:%M:%S',
}
# Celery settings
BROKER_URL = 'amqp://guest:guest@localhost:5672//'
# Django Site settings
SITE_ID = 1
|
Python
| 0
|
@@ -1044,16 +1044,41 @@
b.gis',%0A
+%09'django.contrib.sites',%0A
%09'rest_f
|
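Context for the record above: once django.contrib.sites is in INSTALLED_APPS and SITE_ID is set (this settings file already defines SITE_ID = 1), the current site resolves through the standard sites API. A minimal usage sketch, assuming a configured project where migrations have created the Site row:

# Inside a configured Django project (sites app installed, SITE_ID set,
# and manage.py migrate run so the Site row exists):
from django.contrib.sites.models import Site

site = Site.objects.get_current()  # resolved via settings.SITE_ID
print(site.domain, site.name)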
4b204727fbcfc6dba4fdd61a8f890e3a49c4bd7a
|
revert inhibition of return since it breaks salt-run, which is expecting the return. restore after fixing salt-run
|
salt/daemons/flo/worker.py
|
salt/daemons/flo/worker.py
|
# -*- coding: utf-8 -*-
'''
The core behaviors used by minion and master
'''
# pylint: disable=W0232
from __future__ import absolute_import
# Import python libs
import time
import os
import multiprocessing
import logging
from six.moves import range
# Import salt libs
import salt.daemons.masterapi
from raet import raeting
from raet.lane.stacking import LaneStack
from raet.lane.yarding import RemoteYard
from salt.utils import kinds
# Import ioflo libs
import ioflo.base.deeding
log = logging.getLogger(__name__)
# convert to a set once the list grows beyond about 3 entries, since set
# membership checks hash instead of scanning linearly
INHIBIT_RETURN = ['_return'] # cmds for which we should not send a return
class SaltRaetWorkerFork(ioflo.base.deeding.Deed):
'''
Fork off the worker procs
FloScript:
do salt raet worker fork at enter
'''
Ioinits = {'opts': '.salt.opts',
'worker_verify': '.salt.var.worker_verify',
'access_keys': '.salt.access_keys'}
def _make_workers(self):
'''
Spin up a process for each worker thread
'''
for index in range(int(self.opts.value['worker_threads'])):
time.sleep(0.01)
proc = multiprocessing.Process(
target=self._worker, kwargs={'windex': index + 1}
)
proc.start()
def _worker(self, windex):
'''
Spin up a worker, do this in multiprocess
windex is worker index
'''
self.opts.value['__worker'] = True
behaviors = ['salt.daemons.flo']
preloads = [('.salt.opts', dict(value=self.opts.value)),
('.salt.var.worker_verify', dict(value=self.worker_verify.value))]
preloads.append(('.salt.var.fork.worker.windex', dict(value=windex)))
preloads.append(
('.salt.access_keys', dict(value=self.access_keys.value)))
console_logdir = self.opts.value.get('ioflo_console_logdir', '')
if console_logdir:
consolepath = os.path.join(console_logdir, "worker_{0}.log".format(windex))
else: # empty means log to std out
consolepath = ''
ioflo.app.run.start(
name='worker{0}'.format(windex),
period=float(self.opts.value['ioflo_period']),
stamp=0.0,
real=self.opts.value['ioflo_realtime'],
filepath=self.opts.value['worker_floscript'],
behaviors=behaviors,
username='',
password='',
mode=None,
houses=None,
metas=None,
preloads=preloads,
verbose=int(self.opts.value['ioflo_verbose']),
consolepath=consolepath,
)
def action(self):
'''
Run with an enter, starts the worker procs
'''
self._make_workers()
class SaltRaetWorkerSetup(ioflo.base.deeding.Deed):
'''
FloScript:
do salt raet worker setup at enter
'''
Ioinits = {
'opts': '.salt.opts',
'windex': '.salt.var.fork.worker.windex',
'access_keys': '.salt.access_keys',
'remote_loader': '.salt.loader.remote',
'local_loader': '.salt.loader.local',
'inode': '.salt.lane.manor.',
'stack': 'stack',
'local': {'ipath': 'local',
'ival': {'lanename': 'master'}}
}
def action(self):
'''
Set up the uxd stack and behaviors
'''
name = "worker{0}".format(self.windex.value)
# master application kind
kind = self.opts.value['__role']
if kind not in kinds.APPL_KINDS:
emsg = ("Invalid application kind = '{0}' for Master Worker.".format(kind))
log.error(emsg + "\n")
raise ValueError(emsg)
if kind == 'master':
lanename = 'master'
else: # workers currently are only supported for masters
emsg = ("Invalid application kind '{0}' for Master Worker.".format(kind))
log.error(emsg + '\n')
raise ValueError(emsg)
sockdirpath = self.opts.value['sock_dir']
self.stack.value = LaneStack(
name=name,
lanename=lanename,
sockdirpath=sockdirpath)
self.stack.value.Pk = raeting.packKinds.pack
manor_yard = RemoteYard(
stack=self.stack.value,
name='manor',
lanename=lanename,
dirpath=sockdirpath)
self.stack.value.addRemote(manor_yard)
self.remote_loader.value = salt.daemons.masterapi.RemoteFuncs(
self.opts.value)
self.local_loader.value = salt.daemons.masterapi.LocalFuncs(
self.opts.value,
self.access_keys.value)
init = {}
init['route'] = {
'src': (None, self.stack.value.local.name, None),
'dst': (None, manor_yard.name, 'worker_req')
}
self.stack.value.transmit(init, self.stack.value.fetchUidByName(manor_yard.name))
self.stack.value.serviceAll()
def __del__(self):
self.stack.server.close()
class SaltRaetWorkerRouter(ioflo.base.deeding.Deed):
'''
FloScript:
do salt raet worker router
'''
Ioinits = {
'lane_stack': '.salt.lane.manor.stack',
'road_stack': '.salt.road.manor.stack',
'opts': '.salt.opts',
'worker_verify': '.salt.var.worker_verify',
'remote_loader': '.salt.loader.remote',
'local_loader': '.salt.loader.local',
}
def action(self):
'''
Read in a command and execute it, send the return back up to the
main master process
'''
self.lane_stack.value.serviceAll()
while self.lane_stack.value.rxMsgs:
msg, sender = self.lane_stack.value.rxMsgs.popleft()
try:
s_estate, s_yard, s_share = msg['route']['src']
d_estate, d_yard, d_share = msg['route']['dst']
except (ValueError, IndexError):
log.error('Received invalid message: {0}'.format(msg))
return
log.debug("**** Worker Router rxMsg\n msg= {0}\n".format(msg))
if 'load' in msg:
cmd = msg['load'].get('cmd')
if not cmd:
continue
elif cmd.startswith('__'):
continue
ret = {}
if d_share == 'remote_cmd':
if hasattr(self.remote_loader.value, cmd):
ret['return'] = getattr(self.remote_loader.value, cmd)(msg['load'])
elif d_share == 'local_cmd':
if hasattr(self.local_loader.value, cmd):
ret['return'] = getattr(self.local_loader.value, cmd)(msg['load'])
else:
ret = {'error': 'Invalid request'}
if cmd == 'publish' and 'pub' in ret.get('return', {}):
r_share = 'pub_ret'
ret['__worker_verify'] = self.worker_verify.value
else:
r_share = s_share
if cmd not in INHIBIT_RETURN:
ret['route'] = {
'src': (None, self.lane_stack.value.local.name, None),
'dst': (s_estate, s_yard, r_share)
}
self.lane_stack.value.transmit(ret,
self.lane_stack.value.fetchUidByName('manor'))
self.lane_stack.value.serviceAll()
|
Python
| 0.000063
|
@@ -599,16 +599,21 @@
RETURN =
+ %5B%5D #
%5B'_retu
|
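The revert above keeps the "if cmd not in INHIBIT_RETURN:" guard in place but empties the list, so every command sends its return again. The membership-gated toggle in isolation, with a hypothetical send callable:

# Sketch of the list-as-feature-flag pattern: an empty list disables the
# suppression without removing the code path.
INHIBIT_RETURN = []  # was ['_return']; re-add entries to suppress again

def maybe_send_return(cmd, send):
    if cmd not in INHIBIT_RETURN:
        send({'cmd': cmd, 'return': 'ok'})

maybe_send_return('_return', print)  # prints, since the list is empty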
bad310a283d4d459464a2aff670fd596f5716fef
|
Use the daliuge-pbc PBC implementation on demand
|
sip/execution_control/processing_controller/scheduler/scheduler.py
|
sip/execution_control/processing_controller/scheduler/scheduler.py
|
# coding=utf-8
"""Processing Block Scheduler.
Implemented with a set of long running threads.
"""
import datetime
import os
import sys
import time
from threading import Thread, active_count
import celery
from celery.app.control import Inspect
from sip_config_db.scheduling import ProcessingBlock, ProcessingBlockList
from sip_config_db.utils.datetime_utils import datetime_from_isoformat
from .log import LOG
from .pb_queue import ProcessingBlockQueue
from .release import __service_name__
BROKER = os.getenv('CELERY_BROKER_URL', 'redis://localhost:6379/1')
BACKEND = os.getenv('CELERY_RESULT_BACKEND', 'redis://localhost:6379/2')
APP = celery.Celery(broker=BROKER, backend=BACKEND)
execution_task_name = 'sip_pbc.tasks.execute_processing_block'
class ProcessingBlockScheduler:
# pylint: disable=too-few-public-methods
"""Processing Block Scheduler class."""
def __init__(self, report_interval: float = 5.0, max_pbcs: int = 4):
"""Initialise the Scheduler.
Args:
report_interval (float): Minimum interval between reports, in s
max_pbcs (int): Maximum number of concurrent PBCs
(and therefore PBs) that can be running.
"""
LOG.info('Starting Processing Block Scheduler.')
self._queue = self._init_queue()
self._pb_events = ProcessingBlockList().subscribe(__service_name__)
self._report_interval = report_interval
self._num_pbcs = 0 # Current number of PBCs
self._max_pbcs = max_pbcs
self._pb_list = ProcessingBlockList()
@staticmethod
def _init_queue():
"""Initialise the Processing Block queue from the database.
This method should populate the queue from the current state of the
Configuration Database.
This needs to be based on the current set of Processing Blocks in
the database and consider events on these processing blocks.
"""
LOG.info('Initialising Processing Block queue.')
queue = ProcessingBlockQueue()
active_pb_ids = ProcessingBlockList().active
LOG.info('Initialising PC PB queue: %s', active_pb_ids)
for pb_id in active_pb_ids:
pb = ProcessingBlock(pb_id)
queue.put(pb.id, pb.priority, pb.type)
return queue
def queue(self):
"""Return the processing block queue."""
return self._queue
def _monitor_events(self):
"""Watch for Processing Block events."""
LOG.info("Starting to monitor PB events")
check_counter = 0
while True:
if check_counter == 50:
check_counter = 0
LOG.debug('Checking for PB events...')
published_events = self._pb_events.get_published_events()
for event in published_events:
if event.type == 'status_changed':
LOG.info('PB status changed event: %s',
event.data['status'])
if event.data['status'] == 'created':
LOG.info('Acknowledged PB created event (%s) for %s, '
'[timestamp: %s]', event.id,
event.object_id, event.timestamp)
pb = ProcessingBlock(event.object_id)
self._queue.put(event.object_id, pb.priority, pb.type)
if event.data['status'] == 'completed':
LOG.info('Acknowledged PB completed event (%s) for %s,'
' [timestamp: %s]', event.id,
event.object_id, event.timestamp)
self._num_pbcs -= 1
if self._num_pbcs < 0:
self._num_pbcs = 0
time.sleep(0.1)
check_counter += 1
def _processing_controller_status(self):
"""Report on the status of the Processing Block queue(s)."""
LOG.info('Starting Processing Block queue reporter.')
while True:
LOG.info('PB queue length = %d', len(self._queue))
time.sleep(self._report_interval)
if active_count() != 5:
LOG.critical('Processing Controller not running '
'correctly! (%d/%d threads active)',
active_count(), 5)
def _schedule_processing_blocks(self):
"""Schedule Processing Blocks for execution."""
LOG.info('Starting to Schedule Processing Blocks.')
while True:
time.sleep(0.5)
if not self._queue:
continue
if self._num_pbcs >= self._max_pbcs:
LOG.warning('Resource limit reached!')
continue
_inspect = Inspect(app=APP)
if self._queue and _inspect.active() is not None:
next_pb = self._queue[-1]
LOG.info('Considering %s for execution...', next_pb[2])
utc_now = datetime.datetime.utcnow()
time_in_queue = (utc_now -
datetime_from_isoformat(next_pb[4]))
if time_in_queue.total_seconds() >= 10:
item = self._queue.get()
LOG.info('------------------------------------')
LOG.info('>>> Executing %s! <<<', item)
LOG.info('------------------------------------')
APP.send_task(execution_task_name, args=(item,))
self._num_pbcs += 1
else:
LOG.info('Waiting for resources for %s', next_pb[2])
def _monitor_pbc_status(self):
"""Monitor the PBC status."""
LOG.info('Starting to Monitor PBC status.')
inspect = APP.control.inspect()
workers = inspect.ping()
start_time = time.time()
while workers is None:
time.sleep(0.1)
elapsed = time.time() - start_time
if elapsed > 20.0:
LOG.warning('PBC not found!')
break
if workers is not None:
for worker in workers:
_tasks = inspect.registered_tasks()[worker]
LOG.info('Worker: %s tasks:', worker)
for task_index, task_name in enumerate(_tasks):
LOG.info(' %02d : %s', task_index, task_name)
while True:
LOG.info('Checking PBC status (%d/%d)', self._num_pbcs,
self._max_pbcs)
inspect = APP.control.inspect()
workers = inspect.ping()
if workers is None:
LOG.warning('PBC service not found!')
else:
LOG.info('PBC state: %s', APP.events.State())
_active = inspect.active()
_scheduled = inspect.scheduled()
for worker in workers:
LOG.info(' Worker %s: scheduled: %s, active: %s',
worker, _scheduled[worker], _active[worker])
time.sleep(self._report_interval)
def start(self):
"""Start the scheduler threads."""
# TODO(BMo) having this check is probably a good idea but I've
# disabled it for now while the PBC is in flux.
# assert sip_pbc.release.__version__ == '1.2.3'
scheduler_threads = [
Thread(target=self._monitor_events, daemon=True),
Thread(target=self._processing_controller_status, daemon=True),
Thread(target=self._schedule_processing_blocks, daemon=True),
Thread(target=self._monitor_pbc_status, daemon=True)
]
for thread in scheduler_threads:
thread.start()
try:
for thread in scheduler_threads:
thread.join()
except KeyboardInterrupt:
LOG.info('Keyboard interrupt!')
sys.exit(0)
finally:
LOG.info('Finally!')
|
Python
| 0
|
@@ -745,16 +745,114 @@
g_block'
+%0Aif os.getenv('USE_DLG', None):%0A execution_task_name = 'dlg_pbc.tasks.execute_processing_block'
%0A%0Aclass
|
2a5b373b7efc7a6b1bf9fb93ad348cadae82ab56
|
Add test for min_sup
|
tests/test_evaluation/test_TopListEvaluator.py
|
tests/test_evaluation/test_TopListEvaluator.py
|
from unittest.mock import patch
from nose.tools import raises, assert_equal
from numpy.testing import assert_almost_equal
import numpy as np
from otdet.evaluation import TopListEvaluator
class TestInit:
def setUp(self):
self.sample_result = [
[(5, True), (4, False), (3, True), (2, False), (1, False)],
[(5, False), (4, True), (3, False), (2, True), (1, False)]
]
@patch.object(TopListEvaluator, '_get_nums')
def test_default(self, mock_get_nums):
mock_get_nums.return_value = 5, 2
evaluator = TopListEvaluator(self.sample_result, N=3)
assert_equal(evaluator.result, self.sample_result)
assert_equal(evaluator.N, 3)
assert_equal(evaluator._M, 5)
assert_equal(evaluator._n, 2)
@raises(Exception)
def test_pick_negative(self):
TopListEvaluator(self.sample_result, N=-1)
class TestGetNums:
def test_default(self):
sample_result = [
[(5, True), (4, False), (3, True), (2, False), (1, False)],
[(5, False), (4, True), (3, False), (2, True), (1, False)]
]
evaluator = TopListEvaluator(sample_result)
assert_equal(evaluator._get_nums(), (5, 2))
@raises(Exception)
def test_num_oot_mismatch(self):
sample_result = [
[(5, True), (4, True), (3, True), (2, False), (1, False)],
[(5, False), (4, True), (3, False), (2, True), (1, False)]
]
TopListEvaluator(sample_result)
@raises(Exception)
def test_num_post_mismatch(self):
sample_result = [
[(5, True), (4, True), (3, True), (2, False)],
[(5, False), (4, True), (3, False), (2, True), (1, False)]
]
TopListEvaluator(sample_result)
class TestBaseline:
def setUp(self):
sample_result = [
[(5, True), (4, False), (3, True), (2, False), (1, False)],
[(5, False), (4, True), (3, False), (2, True), (1, False)]
]
self.evaluator = TopListEvaluator(sample_result, N=3)
def test_normal_case(self):
expected = np.array([0.1, 0.6, 0.3]) # 0 <= k <= 2
assert_almost_equal(self.evaluator.baseline, expected)
def test_top_few_list(self):
self.evaluator.N = 1
expected = np.array([0.6, 0.4]) # 0 <= k <= 1
assert_almost_equal(self.evaluator.baseline, expected)
def test_top_many_list(self):
self.evaluator.N = 4
expected = np.array([0.4, 0.6]) # 1 <= k <= 2
assert_almost_equal(self.evaluator.baseline, expected)
class TestPerformance:
def setUp(self):
sample_result = [
[(5, True), (4, False), (3, False), (2, True), (1, False)],
[(5, True), (4, False), (3, True), (2, False), (1, False)],
[(5, False), (4, True), (3, True), (2, False), (1, False)],
[(5, False), (4, False), (3, False), (2, True), (1, True)]
]
self.evaluator = TopListEvaluator(sample_result, N=3)
def test_normal_case(self):
expected = np.array([0.25, 0.25, 0.50]) # 0 <= k <= 2
result = self.evaluator.performance
assert_almost_equal(result, expected)
|
Python
| 0
|
@@ -1760,24 +1760,518 @@
e_result)%0A%0A%0A
+class TestMinSup:%0A def setUp(self):%0A self.sample_result = %5B%0A %5B(5, True), (4, False), (3, True), (2, False), (1, False)%5D,%0A %5B(5, False), (4, True), (3, False), (2, True), (1, False)%5D%0A %5D%0A%0A def test_default(self):%0A evaluator = TopListEvaluator(self.sample_result, N=4)%0A assert_equal(evaluator.min_sup, 1)%0A%0A def test_default2(self):%0A evaluator = TopListEvaluator(self.sample_result, N=2)%0A assert_equal(evaluator.min_sup, 0)%0A%0A%0A
class TestBa
|
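The two new test cases are consistent with a pigeonhole bound: with M = 5 posts per experiment and n = 2 of them out-of-topic, a top list of size N must contain at least max(0, N - (M - n)) of them, which gives 4 - 3 = 1 for N = 4 and 0 for N = 2. The formula is inferred from the expected values; the evaluator's actual min_sup implementation is not shown in this record.

# Inferred minimum-support bound, checked against the new expectations.
def min_sup(M, n, N):
    return max(0, N - (M - n))

assert min_sup(5, 2, 4) == 1  # matches test_default (N=4)
assert min_sup(5, 2, 2) == 0  # matches test_default2 (N=2)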
73873d47de67be9ab2b954c2a14c58fb3423fb3b
|
remove unused imports
|
txdarn/resources/support.py
|
txdarn/resources/support.py
|
import hashlib
import datetime
import functools
import pkgutil
from wsgiref.handlers import format_date_time
import eliot
from twisted.web import resource, template, http
from twisted.python.constants import Names, ValueConstant
from .. import encoding, compat
from . import headers
DEFAULT_CACHEABLE_POLICY = headers.CachePolicy(
cacheDirectives=(headers.PUBLIC,
headers.MAX_AGE(headers.ONE_YEAR)),
expiresOffset=headers.ONE_YEAR)
DEFAULT_UNCACHEABLE_POLICY = headers.CachePolicy(
cacheDirectives=(headers.NO_STORE,
headers.NO_CACHE(),
headers.MUST_REVALIDATE,
headers.MAX_AGE(0)),
expiresOffset=None)
class SlashIgnoringResource(resource.Resource):
def getChild(self, name, request):
if not (name or request.postpath):
return self
return resource.Resource.getChild(self, name, request)
class PolicyApplyingResource(resource.Resource):
def __init__(self, policies):
self.policies = policies
def applyPolicies(self, request):
for policy in self.policies:
request = policy.apply(request)
return request
class Greeting(SlashIgnoringResource):
@encoding.contentType(b'text/plain')
def render_GET(self, request):
return b'Welcome to SockJS!\n'
class IFrameElement(template.Element):
loader = template.XMLString(pkgutil.get_data('txdarn',
'content/iframe.xml'))
def __init__(self, sockJSURL):
self.sockJSURL = sockJSURL
@template.renderer
def sockjsLocation(self, request, tag):
tag.attributes[b'src'] = self.sockJSURL
return tag(b'')
# we have to manually insert these two attributes because
# twisted.template (predictably) does not maintain attribute
# order. unfortunately, the official sockjs-protocol test does a
# simple regex match against this page and so expects these to be
# a specific order. tag.attributes is an OrderedDict, so exploit
# that here to enforce attribute ordering.
@template.renderer
def xUACompatible(self, request, tag):
tag.attributes[b'http-equiv'] = b'X-UA-Compatible'
tag.attributes[b'content'] = b'IE=edge'
return tag()
@template.renderer
def contentType(self, request, tag):
tag.attributes[b'http-equiv'] = b'Content-Type'
tag.attributes[b'content'] = b'text/html; charset=UTF-8'
return tag()
class IFrameResource(PolicyApplyingResource):
iframe = None
etag = None
doctype = b'<!DOCTYPE html>'
def __init__(self,
sockJSURL,
policies=(DEFAULT_CACHEABLE_POLICY,
headers.AccessControlPolicy(methods=(b'GET',
b'OPTIONS'),
maxAge=2000000)),
_render=functools.partial(template.flattenString,
request=None)):
PolicyApplyingResource.__init__(self, policies)
self.element = IFrameElement(sockJSURL)
renderingDeferred = _render(root=self.element)
def _cbSetTemplate(iframe):
self.iframe = b'\n'.join([self.doctype, iframe])
renderingDeferred.addCallback(_cbSetTemplate)
renderingDeferred.addErrback(eliot.writeFailure)
if not self.iframe:
raise RuntimeError("Could not render iframe!")
hashed = hashlib.sha256(self.iframe).hexdigest()
self.etag = compat.networkString(hashed)
@encoding.contentType(b'text/html')
def render_GET(self, request):
if request.setETag(self.etag) is http.CACHED:
return b''
request = self.applyPolicies(request)
return self.iframe
class InfoResource(PolicyApplyingResource, SlashIgnoringResource):
def __init__(self,
policies=(DEFAULT_CACHEABLE_POLICY,
headers.AccessControlPolicy(methods=(b'GET',
b'OPTIONS'),
maxAge=2000000)),
_render=compat.asJSON,
_now=datetime.datetime.utcnow):
PolicyApplyingResource.__init__(self, policies)
self._render = _render
self._now = _now
@encoding.contentType(b'application/json')
def render_GET(self, request):
self._render({})
|
Python
| 0.000001
|
@@ -59,54 +59,8 @@
util
-%0Afrom wsgiref.handlers import format_date_time
%0A%0Aim
@@ -122,66 +122,8 @@
http
-%0Afrom twisted.python.constants import Names, ValueConstant
%0A%0Afr
|
a6cd85878c024e4248ca7b4135c7bf1199600772
|
Remove derp.
|
WebMirror/management/SpcnetTvManage.py
|
WebMirror/management/SpcnetTvManage.py
|
import calendar
import datetime
import json
import os
import os.path
import shutil
import traceback
from concurrent.futures import ThreadPoolExecutor
import urllib.error
import urllib.parse
from sqlalchemy import and_
from sqlalchemy import or_
import sqlalchemy.exc
from sqlalchemy_continuum.utils import version_table
if __name__ == "__main__":
import logSetup
logSetup.initLogging()
import common.database as db
import common.Exceptions
import common.management.file_cleanup
import Misc.HistoryAggregator.Consolidate
import flags
import pprint
import config
from config import C_RAW_RESOURCE_DIR
import WebMirror.TimedTriggers.QueueTriggers
import WebMirror.OutputFilters.rss.FeedDataParser
def delete_internal(sess, ids, netloc="(unknown)"):
if ids:
print("Updating for netloc(s) %s. %s rows requiring update." % (netloc, len(ids)))
else:
print("No rows needing retriggering for netloc %s." % (netloc))
ctbl = version_table(db.WebPages)
chunk_size = 5000
for chunk_idx in range(0, len(ids), chunk_size):
chunk = ids[chunk_idx:chunk_idx+chunk_size]
while 1:
try:
# Allow ids that only exist in the history table by falling back to a
# history-table query if the main table doesn't have the ID.
try:
ex = sess.query(db.WebPages.url).filter(db.WebPages.id == chunk[0]).one()[0]
except sqlalchemy.orm.exc.NoResultFound:
ex = sess.query(ctbl.c.url).filter(ctbl.c.id == chunk[0]).all()[0][0]
print("Example removed URL: '%s'" % (ex))
q1 = sess.query(db.WebPages).filter(db.WebPages.id.in_(chunk))
affected_rows_main = q1.delete(synchronize_session=False)
q2 = sess.query(ctbl).filter(ctbl.c.id.in_(chunk))
affected_rows_ver = q2.delete(synchronize_session=False)
sess.commit()
print("Deleted %s rows (%s version table rows). %0.2f%% done." %
(affected_rows_main, affected_rows_ver, 100 * ((chunk_idx) / len(ids))))
break
except sqlalchemy.exc.InternalError:
print("Transaction error (sqlalchemy.exc.InternalError). Retrying.")
sess.rollback()
except sqlalchemy.exc.OperationalError:
print("Transaction error (sqlalchemy.exc.OperationalError). Retrying.")
sess.rollback()
except sqlalchemy.exc.IntegrityError:
print("Transaction error (sqlalchemy.exc.IntegrityError). Retrying.")
sess.rollback()
except sqlalchemy.exc.InvalidRequestError:
print("Transaction error (sqlalchemy.exc.InvalidRequestError). Retrying.")
traceback.print_exc()
sess.rollback()
def exposed_delete_spcnet_invalid_url_pages():
'''
So the spcnet.tv forum software generates THOUSANDS of garbage links somehow.
Anyways, delete those.
'''
sess = db.get_db_session()
tables = [
db.WebPages.__table__,
version_table(db.WebPages)
]
for ctbl in tables:
# Print Querying for affected rows
q = sess.query(ctbl) \
.where(ctbl.c.netloc == "www.spcnet.tv") \
.where(ctbl.c.content.like('%<div class="blockrow restore">Invalid Forum specified. If you followed a valid link, please notify the <a href="/contact/index.php">administrator</a>%'))
ids = q.all()
ids = set(ids)
# Returned list of IDs is each ID packed into a 1-tuple. Unwrap those tuples so it's just a list of integer IDs.
ids = [tmp[0] for tmp in ids]
print("Fount %s rows requring deletion. Deleting." % len(update))
delete_internal(sess, ids)
sess.commit()
|
Python
| 0.000001
|
@@ -2811,24 +2811,29 @@
ery(ctbl
+.c.id
) %5C%0A%09%09%09.
where(ct
@@ -2816,37 +2816,38 @@
tbl.c.id) %5C%0A%09%09%09.
-wh
+filt
er
-e
(ctbl.c.netloc =
@@ -2875,13 +2875,14 @@
%09%09%09.
-wh
+filt
er
-e
(ctb
@@ -3054,16 +3054,45 @@
/a%3E%25'))%0A
+%09%09print(%22Query:%22)%0A%09%09print(q)%0A
%09%09ids =
|
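The diff above swaps .where(...) for .filter(...): in the SQLAlchemy of that era, ORM Query objects expose filter(), while where() belongs to Core select() constructs. A runnable sketch of the distinction (SQLAlchemy 1.4+ style, throwaway in-memory table, illustrative names):

# Minimal sketch; table and column names are illustrative only.
import sqlalchemy as sa

engine = sa.create_engine('sqlite://')
meta = sa.MetaData()
pages = sa.Table('pages', meta,
                 sa.Column('id', sa.Integer, primary_key=True),
                 sa.Column('netloc', sa.String))
meta.create_all(engine)

with engine.connect() as conn:
    conn.execute(sa.insert(pages), [{'netloc': 'www.spcnet.tv'}])
    # Core select() takes .where(...)
    stmt = sa.select(pages.c.id).where(pages.c.netloc == 'www.spcnet.tv')
    print(conn.execute(stmt).fetchall())

# An ORM Query over a mapped class instead uses .filter(...):
#   session.query(Pages.id).filter(Pages.netloc == 'www.spcnet.tv')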
e83edfa7cd561e8fb4c700ff3cf18a684213dc62
|
Allow removal of q:lines
|
txircd/modules/cmd_qline.py
|
txircd/modules/cmd_qline.py
|
from twisted.words.protocols import irc
from txircd.modbase import Command
from txircd.utils import epoch, now, irc_lower, parse_duration, CaseInsensitiveDictionary, VALID_NICKNAME
from fnmatch import fnmatch
irc.RPL_STATSQLINE = "217"
class QlineCommand(Command):
def __init__(self):
self.banList = CaseInsensitiveDictionary()
def onUse(self, user, data):
mask = data["mask"]
if "reason" in data:
self.banList[mask] = {
"setter": user.nickname,
"created": epoch(now()),
"duration": data["duration"],
"reason": data["reason"]
}
if "*" not in mask and "?" not in mask:
if mask in self.ircd.users:
self.remove_user(self.ircd.users[mask], data["reason"])
else:
now_banned = {}
for uid, udata in self.ircd.users.iteritems():
reason = self.match_qline(udata)
if reason:
now_banned[uid] = reason
for uid, reason in now_banned.iteritems():
self.remove_user(self.ircd.users[uid], reason)
def processParams(self, user, params):
if user.registered > 0:
user.sendMessage(irc.ERR_NOTYETREGISTERED, "QLINE", ":You have not registered")
return {}
if "o" not in user.mode:
user.sendMessage(irc.ERR_NOPRIVILEGES, ":Permission denied - You do not have the correct operator privileges")
return {}
if not params:
user.sendMessage(irc.ERR_NEEDMOREPARAMS, "QLINE", ":Not enough parameters")
return {}
self.expire_qlines()
if len(params) < 3 or not params[2]:
if params[0] not in self.banList:
user.sendMessage("NOTICE", ":*** There is not a q:line set on {}; check /stats Q for a list of existing q:lines".format(params[0]))
return {}
return {
"user": user,
"mask": params[0]
}
if params[0] in self.banList:
user.sendMessage("NOTICE", ":*** Q:line already exists for {}; check /stats Q for a list of existing q:lines".format(params[0]))
return {}
if not params[0].replace("*", ""):
user.sendMessage("NOTICE", ":*** That q:line will match all nicks! Please check your nick mask and try again.")
return {}
if not VALID_NICKNAME.match(params[0].replace("*", "").replace("?", "a")):
user.sendMessage("NOTICE", ":*** That isn't a valid nick mask and won't match any nicks. Please check your nick mask and try again.")
return {}
return {
"user": user,
"mask": params[0],
"duration": parse_duration(params[1]),
"reason": " ".join(params[2:])
}
def remove_user(self, user, reason):
quit_to = set()
leavingChans = user.channels.keys()
for chan in leavingChans:
cdata = self.ircd.channels[chan]
user.leave(cdata)
for u in cdata.users:
quit_to.add(u)
for u in quit_to:
u.sendMessage("QUIT", ":Q:Lined: {}".format(reason), to=None, prefix=user.prefix())
user.sendMessage("ERROR", ":Closing Link {} [Q:Lined: {}]".format(user.prefix(), data["reason"]), to=None, prefix=None)
del self.ircd.users[user.nickname]
user.socket.transport.loseConnection()
def statsList(self, cmd, data):
if cmd != "STATS":
return
if data["statstype"] != "Q":
return
udata = data["user"]
self.expire_qlines()
for mask, linedata in self.banList.iteritems():
udata.sendMessage(irc.RPL_STATSQLINE, ":{} {} {} {} :{}".format(mask, linedata["created"], linedata["duration"], linedata["setter"], linedata["reason"]))
def check_register(self, user):
self.expire_qlines()
reason = self.match_qline(user)
if not reason:
return True
user.sendMessage("NOTICE", ":{}".format(self.ircd.servconfig["client_ban_msg"]))
user.sendMessage("ERROR", ":Closing Link: {} [Q:Lined: {}]".format(user.hostname, reason))
def match_qline(self, user):
if "o" in user.mode:
return None
lowerNick = irc_lower(user.nickname)
for mask, linedata in self.banList.iteritems():
if fnmatch(lowerNick, mask):
return linedata["reason"]
return None
def expire_qlines(self):
current_time = epoch(now())
expired = []
for mask, linedata in self.banList.iteritems():
if linedata["duration"] and current_time > linedata["created"] + linedata["duration"]:
expired.append(mask)
for mask in expired:
del self.banList[mask]
def blockNick(self, user, command, data):
if command != "NICK":
return data
newNick = data["nick"]
lowerNick = irc_lower(newNick)
self.expire_qlines()
for mask, linedata in self.banList.iteritems():
if fnmatch(lowerNick, mask):
user.sendMessage(irc.ERR_ERRONEUSNICKNAME, newNick, ":Invalid nickname: {}".format(linedata["reason"]))
return {}
return data
class Spawner(object):
def __init__(self, ircd):
self.ircd = ircd
self.qlineCmd = None
def spawn(self):
self.qlineCmd = QlineCommand()
return {
"commands": {
"QLINE": self.qlineCmd
},
"actions": {
"commandextra": [self.qlineCmd.statsList],
"register": [self.qlineCmd.check_register],
"commandpermission": [self.qlineCmd.blockNick]
}
}
def cleanup(self):
del self.ircd.commands["QLINE"]
self.ircd.actions["commandextra"].remove(self.qlineCmd.statsList)
self.ircd.actions["register"].remove(self.qlineCmd.check_register)
self.ircd.actions["commandpermission"].remove(self.qlineCmd.blockNick)
|
Python
| 0.000021
|
@@ -953,16 +953,50 @@
reason)%0A
+%09%09else:%0A%09%09%09del self.banList%5Bmask%5D%0A
%09%0A%09def p
|
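With the added else branch, issuing QLINE with a mask but no reason now deletes the entry instead of silently doing nothing. Stripped of the IRC plumbing, the ban-list lifecycle is a keyed dict with optional expiry; a standalone sketch using a plain dict in place of the module's CaseInsensitiveDictionary:

import time

ban_list = {}

def set_qline(mask, reason, duration=0):
    ban_list[mask] = {'reason': reason, 'created': time.time(),
                      'duration': duration}

def unset_qline(mask):
    ban_list.pop(mask, None)  # mask with no reason means removal

def expire_qlines(now=None):
    now = now or time.time()
    for mask in [m for m, d in ban_list.items()
                 if d['duration'] and now > d['created'] + d['duration']]:
        del ban_list[mask]

set_qline('bad*', 'spam', duration=60)
unset_qline('bad*')
assert 'bad*' not in ban_list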
747013feb65b7c9621234a4f2c808b00f0e6787f
|
Fix config passing when master pillar is turned off
|
salt/transport/__init__.py
|
salt/transport/__init__.py
|
# -*- coding: utf-8 -*-
'''
Encapsulate the different transports available to Salt. Currently this is only ZeroMQ.
'''
import salt.payload
import salt.auth
class Channel(object):
@staticmethod
def factory(opts, **kwargs):
# Default to ZeroMQ for now
ttype = 'zeromq'
if 'transport_type' in opts:
ttype = opts['transport_type']
elif 'transport_type' in opts['pillar']['master']:
ttype = opts['pillar']['master']['transport_type']
if ttype == 'zeromq':
return ZeroMQChannel(opts, **kwargs)
else:
raise Exception("Channels are only defined for ZeroMQ")
# return NewKindOfChannel(opts, **kwargs)
class ZeroMQChannel(Channel):
'''
Encapsulate sending routines to ZeroMQ.
ZMQ Channels default to 'crypt=aes'
'''
def __init__(self, opts, **kwargs):
self.opts = opts
# crypt defaults to 'aes'
self.crypt = kwargs['crypt'] if 'crypt' in kwargs else 'aes'
self.serial = salt.payload.Serial(opts)
if self.crypt != 'clear':
self.auth = salt.crypt.SAuth(opts)
if 'master_uri' in kwargs:
master_uri = kwargs['master_uri']
else:
master_uri = opts['master_uri']
self.sreq = salt.payload.SREQ(master_uri)
def crypted_transfer_decode_dictentry(self, load, dictkey=None, tries=3, timeout=60):
ret = self.sreq.send('aes', self.auth.crypticle.dumps(load), tries, timeout)
key = self.auth.get_keys()
aes = key.private_decrypt(ret['key'], 4)
pcrypt = salt.crypt.Crypticle(self.opts, aes)
return pcrypt.loads(ret[dictkey])
def _crypted_transfer(self, load, tries=3, timeout=60):
'''
In case of authentication errors, try to renegotiate authentication
and retry the method.
Indeed, we can fail too early in case of a master restart during a
minion state execution call
'''
def _do_transfer():
return self.auth.crypticle.loads(
self.sreq.send(self.crypt,
self.auth.crypticle.dumps(load),
tries,
timeout)
)
try:
return _do_transfer()
except salt.crypt.AuthenticationError:
self.auth = salt.crypt.SAuth(self.opts)
return _do_transfer()
def _uncrypted_transfer(self, load, tries=3, timeout=60):
return self.sreq.send(self.crypt, load, tries, timeout)
def send(self, load, tries=3, timeout=60):
if self.crypt != 'clear':
return self._crypted_transfer(load, tries, timeout)
else:
return self._uncrypted_transfer(load, tries, timeout)
# Do we ever do non-crypted transfers?
|
Python
| 0
|
@@ -404,33 +404,37 @@
ype' in opts
-%5B
+.get(
'pillar'
%5D%5B'master'%5D:
@@ -421,26 +421,34 @@
'pillar'
-%5D%5B
+, %7B%7D).get(
'master'
%5D:%0A
@@ -439,17 +439,21 @@
'master'
-%5D
+, %7B%7D)
:%0A
|
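The fix above replaces hard indexing with chained .get(..., {}) calls so that a configuration without master pillar data falls through cleanly instead of raising KeyError. The difference in isolation:

opts = {}  # e.g. master pillar turned off, so no 'pillar' key at all

# Before: raises KeyError as soon as 'pillar' is missing.
# ttype = opts['pillar']['master']['transport_type']

# After: each level defaults to {}, so the lookup degrades gracefully.
master_opts = opts.get('pillar', {}).get('master', {})
if 'transport_type' in master_opts:
    ttype = master_opts['transport_type']
else:
    ttype = 'zeromq'  # the module's default
print(ttype)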
2159a35811cac75b0c68677fc41443aa8eac6e5b
|
Stop conn_join from overriding channel restrictions
|
txircd/modules/conn_join.py
|
txircd/modules/conn_join.py
|
from txircd.channel import IRCChannel
from txircd.modbase import Module
class Autojoin(Module):
def joinOnConnect(self, user):
if "client_join_on_connect" in self.ircd.servconfig:
for channel in self.ircd.servconfig["client_join_on_connect"]:
user.join(self.ircd.channels[channel] if channel in self.ircd.channels else IRCChannel(self.ircd, channel))
return True
class Spawner(object):
def __init__(self, ircd):
self.ircd = ircd
self.conn_join = None
def spawn(self):
self.conn_join = Autojoin().hook(self.ircd)
return {
"actions": {
"welcome": self.conn_join.joinOnConnect
}
}
|
Python
| 0
|
@@ -286,109 +286,45 @@
ser.
-join(self.ircd.channels%5Bchannel%5D if channel in self.ircd.channels else IRCChannel(self.ircd
+handleCommand(%22JOIN%22, None
,
+%5B
channel
-)
+%5D
)%0A
|
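Routing the autojoin through handleCommand("JOIN", ...) sends it down the same command pipeline as a user-typed JOIN, so ban checks and other commandpermission hooks get a chance to veto it. A toy sketch of why the dispatch layer matters (hook and channel names below are invented, not txircd's real internals):

# Toy dispatcher: permission hooks run only when going through the
# command layer, which is what the fix above restores.
permission_hooks = []

def handle_command(user, cmd, params):
    for hook in permission_hooks:
        params = hook(user, cmd, params)
        if not params:          # a hook vetoed the command
            return
    do_join(user, params[0])    # the actual action

def deny_private(user, cmd, params):
    if cmd == 'JOIN' and params and params[0] == '#private':
        return None
    return params

def do_join(user, channel):
    print('%s joined %s' % (user, channel))

permission_hooks.append(deny_private)
handle_command('alice', 'JOIN', ['#lobby'])    # allowed
handle_command('alice', 'JOIN', ['#private'])  # blocked by the hook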
493e48ddb52889fcb282b7747e0f5a9c2b541005
|
Remove internal variables/properties with the reserved words
|
salt/utils/asynchronous.py
|
salt/utils/asynchronous.py
|
# -*- coding: utf-8 -*-
'''
Helpers/utils for working with tornado async stuff
'''
from __future__ import absolute_import, print_function, unicode_literals
import tornado.ioloop
import tornado.concurrent
import contextlib
from salt.utils import zeromq
@contextlib.contextmanager
def current_ioloop(io_loop):
'''
A context manager that will set the current ioloop to io_loop for the context
'''
orig_loop = tornado.ioloop.IOLoop.current()
io_loop.make_current()
try:
yield
finally:
orig_loop.make_current()
class SyncWrapper(object):
'''
A wrapper to make Async classes synchronous
This is used as a simple wrapper, for example:
async = AsyncClass()
# this method would regularly return a future
future = async.async_method()
sync = SyncWrapper(async_factory_method, (arg1, arg2), {'kwarg1': 'val'})
# the sync wrapper will automatically wait on the future
ret = sync.async_method()
'''
def __init__(self, method, args=tuple(), kwargs=None):
if kwargs is None:
kwargs = {}
self.io_loop = zeromq.ZMQDefaultLoop()
kwargs['io_loop'] = self.io_loop
with current_ioloop(self.io_loop):
self.async = method(*args, **kwargs)
def __getattribute__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError as ex:
if key == 'async':
raise ex
attr = getattr(self.async, key)
if hasattr(attr, '__call__'):
def wrap(*args, **kwargs):
# Overload the ioloop for the func call-- since it might call .current()
with current_ioloop(self.io_loop):
ret = attr(*args, **kwargs)
if isinstance(ret, tornado.concurrent.Future):
ret = self._block_future(ret)
return ret
return wrap
else:
return attr
def _block_future(self, future):
self.io_loop.add_future(future, lambda future: self.io_loop.stop())
self.io_loop.start()
return future.result()
def __del__(self):
'''
On deletion of the async wrapper, make sure to clean up the async stuff
'''
if hasattr(self, 'async'):
if hasattr(self.async, 'close'):
# Certain things such as streams should be closed before
# their associated io_loop is closed to allow for proper
# cleanup.
self.async.close()
del self.async
self.io_loop.close()
del self.io_loop
elif hasattr(self, 'io_loop'):
self.io_loop.close()
del self.io_loop
|
Python
| 0.000001
|
@@ -61,24 +61,31 @@
ornado async
+hronous
stuff%0A'''%0A%0A
@@ -701,16 +701,23 @@
async
+hronous
= Async
@@ -787,24 +787,31 @@
ture = async
+hronous
.async_metho
@@ -1257,16 +1257,23 @@
lf.async
+hronous
= metho
@@ -1461,16 +1461,23 @@
= 'async
+hronous
':%0A
@@ -1529,16 +1529,23 @@
lf.async
+hronous
, key)%0A
@@ -2261,24 +2261,31 @@
of the async
+hronous
wrapper, ma
@@ -2313,16 +2313,23 @@
he async
+hronous
stuff%0A
@@ -2370,16 +2370,23 @@
, 'async
+hronous
'):%0A
@@ -2414,16 +2414,23 @@
lf.async
+hronous
, 'close
@@ -2632,16 +2632,23 @@
lf.async
+hronous
.close()
@@ -2674,16 +2674,23 @@
lf.async
+hronous
%0A
|
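Background for the rename above: async and await became full keywords in Python 3.7, so self.async = ... stops even parsing there. A minimal sketch of the renamed-attribute pattern (simplified to __getattr__; the real class uses __getattribute__ plus io_loop handling):

# Under Python 3.7+, the commented line below no longer parses:
#   self.async = method(*args, **kwargs)   # SyntaxError

class SyncWrapper(object):
    def __init__(self, factory):
        # Renamed attribute sidesteps the keyword entirely.
        self.asynchronous = factory()

    def __getattr__(self, key):
        # Forward unknown attribute lookups to the wrapped object.
        return getattr(self.asynchronous, key)

w = SyncWrapper(dict)
w.update({'ok': True})   # forwarded to the wrapped dict
print(w.asynchronous)    # {'ok': True}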
1891946c1d736abb103cff9af561356e551bbd6c
|
Make hello-world-bot write to a logfile different from the logger bot.
|
samples/hello-world-bot.py
|
samples/hello-world-bot.py
|
# Copyright (c) 2013 Alan McIntyre
import decimal
import time
import btceapi
import btcebot
class RangeTrader(btcebot.TraderBase):
'''
This is a simple trader that handles a single currency pair, selling
all available inventory if price is above sell_price, buying with
all available funds if price is below buy_price. Use for actual trading
at your own risk (and remember this is just a sample, not a recommendation
on how to make money trading using this framework).
'''
def __init__(self, api, pair, buy_price, sell_price, live_trades = False):
btcebot.TraderBase.__init__(self, (pair,))
self.api = api
self.pair = pair
self.buy_price = buy_price
self.sell_price = sell_price
self.live_trades = live_trades
self.current_lowest_ask = None
self.current_highest_bid = None
# Apparently the API adds the fees to the amount you submit,
# so dial back the order just enough to make up for the
# 0.2% trade fee.
self.fee_adjustment = decimal.Decimal("0.998")
def _attemptBuy(self, price, amount):
info = self.api.getInfo()
curr1, curr2 = self.pair.split("_")
# Limit order to what we can afford to buy.
available = getattr(info, "balance_" + curr2)
max_buy = available / price
buy_amount = min(max_buy, amount) * self.fee_adjustment
if buy_amount >= btceapi.min_orders[self.pair]:
print "attempting to buy %s %s at %s for %s %s" % (buy_amount,
curr1.upper(), price, buy_amount*price, curr2.upper())
if self.live_trades:
r = self.api.trade(self.pair, "buy", price, buy_amount)
print "\tReceived %s %s" % (r.received, curr1.upper())
# If the order didn't fill completely, cancel the remaining order
if r.order_id != 0:
print "\tCanceling unfilled portion of order"
self.api.cancelOrder(r.order_id)
def _attemptSell(self, price, amount):
info = self.api.getInfo()
curr1, curr2 = self.pair.split("_")
# Limit order to what we have available to sell.
available = getattr(info, "balance_" + curr1)
sell_amount = min(available, amount) * self.fee_adjustment
if sell_amount >= btceapi.min_orders[self.pair]:
print "attempting to sell %s %s at %s for %s %s" % (sell_amount,
curr1.upper(), price, sell_amount*price, curr2.upper())
if self.live_trades:
r = self.api.trade(self.pair, "sell", price, sell_amount)
print "\tReceived %s %s" % (r.received, curr2.upper())
# If the order didn't fill completely, cancel the remaining order
if r.order_id != 0:
print "\tCanceling unfilled portion of order"
self.api.cancelOrder(r.order_id)
# This overrides the onNewDepth method in the TraderBase class, so the
# framework will automatically pick it up and send updates to it.
def onNewDepth(self, t, pair, asks, bids):
ask_price, ask_amount = asks[0]
bid_price, bid_amount = bids[0]
if ask_price <= self.buy_price:
self._attemptBuy(ask_price, ask_amount)
elif bid_price >= self.sell_price:
self._attemptSell(bid_price, bid_amount)
def onBotError(msg, tracebackText):
tstr = time.strftime("%Y/%m/%d %H:%M:%S")
print "%s - %s" % (tstr, msg)
open("logger-bot-error.log", "a").write(
"%s - %s\n%s\n%s\n" % (tstr, msg, tracebackText, "-"*80))
def run(key_file, buy_floor, sell_ceiling, live_trades):
# Load the keys and create an API object from the first one.
handler = btceapi.KeyHandler(key_file, resaveOnDeletion=True)
key = handler.getKeys()[0]
print "Trading with key %s" % key
api = btceapi.TradeAPI(key, handler=handler)
# Create a trader that handles LTC/USD trades in the given range.
trader = RangeTrader(api, "ltc_usd", buy_floor, sell_ceiling, live_trades)
# Create a bot and add the trader to it.
bot = btcebot.Bot()
bot.addTrader(trader)
# Add an error handler so we can print info about any failures
bot.addErrorHandler(onBotError)
# The bot will provide the traders with updated information every
# 15 seconds.
bot.setCollectionInterval(15)
bot.start()
print "Running; press Ctrl-C to stop"
try:
while 1:
# you can do anything else you prefer in this loop while
# the bot is running in the background
time.sleep(3600)
except KeyboardInterrupt:
print "Stopping..."
finally:
bot.stop()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Simple range trader example.')
parser.add_argument('key_file',
help='Path to a file containing key/secret/nonce data.')
parser.add_argument('buy_floor', type=decimal.Decimal,
help='Price at or below which we will buy.')
parser.add_argument('sell_ceiling', type=decimal.Decimal,
help='Price at or above which we will sell.')
parser.add_argument('--live-trades', default=False, action="store_true",
help='Actually make trades.')
args = parser.parse_args()
run(args.key_file, args.buy_floor, args.sell_ceiling, args.live_trades)
|
Python
| 0.000004
|
@@ -3607,14 +3607,19 @@
en(%22
-logger
+hello-world
-bot
|
7d216557c039d482490ad518f4366d09b14af4c0
|
Convert log likelihood values from mnl_estimate to Python floats.
|
urbansim/urbanchoice/mnl.py
|
urbansim/urbanchoice/mnl.py
|
import numpy as np
import pandas as pd
import scipy.optimize
import pmat
from pmat import PMAT
# right now MNL can only estimate location choice models, where every equation
# is the same
# it might be better to use stats models for a non-location choice problem
# data should be column matrix of dimensions NUMVARS x (NUMALTS*NUMOBVS)
# beta is a row vector of dimensions 1 X NUMVARS
def mnl_probs(data, beta, numalts):
clamp = data.typ == 'numpy'
utilities = beta.multiply(data)
utilities.reshape(numalts, utilities.size() / numalts)
exponentiated_utility = utilities.exp(inplace=True)
if clamp:
exponentiated_utility.inftoval(1e20)
if clamp:
exponentiated_utility.clamptomin(1e-300)
sum_exponentiated_utility = exponentiated_utility.sum(axis=0)
probs = exponentiated_utility.divide_by_row(
sum_exponentiated_utility, inplace=True)
if clamp:
probs.nantoval(1e-300)
if clamp:
probs.clamptomin(1e-300)
return probs
def get_hessian(derivative):
return np.linalg.inv(np.dot(derivative, np.transpose(derivative)))
def get_standard_error(hessian):
return np.sqrt(np.diagonal(hessian))
# data should be column matrix of dimensions NUMVARS x (NUMALTS*NUMOBVS)
# beta is a row vector of dimensions 1 X NUMVARS
def mnl_loglik(beta, data, chosen, numalts, weights=None, lcgrad=False,
stderr=0):
numvars = beta.size
numobs = data.size() / numvars / numalts
beta = np.reshape(beta, (1, beta.size))
beta = PMAT(beta, data.typ)
probs = mnl_probs(data, beta, numalts)
# lcgrad is the special gradient for the latent class membership model
if lcgrad:
assert weights
gradmat = weights.subtract(probs).reshape(probs.size(), 1)
gradarr = data.multiply(gradmat)
else:
if not weights:
gradmat = chosen.subtract(probs).reshape(probs.size(), 1)
else:
gradmat = chosen.subtract(probs).multiply_by_row(
weights).reshape(probs.size(), 1)
gradarr = data.multiply(gradmat)
if stderr:
gradmat = data.multiply_by_row(gradmat.reshape(1, gradmat.size()))
gradmat.reshape(numvars, numalts * numobs)
return get_standard_error(get_hessian(gradmat.get_mat()))
chosen.reshape(numalts, numobs)
if weights is not None:
if probs.shape() == weights.shape():
loglik = ((probs.log(inplace=True)
.element_multiply(weights, inplace=True)
.element_multiply(chosen, inplace=True))
.sum(axis=1).sum(axis=0))
else:
loglik = ((probs.log(inplace=True)
.multiply_by_row(weights, inplace=True)
.element_multiply(chosen, inplace=True))
.sum(axis=1).sum(axis=0))
else:
loglik = (probs.log(inplace=True).element_multiply(
chosen, inplace=True)).sum(axis=1).sum(axis=0)
if loglik.typ == 'numpy':
loglik, gradarr = loglik.get_mat(), gradarr.get_mat().flatten()
else:
loglik = loglik.get_mat()[0, 0]
gradarr = np.reshape(gradarr.get_mat(), (1, gradarr.size()))[0]
return -1 * loglik, -1 * gradarr
def mnl_simulate(data, coeff, numalts, GPU=False, returnprobs=False):
atype = 'numpy' if not GPU else 'cuda'
data = np.transpose(data)
coeff = np.reshape(np.array(coeff), (1, len(coeff)))
data, coeff = PMAT(data, atype), PMAT(coeff, atype)
probs = mnl_probs(data, coeff, numalts)
if returnprobs:
return np.transpose(probs.get_mat())
# convert to cpu from here on - gpu doesn't currently support these ops
if probs.typ == 'cuda':
probs = PMAT(probs.get_mat())
probs = probs.cumsum(axis=0)
r = pmat.random(probs.size() / numalts)
choices = probs.subtract(r, inplace=True).firstpositive(axis=0)
return choices.get_mat()
def mnl_estimate(data, chosen, numalts, GPU=False, coeffrange=(-3, 3),
weights=None, lcgrad=False, beta=None):
"""
Parameters
----------
data
chosen
numalts
GPU : bool
coeffrange
weights
lcgrad : bool
beta
Returns
-------
log_likelihood : dict
Dictionary of log-likelihood values describing the quality of
the model fit.
fit_parameters : pandas.DataFrame
Table of fit parameters with columns 'Coefficient', 'Std. Error',
'T-Score'.
"""
atype = 'numpy' if not GPU else 'cuda'
numvars = data.shape[1]
numobs = data.shape[0] / numalts
if chosen is None:
chosen = np.ones((numobs, numalts)) # used for latent classes
data = np.transpose(data)
chosen = np.transpose(chosen)
data, chosen = PMAT(data, atype), PMAT(chosen, atype)
if weights is not None:
weights = PMAT(np.transpose(weights), atype)
if beta is None:
beta = np.zeros(numvars)
bounds = np.array([coeffrange for i in range(numvars)])
args = (data, chosen, numalts, weights, lcgrad)
bfgs_result = scipy.optimize.fmin_l_bfgs_b(mnl_loglik,
beta,
args=args,
fprime=None,
factr=1e5,
approx_grad=False,
bounds=bounds
)
beta = bfgs_result[0]
stderr = mnl_loglik(
beta, data, chosen, numalts, weights, stderr=1, lcgrad=lcgrad)
l0beta = np.zeros(numvars)
l0 = -1 * mnl_loglik(l0beta, *args)[0]
l1 = -1 * mnl_loglik(beta, *args)[0]
log_likelihood = {
'null': l0[0][0],
'convergence': l1[0][0],
'ratio': (1 - (l1 / l0))[0][0]
}
fit_parameters = pd.DataFrame({
'Coefficient': beta,
'Std. Error': stderr,
'T-Score': beta / stderr})
return log_likelihood, fit_parameters
|
Python
| 0.00001
|
@@ -5820,16 +5820,22 @@
l':
+float(
l0%5B0%5D%5B0%5D
,%0A
@@ -5826,24 +5826,25 @@
oat(l0%5B0%5D%5B0%5D
+)
,%0A 'c
@@ -5860,16 +5860,22 @@
e':
+float(
l1%5B0%5D%5B0%5D
,%0A
@@ -5870,16 +5870,17 @@
l1%5B0%5D%5B0%5D
+)
,%0A
@@ -5890,16 +5890,22 @@
ratio':
+float(
(1 - (l1
@@ -5917,16 +5917,17 @@
))%5B0%5D%5B0%5D
+)
%0A %7D%0A%0A
|
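The point of the change above: indexing a NumPy array with [0][0] yields a NumPy scalar such as numpy.float64, not a built-in float, and wrapping it in float() normalizes the type so downstream consumers never have to special-case NumPy scalars:

import numpy as np

l0 = np.array([[-123.4]])
val = l0[0][0]
print(type(val))          # <class 'numpy.float64'>, a NumPy scalar
print(type(float(val)))   # <class 'float'>, a plain Python float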
142538049bd1bf8ae92c80060435965104ec54bb
|
Add ability to use the pattern ((args, kwargs), callable) when specifying schema
|
scrapi/base/transformer.py
|
scrapi/base/transformer.py
|
from __future__ import unicode_literals
import abc
import logging
logger = logging.getLogger(__name__)
class BaseTransformer(object):
__metaclass__ = abc.ABCMeta
def transform(self, doc):
return self._transform(self.schema, doc)
def _transform(self, schema, doc):
transformed = {}
for key, value in schema.items():
if isinstance(value, dict):
transformed[key] = self._transform(value, doc)
elif isinstance(value, list) or isinstance(value, tuple):
transformed[key] = self._transform_iter(value, doc)
elif isinstance(value, basestring):
transformed[key] = self._transform_string(value, doc)
return transformed
def _transform_iter(self, l, doc):
docs = []
for value in l:
if isinstance(value, basestring):
docs.append(self._transform_string(value, doc))
elif callable(value):
return value(*[res for res in docs])
@abc.abstractmethod
def _transform_string(self, string, doc):
raise NotImplementedError
@abc.abstractproperty
def name(self):
raise NotImplementedError
@abc.abstractproperty
def schema(self):
raise NotImplementedError
class XMLTransformer(BaseTransformer):
__metaclass__ = abc.ABCMeta
def _transform_string(self, string, doc):
val = doc.xpath(string, namespaces=self.namespaces)
return '' if not val else unicode(val[0]) if len(val) == 1 else [unicode(v) for v in val]
@abc.abstractproperty
def namespaces(self):
raise NotImplementedError
|
Python
| 0.000015
|
@@ -795,16 +795,124 @@
cs = %5B%5D%0A
+%0A if isinstance(l%5B0%5D, tuple) and len(l) == 2:%0A return self._transform_arg_kwargs(l, doc)%0A%0A
@@ -1125,16 +1125,750 @@
docs%5D)%0A%0A
+ def _transform_arg_kwargs(self, l, doc):%0A if len(l%5B0%5D) == 1:%0A if isinstance(l%5B0%5D%5B0%5D, dict):%0A kwargs = l%5B0%5D%5B0%5D%0A args = %5B%5D%0A elif isinstance(l%5B0%5D%5B0%5D, tuple) or isinstance(l%5B0%5D%5B0%5D, list):%0A args = l%5B0%5D%5B0%5D%0A kwargs = %7B%7D%0A else:%0A raise ValueError(%22((args, kwargs), callable) pattern not matched, %7B%7D does not define args or kwargs correctly%22.format(l))%0A else:%0A args = l%5B0%5D%5B0%5D%0A kwargs = l%5B0%5D%5B1%5D%0A fn = l%5B1%5D%0A return fn(%0A *%5Bself._transform_string(arg, doc) for arg in args%5D,%0A **%7Bkey: self._transform_string(value, doc) for key, value in kwargs.items()%7D%0A )%0A%0A
@abc
|
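The added _transform_arg_kwargs lets a schema entry carry explicit positional and keyword arguments for its callable, each given as a string that is transformed against the document first. A standalone sketch of the ((args, kwargs), callable) pattern, with a dict lookup standing in for the real xpath-based _transform_string:

# Sketch only: a dict lookup replaces the xpath machinery.
def transform_string(key, doc):
    return doc.get(key, '')

def transform_entry(entry, doc):
    spec, fn = entry
    if len(spec) == 2:
        args, kwargs = spec
    elif isinstance(spec[0], dict):
        args, kwargs = [], spec[0]
    else:
        args, kwargs = spec[0], {}
    return fn(*[transform_string(a, doc) for a in args],
              **{k: transform_string(v, doc) for k, v in kwargs.items()})

doc = {'first': 'Ada', 'last': 'Lovelace'}
entry = ((('first', 'last'),), lambda a, b: a + ' ' + b)
print(transform_entry(entry, doc))  # 'Ada Lovelace'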
213bcd782db44a1765196914c70b6283be2e9032
|
Add support for detection of gzipped packet traces
|
util/decode_packet_trace.py
|
util/decode_packet_trace.py
|
#!/usr/bin/env python
# Copyright (c) 2013-2014 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Hansson
# This script is used to dump protobuf packet traces to ASCII
# format. It assumes that protoc has been executed and already
# generated the Python package for the packet messages. This can
# be done manually using:
# protoc --python_out=. --proto_path=src/proto src/proto/packet.proto
#
# The ASCII trace format uses one line per request in the format cmd,
# addr, size, tick, flags. For example:
# r,128,64,4000,0
# w,232123,64,500000,0
import protolib
import sys
# Import the packet proto definitions. If they are not found, attempt
# to generate them automatically. This assumes that the script is
# executed from the gem5 root.
try:
import packet_pb2
except:
print "Did not find packet proto definitions, attempting to generate"
from subprocess import call
error = call(['protoc', '--python_out=util', '--proto_path=src/proto',
'src/proto/packet.proto'])
if not error:
print "Generated packet proto definitions"
try:
import google.protobuf
except:
print "Please install Python protobuf module"
exit(-1)
import packet_pb2
else:
print "Failed to import packet proto definitions"
exit(-1)
def main():
if len(sys.argv) != 3:
print "Usage: ", sys.argv[0], " <protobuf input> <ASCII output>"
exit(-1)
try:
proto_in = open(sys.argv[1], 'rb')
except IOError:
print "Failed to open ", sys.argv[1], " for reading"
exit(-1)
try:
ascii_out = open(sys.argv[2], 'w')
except IOError:
print "Failed to open ", sys.argv[2], " for writing"
exit(-1)
# Read the magic number in 4-byte Little Endian
magic_number = proto_in.read(4)
if magic_number != "gem5":
print "Unrecognized file"
exit(-1)
print "Parsing packet header"
# Add the packet header
header = packet_pb2.PacketHeader()
protolib.decodeMessage(proto_in, header)
print "Object id:", header.obj_id
print "Tick frequency:", header.tick_freq
print "Parsing packets"
num_packets = 0
packet = packet_pb2.Packet()
# Decode the packet messages until we hit the end of the file
while protolib.decodeMessage(proto_in, packet):
num_packets += 1
# ReadReq is 1 and WriteReq is 4 in src/mem/packet.hh Command enum
cmd = 'r' if packet.cmd == 1 else ('w' if packet.cmd == 4 else 'u')
if packet.HasField('pkt_id'):
ascii_out.write('%s,' % (packet.pkt_id))
if packet.HasField('flags'):
ascii_out.write('%s,%s,%s,%s,%s\n' % (cmd, packet.addr, packet.size,
packet.flags, packet.tick))
else:
ascii_out.write('%s,%s,%s,%s\n' % (cmd, packet.addr, packet.size,
packet.tick))
print "Parsed packets:", num_packets
# We're done
ascii_out.close()
proto_in.close()
if __name__ == "__main__":
main()
|
Python
| 0
|
@@ -2558,16 +2558,28 @@
0000,0%0A%0A
+import gzip%0A
import p
@@ -3487,24 +3487,465 @@
)%0A%0A try:%0A
+ # First see if this file is gzipped%0A try:%0A # Opening the file works even if it is not a gzip file%0A proto_in = gzip.open(sys.argv%5B1%5D, 'rb')%0A%0A # Force a check of the magic number by seeking in the%0A # file. If we do not do it here the error will occur when%0A # reading the first message.%0A proto_in.seek(1)%0A proto_in.seek(0)%0A except IOError:%0A
proto
@@ -4377,16 +4377,29 @@
ed file%22
+, sys.argv%5B1%5D
%0A
|
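The trick in the diff above, opening with gzip.open and immediately seeking, works because gzip only validates its header on the first read; the seek forces that check up front. An equivalent standalone sketch, plus the explicit magic-number variant:

import gzip

def open_maybe_gzipped(path):
    # Variant 1 (as in the diff): gzip.open always succeeds; force the
    # header check by seeking before any real reads happen.
    f = gzip.open(path, 'rb')
    try:
        f.seek(1)
        f.seek(0)
        return f
    except IOError:
        f.close()
        return open(path, 'rb')

def is_gzipped(path):
    # Variant 2: check the two-byte gzip magic number directly.
    with open(path, 'rb') as f:
        return f.read(2) == b'\x1f\x8b'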
465c1c1c9d7c102b4d35eb8c228565dbf8d35910
|
simplify the code
|
util/remaining-gnu-error.py
|
util/remaining-gnu-error.py
|
#!/usr/bin/env python3
# This script lists the failing GNU tests by size
# Just like with util/run-gnu-test.sh, we expect the gnu sources
# to be in ../
import urllib.request
import urllib
import os
import glob
import json
base = "../gnu/tests/"
urllib.request.urlretrieve(
"https://raw.githubusercontent.com/uutils/coreutils-tracking/main/gnu-full-result.json",
"result.json",
)
tests = glob.glob(base + "/*/*.sh")
tests_pl = glob.glob(base + "/*/*.pl")
tests_xpl = glob.glob(base + "/*/*.xpl")
tests = tests + tests_pl + tests_xpl
# sort by size
list_of_files = sorted(tests, key=lambda x: os.stat(x).st_size)
with open("result.json", "r") as json_file:
data = json.load(json_file)
for d in data:
for e in data[d]:
# Not all the tests are .sh files, rename them if not.
script = e.replace(".log", ".sh")
a = f"{base}{d}{script}"
if not os.path.exists(a):
a = a.replace(".sh", ".pl")
if not os.path.exists(a):
a = a.replace(".pl", ".xpl")
# the tests pass, we don't care anymore
if data[d][e] == "PASS":
try:
list_of_files.remove(a)
except ValueError:
# Ignore the error
pass
# Remove the factor tests and reverse the list (bigger first)
tests = list(filter(lambda k: "factor" not in k, list_of_files))
for f in reversed(tests):
print("%s: %s" % (f, os.stat(f).st_size))
print("")
print("%s tests remaining" % len(tests))
|
Python
| 0.000677
|
@@ -389,110 +389,37 @@
)%0A%0At
-est
+ype
s =
-glob.glob(base + %22/*/*.sh%22)%0Atests_pl = glob.glob(base + %22/*/*.pl%22)%0Atests_xpl = glob.glob(base +
+(%22/*/*.sh%22, %22/*/*.pl%22,
%22/*
@@ -427,24 +427,25 @@
*.xpl%22)%0A
+%0A
tests =
tests +
@@ -440,37 +440,72 @@
s =
-tests + tests_pl + tests_xpl%0A
+%5B%5D%0Afor files in types:%0A tests.extend(glob.glob(base + files))
%0A# s
@@ -1076,29 +1076,8 @@
S%22:%0A
- try:%0A
@@ -1111,95 +1111,8 @@
e(a)
-%0A except ValueError:%0A # Ignore the error%0A pass
%0A%0A#
|
b2a4967e956c07831516d90411f16d9f46a62cfb
|
Update script for py3 and cross-platform TMPDIR access
|
scripts/avogadro-remote.py
|
scripts/avogadro-remote.py
|
#!/usr/bin/python
import sys
import json
import time
import socket
import struct
class Connection:
def __init__(self, name = "avogadro"):
# create socket
self.sock = socket.socket(socket.AF_UNIX,
socket.SOCK_STREAM)
# connect
self.sock.connect("/tmp/" + name)
def send_json(self, obj):
self.send_message(json.dumps(obj))
def send_message(self, msg):
sz = len(msg)
hdr = struct.pack('>I', sz)
pkt = hdr + msg
self.sock.send(pkt)
def recv_message(self, size = 1024):
pkt = self.sock.recv(size)
return pkt[4:]
def recv_json(self):
msg = self.recv_message()
try:
return json.loads(msg)
except Exception as e:
print 'error: ' + str(e)
return {}
def close(self):
# close socket
self.sock.close()
if __name__ == '__main__':
conn = Connection()
method = sys.argv[1]
if method == 'openFile':
conn.send_json(
{
'jsonrpc' : '2.0',
'id' : 0,
'method' : 'openFile',
'params' : {
'fileName' : str(sys.argv[2])
}
}
)
elif method == 'kill':
conn.send_json(
{
'jsonrpc' : '2.0',
'id' : 0,
'method' : 'kill'
}
)
else:
print 'unknown method: ' + method
sys.exit(-1)
conn.close()
print 'reply: ' + str(conn.recv_message())
conn.close()
|
Python
| 0
|
@@ -12,16 +12,55 @@
python%0A%0A
+from __future__ import print_function%0A%0A
import s
@@ -113,16 +113,32 @@
t struct
+%0Aimport tempfile
%0A%0Aclass
@@ -347,15 +347,35 @@
ect(
-%22/tmp/%22
+tempfile.gettempdir() + '/'
+ n
@@ -548,16 +548,32 @@
dr + msg
+.encode('ascii')
%0A sel
@@ -811,17 +811,17 @@
print
-
+(
'error:
@@ -830,16 +830,17 @@
+ str(e)
+)
%0A r
@@ -1371,17 +1371,17 @@
print
-
+(
'unknown
@@ -1403,24 +1403,8 @@
thod
-%0A sys.exit(-1
)%0A
@@ -1417,16 +1417,33 @@
.close()
+%0A sys.exit(-1)
%0A%0A prin
@@ -1443,17 +1443,17 @@
%0A print
-
+(
'reply:
@@ -1480,16 +1480,17 @@
ssage())
+)
%0A conn.
|
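
A minimal sketch of the portable temp-directory lookup the diff switches to: hard-coding "/tmp/" fails on Windows and ignores TMPDIR, while tempfile.gettempdir() honours both. The "avogadro" socket name comes from the script above.

import os
import tempfile

name = "avogadro"  # default socket name used by the script above
sock_path = os.path.join(tempfile.gettempdir(), name)
print(sock_path)  # e.g. /tmp/avogadro on Linux, the user temp dir on Windows
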
0b038b8348de12eab4396cbae69095ff54407075
|
update SHEF soilm code to MV for ISUSM
|
scripts/isuag/isusm2rr5.py
|
scripts/isuag/isusm2rr5.py
|
"""Create the RR5 SHEF product that the Weather Bureau Desires
Run from RUN_20_AFTER.sh
"""
import subprocess
import datetime
import os
import unittest
import tempfile
import numpy as np
from pyiem.util import get_dbconn
from pyiem.tracker import loadqc
def mt(prefix, tmpf, depth, q):
"""Properly encode a value at depth into SHEF"""
if tmpf is None or "soil4" in q or np.isnan(tmpf):
return ""
val = float(depth)
val += abs(tmpf) / 1000.0
if tmpf < 0:
val = 0 - val
return "/%s %.3f" % (prefix, val)
def generate_rr5():
"""Create the RR5 Data"""
qcdict = loadqc()
data = (
"\n\n\n"
": Iowa State University Soil Moisture Network\n"
": Data contact Daryl Herzmann akrherz@iastate.edu\n"
": File generated %s UTC\n"
) % (datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M"),)
pgconn = get_dbconn("iem", user="nobody")
cursor = pgconn.cursor()
cursor.execute(
"""
SELECT id, valid, tmpf, c1tmpf, c2tmpf, c3tmpf, c4tmpf,
c2smv, c3smv, c4smv, phour
from current c JOIN stations t
on (t.iemid = c.iemid) WHERE t.network = 'ISUSM' and
valid > (now() - '90 minutes'::interval)
"""
)
for row in cursor:
q = qcdict.get(row[0], dict())
if "tmpf" in q or row[2] is None:
tmpf = "M"
else:
tmpf = "%.1f" % (row[2],)
if "precip" in q or row[10] is None:
precip = "M"
else:
precip = "%.2f" % (row[10],)
data += (".A %s %s C DH%s/TA %s%s%s%s%s\n" ".A1 %s%s%s/PPHRP %s\n") % (
row[0],
row[1].strftime("%Y%m%d"),
row[1].strftime("%H%M"),
tmpf,
mt("TV", row[3], "4", q),
mt("TV", row[4], "12", q),
mt("TV", row[5], "24", q),
mt("TV", row[6], "50", q),
mt("MW", max([0, 0 if row[7] is None else row[7]]), "12", q),
mt("MW", max([0, 0 if row[8] is None else row[8]]), "24", q),
mt("MW", max([0, 0 if row[9] is None else row[9]]), "50", q),
precip,
)
return data
def main():
"""Go Main Go"""
rr5data = generate_rr5()
# print rr5data
(tmpfd, tmpfn) = tempfile.mkstemp()
os.write(tmpfd, rr5data.encode("utf-8"))
os.close(tmpfd)
subprocess.call(
("/home/ldm/bin/pqinsert -p 'SUADSMRR5DMX.dat' %s") % (tmpfn,),
shell=True,
)
os.unlink(tmpfn)
if __name__ == "__main__":
main()
class MyTest(unittest.TestCase):
"""Test out our functions"""
def test_mt(self):
"""Conversion of values to SHEF encoded values"""
self.assertEquals(mt("TV", 4, 40, dict()), "/TV 40.004")
self.assertEquals(mt("TV", -4, 40, dict()), "/TV -40.004")
self.assertEquals(mt("TV", 104, 40, dict()), "/TV 40.104")
|
Python
| 0
|
@@ -135,24 +135,8 @@
os%0A
-import unittest%0A
impo
@@ -1882,33 +1882,33 @@
mt(%22M
-W
+V
%22, max(%5B0, 0 if
@@ -1956,33 +1956,33 @@
mt(%22M
-W
+V
%22, max(%5B0, 0 if
@@ -2038,17 +2038,17 @@
mt(%22M
-W
+V
%22, max(%5B
@@ -2476,119 +2476,8 @@
)%0A%0A%0A
-if __name__ == %22__main__%22:%0A main()%0A%0A%0Aclass MyTest(unittest.TestCase):%0A %22%22%22Test out our functions%22%22%22%0A%0A
def
@@ -2488,19 +2488,11 @@
_mt(
-self):%0A
+):%0A
@@ -2545,38 +2545,23 @@
%22%22%22%0A
- self.assertEquals(
+assert
mt(%22TV%22,
@@ -2567,33 +2567,35 @@
, 4, 40, dict())
-,
+ ==
%22/TV 40.004%22)%0A
@@ -2595,36 +2595,20 @@
004%22
-)
%0A
- self.assertEquals(
+assert
mt(%22
@@ -2623,25 +2623,27 @@
40, dict())
-,
+ ==
%22/TV -40.00
@@ -2648,36 +2648,20 @@
004%22
-)
%0A
- self.assertEquals(
+assert
mt(%22
@@ -2681,17 +2681,19 @@
dict())
-,
+ ==
%22/TV 40
@@ -2697,10 +2697,49 @@
40.104%22
+%0A%0A%0Aif __name__ == %22__main__%22:%0A main(
)%0A
|
3772b05552c16d82b8a8e5e634a0249047e230ed
|
use standard exclude patterns as overridable default in cystdlib.py
|
Tools/cystdlib.py
|
Tools/cystdlib.py
|
"""
Highly experimental script that compiles the CPython standard library using Cython.
Execute the script either in the CPython 'Lib' directory or pass the
option '--current-python' to compile the standard library of the running
Python interpreter.
Pass '-j N' to get a parallel build with N processes.
Usage example::
$ python cystdlib.py --current-python build_ext -i
"""
import os
import sys
from distutils.core import setup
from Cython.Build import cythonize
from Cython.Compiler import Options
# improve Python compatibility by allowing some broken code
Options.error_on_unknown_names = False
Options.error_on_uninitialized = False
exclude_patterns = ['**/test/**/*.py', '**/tests/**/*.py', '**/__init__.py']
broken = [
'idlelib/MultiCall.py',
'email/utils.py',
'multiprocessing/reduction.py',
'multiprocessing/util.py',
'threading.py', # interrupt handling
'lib2to3/fixes/fix_sys_exc.py',
'traceback.py',
'types.py',
'enum.py',
'importlib/_bootstrap',
]
default_directives = dict(
auto_cpdef=False, # enable when it's safe, see long list of failures below
binding=True,
set_initial_path='SOURCEFILE')
default_directives['optimize.inline_defnode_calls'] = True
special_directives = [
(['pkgutil.py',
'decimal.py',
'datetime.py',
'optparse.py',
'sndhdr.py',
'opcode.py',
'ntpath.py',
'urllib/request.py',
'plat-*/TYPES.py',
'plat-*/IN.py',
'tkinter/_fix.py',
'lib2to3/refactor.py',
'webbrowser.py',
'shutil.py',
'multiprocessing/forking.py',
'xml/sax/expatreader.py',
'xmlrpc/client.py',
'pydoc.py',
'xml/etree/ElementTree.py',
'posixpath.py',
'inspect.py',
'ctypes/util.py',
'urllib/parse.py',
'warnings.py',
'tempfile.py',
'trace.py',
'heapq.py',
'pickletools.py',
'multiprocessing/connection.py',
'hashlib.py',
'getopt.py',
'os.py',
'types.py',
], dict(auto_cpdef=False)),
]
del special_directives[:] # currently unused
def build_extensions(includes='**/*.py',
excludes=None,
special_directives=special_directives,
language_level=sys.version_info[0],
parallel=None):
if isinstance(includes, str):
includes = [includes]
excludes = list(excludes or ()) + exclude_patterns + broken
all_groups = (special_directives or []) + [(includes, {})]
extensions = []
for modules, directives in all_groups:
exclude_now = excludes[:]
for other_modules, _ in special_directives:
if other_modules != modules:
exclude_now.extend(other_modules)
d = dict(default_directives)
d.update(directives)
extensions.extend(
cythonize(
modules,
exclude=exclude_now,
exclude_failures=True,
language_level=language_level,
compiler_directives=d,
nthreads=parallel,
))
return extensions
def build(extensions):
try:
setup(ext_modules=extensions)
result = True
except:
import traceback
print('error building extensions %s' % (
[ext.name for ext in extensions],))
traceback.print_exc()
result = False
return extensions, result
def _build(args):
sys_args, ext = args
sys.argv[1:] = sys_args
return build([ext])
def parse_args():
from optparse import OptionParser
parser = OptionParser('%prog [options] [LIB_DIR (default: ./Lib)]')
parser.add_option(
'--current-python', dest='current_python', action='store_true',
help='compile the stdlib of the running Python')
parser.add_option(
'-j', '--jobs', dest='parallel_jobs', metavar='N',
type=int, default=1,
help='run builds in N parallel jobs (default: 1)')
parser.add_option(
'-x', '--exclude', dest='excludes', metavar='PATTERN',
action="append", help='exclude modules/packages matching PATTERN')
options, args = parser.parse_args()
if not args:
args = ['./Lib']
elif len(args) > 1:
parser.error('only one argument expected, got %d' % len(args))
return options, args
if __name__ == '__main__':
options, args = parse_args()
if options.current_python:
# assume that the stdlib is where the "os" module lives
os.chdir(os.path.dirname(os.__file__))
else:
os.chdir(args[0])
pool = None
parallel_jobs = options.parallel_jobs
if options.parallel_jobs:
try:
import multiprocessing
pool = multiprocessing.Pool(parallel_jobs)
print("Building in %d parallel processes" % parallel_jobs)
except (ImportError, OSError):
print("Not building in parallel")
parallel_jobs = 0
extensions = build_extensions(
parallel=parallel_jobs,
excludes=options.excludes)
sys_args = ['build_ext', '-i']
if pool is not None:
results = pool.map(_build, [(sys_args, ext) for ext in extensions])
pool.close()
pool.join()
for ext, result in results:
if not result:
print("building extension %s failed" % (ext[0].name,))
else:
sys.argv[1:] = sys_args
build(extensions)
|
Python
| 0
|
@@ -2426,14 +2426,8 @@
s or
- ()) +
exc
@@ -2439,16 +2439,17 @@
patterns
+)
+ broke
|
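
The one-line change above turns exclude_patterns from an always-applied addition into a default that caller-supplied excludes replace outright (the module still appends its broken list afterwards). A standalone sketch of the idiom; resolve_excludes is a hypothetical helper named for illustration:

DEFAULT_EXCLUDES = ['**/test/**/*.py', '**/tests/**/*.py', '**/__init__.py']

def resolve_excludes(excludes=None):
    # Passing no excludes keeps the defaults; passing any list replaces them.
    return list(excludes or DEFAULT_EXCLUDES)

assert resolve_excludes() == DEFAULT_EXCLUDES
assert resolve_excludes(['foo/*.py']) == ['foo/*.py']
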
5489fe0abc5dda3b6d41bee368cd0b9727459af3
|
Add search urls for projects
|
projects/urls.py
|
projects/urls.py
|
from django.conf.urls import patterns, url
urlpatterns = patterns('projects.views',
url(r'^add/$', 'add_project', name='add-project'),
url(r'^edit/(?P<project_id>\d+)/$', 'edit_project', name='edit-project'),
url(r'^edit_status/(?P<project_id>\d+)/$', 'edit_status', name='edit-status'),
url(r'^archive/$', 'projects_archive', name='projects-archive'),
url(r'^archive/review/(?P<project_id>\d+)/$', 'show_project', name='show-project'),
url(r'^archive/review/versions/(?P<project_id>\d+)/$', 'show_project_versions', name='show-project-versions'),
url(r'^archive/(?P<year>\d{4})/(?P<month>\d{,2})/$', 'projects_year_month', name='projects-year-month'),
)
|
Python
| 0
|
@@ -676,10 +676,467 @@
onth'),%0A
+ url(r'%5Esearch/user/(?P%3Csearched_creator%3E%5Cd*)/$', 'projects_by_creator', name='projects-by-creator'),%0A url(r'%5Esearch/status/(?P%3Csearched_status%3E.*)/$', 'projects_by_status', name='projects-by-status'),%0A url(r'%5Esearch/name/(?P%3Csearched_name%3E.*)/$', 'projects_by_name', name='projects-by-name'),%0A url(r'%5Esearch/(?P%3Csearched_name%3E%5Cd*)/(?P%3Csearched_status%3E.*)/(?P%3Csearched_creator%3E.*)/$', 'projects_complex_search', name='projects-complex-search'),%0A
)%0A
|
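
Each added route captures part of the path with a named group that Django passes to the view as a keyword argument. A re-only sketch of how one of the new patterns matches, so the behaviour is visible without a Django project:

import re

pattern = re.compile(r'^search/status/(?P<searched_status>.*)/$')
match = pattern.match('search/status/in-progress/')
assert match.group('searched_status') == 'in-progress'
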
35b0af0fafb117e3cc613d3073602902fadb9c5c
|
Add daily-view to worker
|
server/worker/functions.py
|
server/worker/functions.py
|
import logging
import time
from django.db import connection
from server.models import SensorValue, Threshold, Notification
import functions
logger = logging.getLogger('worker')
def check_thresholds():
for threshold in Threshold.objects.all():
try:
latest_sensorvalue = SensorValue.objects.filter(
sensor=threshold.sensor).latest('timestamp')
if threshold.min_value is not None:
if latest_sensorvalue.value < threshold.min_value:
message = 'Threshold "%s" triggered (%s < %s)' % (
threshold.name, latest_sensorvalue.value, threshold.min_value)
Notification(threshold=threshold, message=message,
category=Notification.Danger, show_manager=threshold.show_manager).save()
logger.debug(message)
if threshold.max_value is not None:
if latest_sensorvalue.value > threshold.max_value:
message = 'Threshold "%s" triggered (%s > %s)' % (
threshold.name, latest_sensorvalue.value, threshold.max_value)
Notification(threshold=threshold, message=message,
category=Notification.Danger, show_manager=threshold.show_manager).save()
logger.debug(message)
except SensorValue.DoesNotExist:
logger.debug('No SensorValue found for Sensor #%s' %
threshold.sensor_id)
def refresh_views():
logger.debug('Trigger views refresh')
cursor = connection.cursor()
cursor.execute('''REFRESH MATERIALIZED VIEW server_sensorvaluehourly;''')
cursor.execute(
'''REFRESH MATERIALIZED VIEW server_sensorvaluemonthlysum;''')
cursor.execute(
'''REFRESH MATERIALIZED VIEW server_sensorvaluemonthlyavg;''')
logger.debug('Successfully refreshed views')
|
Python
| 0.000001
|
@@ -1694,24 +1694,101 @@
hourly;''')%0A
+ cursor.execute('''REFRESH MATERIALIZED VIEW server_sensorvaluedaily;''')%0A
cursor.e
@@ -1957,20 +1957,16 @@
vg;''')%0A
-
%0A log
@@ -2006,9 +2006,8 @@
views')
-%0A
|
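
With the daily view added, four views are refreshed in sequence. A sketch of the same calls driven through a stand-in cursor so it runs without a database; the view names follow the worker module above:

VIEWS = (
    'server_sensorvaluehourly',
    'server_sensorvaluedaily',      # the view this commit adds
    'server_sensorvaluemonthlysum',
    'server_sensorvaluemonthlyavg',
)

class FakeCursor(object):
    # Stand-in for django.db.connection.cursor(); just records the SQL.
    def execute(self, sql):
        print(sql)

cursor = FakeCursor()
for view in VIEWS:
    cursor.execute('REFRESH MATERIALIZED VIEW %s;' % view)
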
13bb0a7ea546fed050b68c73730384c168370ac3
|
Add typing for plogger.
|
ptest/plogger.py
|
ptest/plogger.py
|
import logging
import sys
from datetime import datetime
from . import config
class PConsole:
def __init__(self, out):
self.out = out
def write(self, msg):
self.out.write(str(msg))
def write_line(self, msg):
self.out.write(str(msg) + "\n")
pconsole = PConsole(sys.stdout)
pconsole_err = PConsole(sys.stderr)
class PReporter:
def __init__(self):
pass
def debug(self, msg, screenshot=False):
self.__log(logging.DEBUG, msg, screenshot)
def info(self, msg, screenshot=False):
self.__log(logging.INFO, msg, screenshot)
def warn(self, msg, screenshot=False):
self.__log(logging.WARN, msg, screenshot)
def error(self, msg, screenshot=False):
self.__log(logging.ERROR, msg, screenshot)
def critical(self, msg, screenshot=False):
self.__log(logging.CRITICAL, msg, screenshot)
def __log(self, level, msg, screenshot):
from . import test_executor, screen_capturer
try:
running_test_fixture = test_executor.current_executor().get_property("running_test_fixture")
except AttributeError as e:
pconsole.write_line("[%s] %s" % (logging.getLevelName(level), msg))
else:
log = {"time": str(datetime.now()), "level": logging.getLevelName(level).lower(), "message": str(msg)}
if screenshot and not config.get_option("disable_screenshot"):
log["screenshots"] = screen_capturer.take_screenshots()
running_test_fixture.logs.append(log)
if config.get_option("verbose"):
# output to pconsole
message = "[%s] %s" % (running_test_fixture.full_name, msg)
pconsole.write_line(message)
preporter = PReporter()
|
Python
| 0
|
@@ -157,32 +157,37 @@
write(self, msg
+: str
):%0A self.
@@ -235,16 +235,21 @@
elf, msg
+: str
):%0A
@@ -425,32 +425,37 @@
debug(self, msg
+: str
, screenshot=Fal
@@ -442,33 +442,41 @@
str, screenshot
-=
+: bool =
False):%0A
@@ -533,32 +533,37 @@
f info(self, msg
+: str
, screenshot=Fal
@@ -550,33 +550,41 @@
str, screenshot
-=
+: bool =
False):%0A
@@ -640,32 +640,37 @@
f warn(self, msg
+: str
, screenshot=Fal
@@ -657,33 +657,41 @@
str, screenshot
-=
+: bool =
False):%0A
@@ -748,32 +748,37 @@
error(self, msg
+: str
, screenshot=Fal
@@ -765,33 +765,41 @@
str, screenshot
-=
+: bool =
False):%0A
@@ -864,24 +864,29 @@
al(self, msg
+: str
, screenshot
@@ -885,17 +885,25 @@
reenshot
-=
+: bool =
False):%0A
@@ -982,21 +982,31 @@
f, level
-, msg
+: int, msg: str
, screen
@@ -1009,16 +1009,22 @@
reenshot
+: bool
):%0A
|
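
The commit annotates positional parameters and keyword defaults with PEP 484 syntax. A minimal standalone version of the annotated-signature pattern, reusing the same level/msg/screenshot trio:

import logging

def log(level: int, msg: str, screenshot: bool = False) -> None:
    suffix = ' (screenshot)' if screenshot else ''
    print('[%s] %s%s' % (logging.getLevelName(level), msg, suffix))

log(logging.INFO, 'case started')
log(logging.ERROR, 'assertion failed', screenshot=True)
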
3b5b3afbc66f60df45f0458ffdd0d37b9a7c50d0
|
Add homemade fast width/height reader for JPEG files
|
ptoolbox/tags.py
|
ptoolbox/tags.py
|
# -*- coding: utf-8 -*-
from datetime import datetime
TAG_WIDTH = 'EXIF ExifImageWidth'
TAG_HEIGHT = 'EXIF ExifImageLength'
TAG_DATETIME = 'Image DateTime'
def parse_time(tags):
tag = tags.get(TAG_DATETIME, None)
if not tag:
raise KeyError(TAG_DATETIME)
return datetime.strptime(str(tag), "%Y:%m:%d %H:%M:%S")
def parse_width(tags):
tag = tags.get(TAG_WIDTH, None)
if not tag:
raise KeyError(TAG_WIDTH)
return int(str(tag), 10)
def parse_height(tags):
tag = tags.get(TAG_HEIGHT, None)
if not tag:
raise KeyError(TAG_HEIGHT)
return int(str(tag), 10)
|
Python
| 0
|
@@ -18,16 +18,30 @@
-8 -*-%0A%0A
+import struct%0A
from dat
@@ -166,16 +166,653 @@
eTime'%0A%0A
+%0Adef jpeg_size(path):%0A %22%22%22Get image size.%0A Structure of JPEG file is:%0A ffd8 %5BffXX SSSS DD DD ...%5D %5BffYY SSSS DDDD ...%5D (S is 16bit size, D the data)%0A We look for the SOF0 header 0xffc0; its structure is%0A %5Bffc0 SSSS PPHH HHWW ...%5D where PP is 8bit precision, HHHH 16bit height, WWWW width%0A %22%22%22%0A with open(path, 'rb') as f:%0A _, header_type, size = struct.unpack('%3EHHH', f.read(6))%0A while header_type != 0xffc0:%0A f.seek(size - 2, 1)%0A header_type, size = struct.unpack('%3EHH', f.read(4))%0A bpi, height, width = struct.unpack('%3EBHH', f.read(5))%0A return width, height%0A%0A%0A
def pars
|
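
A runnable sketch of the segment walk the new helper performs, fed a synthetic in-memory JPEG so no file on disk is needed; the stream-based signature here is an editorial variation on the path-based jpeg_size above:

import io
import struct

def jpeg_size(stream):
    # Same walk as the helper above: skip segments until SOF0 (0xffc0),
    # then unpack 8-bit precision, 16-bit height, 16-bit width.
    _, marker, size = struct.unpack('>HHH', stream.read(6))
    while marker != 0xffc0:
        stream.seek(size - 2, 1)
        marker, size = struct.unpack('>HH', stream.read(4))
    _, height, width = struct.unpack('>BHH', stream.read(5))
    return width, height

# Synthetic two-segment JPEG: SOI, a 4-byte APP0 stub, then SOF0 with
# precision 8, height 480, width 640.
fake = (b'\xff\xd8'                                # SOI
        b'\xff\xe0\x00\x04\x00\x00'                # APP0, size 4, 2 payload bytes
        b'\xff\xc0\x00\x11\x08\x01\xe0\x02\x80')   # SOF0, 8-bit, 480x640
assert jpeg_size(io.BytesIO(fake)) == (640, 480)
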
7a64ac255f53e85f888093daac83b3c0fabcf15e
|
Update ESPEC_tests.py
|
ESPEC_tests.py
|
ESPEC_tests.py
|
# -*- coding: utf-8 -*-
"""ESPEC_tests.py: Simple test routine for pyESPEC library
__author__ = "Jason M. Battle"
__copyright__ = "Copyright 2016, Jason M. Battle"
__license__ = "MIT"
__email__ = "jason.battle@gmail.com"
"""
from ESPEC import SH241
if __name__ == '__main__':
test = SH241()
test.OpenChannel()
if test.GetMode() == 'OFF':
test.SetPowerOn()
# Read Commands
test.GetROMVersion()
test.GetIntStatus()
test.GetIntMask()
test.GetAlarmStat()
test.GetKeyProtStat()
test.GetType()
test.GetMode()
test.GetCondition()
test.GetTemp()
test.GetHumid()
test.GetRefrigeCtl()
test.GetRelayStat()
test.GetHeaterStat()
test.GetProgStat()
test.GetProgData()
test.GetProgStepData(1)
# Write Commands
test.SetIntMask(0b01000000)
test.ResetIntStatus()
test.SetKeyProtectOn()
test.SetKeyProtectOff()
test.SetPowerOff()
test.SetPowerOn()
test.SetTemp(25.0)
test.SetHighTemp(155.0)
test.SetLowTemp(-45.0)
test.SetHumid(50.0)
test.SetHighHumid(100)
test.SetLowHumid(0)
test.SetHumidOff()
test.SetRefrigeCtl(9)
test.SetRelayOn(1)
test.SetRelayOff(1)
test.SetModeOff()
test.SetModeStandby()
test.SetModeConstant()
test.ProgramWrite()
test.SetModeProgram()
test.ProgramAdvance()
test.ProgramEnd()
test.SetModeStandby()
test.ProgramErase()
test.SetModeOff()
test.CloseChannel()
|
Python
| 0
|
@@ -1522,16 +1522,22 @@
CloseChannel()%0D%0A
+ %0D%0A
|
d177977ed4da7168e1d04b5420e224bb3b75a4fc
|
Use StringIO from six & remove trailing space in test file
|
src/azure/cli/tests/test_commands.py
|
src/azure/cli/tests/test_commands.py
|
import os
import sys
import unittest
import re
import vcr
import logging
from six import add_metaclass
try:
import unittest.mock as mock
except ImportError:
import mock
try:
# Python 3
from io import StringIO
except ImportError:
# Python 2
from StringIO import StringIO
from azure.cli.main import main as cli
from command_specs import TEST_SPECS
logging.basicConfig()
vcr_log = logging.getLogger('vcr')
vcr_log.setLevel(logging.ERROR)
VCR_CASSETTE_DIR = os.path.join(os.path.dirname(__file__), 'recordings')
FILTER_HEADERS = [
'authorization',
'client-request-id',
'x-ms-client-request-id',
'x-ms-correlation-request-id',
'x-ms-ratelimit-remaining-subscription-reads',
'x-ms-request-id',
'x-ms-routing-request-id',
'x-ms-gateway-service-instanceid',
'x-ms-ratelimit-remaining-tenant-reads',
'x-ms-served-by',
]
def before_record_request(request):
request.uri = re.sub('/subscriptions/([^/]+)/', '/subscriptions/00000000-0000-0000-0000-000000000000/', request.uri)
return request
def before_record_response(response):
def remove_entries(the_dict, entries):
for key in entries:
if key in the_dict:
del the_dict[key]
remove_entries(response['headers'], FILTER_HEADERS)
return response
my_vcr = vcr.VCR(
cassette_library_dir=VCR_CASSETTE_DIR,
before_record_request=before_record_request,
before_record_response=before_record_response
)
class TestSequenceMeta(type):
def __new__(mcs, name, bases, dict):
def gen_test(test_name, command, expected_result):
def load_subscriptions_mock(self):
return [{"id": "00000000-0000-0000-0000-000000000000", "user": "example@example.com", "access_token": "access_token", "state": "Enabled", "name": "Example", "active": True}];
@mock.patch('azure.cli._profile.Profile.load_subscriptions', load_subscriptions_mock)
@my_vcr.use_cassette('%s.yaml'%test_name, filter_headers=FILTER_HEADERS)
def test(self):
with StringIO() as io:
cli(command.split(), file=io)
self.assertEqual(io.getvalue(), expected_result)
return test
for module_name, test_specs in TEST_SPECS:
for test_spec_item in test_specs:
test_name = 'test_%s' % test_spec_item['test_name']
full_test_name = '%s.%s'%(module_name, test_name)
dict[test_name] = gen_test(full_test_name, test_spec_item['command'], test_spec_item['expected_result'])
return type.__new__(mcs, name, bases, dict)
@add_metaclass(TestSequenceMeta)
class TestCommands(unittest.TestCase):
pass
if __name__ == '__main__':
unittest.main()
|
Python
| 0
|
@@ -96,16 +96,26 @@
etaclass
+, StringIO
%0Atry:%0A
@@ -186,126 +186,8 @@
ck%0A%0A
-try:%0A # Python 3%0A from io import StringIO%0Aexcept ImportError:%0A # Python 2%0A from StringIO import StringIO%0A%0A
from
@@ -1743,24 +1743,16 @@
True%7D%5D;%0A
-
%0A
@@ -1975,12 +1975,12 @@
-with
+io =
Str
@@ -1990,20 +1990,9 @@
IO()
- as io:%0A
+%0A
@@ -2033,24 +2033,93 @@
ile=io)%0A
+ actual_result = io.getvalue()%0A io.close()%0A
@@ -2139,29 +2139,29 @@
rtEqual(
-io.getvalue()
+actual_result
, expect
|
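
six.StringIO resolves to io.StringIO on Python 3 and StringIO.StringIO on Python 2, which is what lets the diff drop the try/except import block. A small sketch, assuming six is installed (the test module already imports it):

from six import StringIO

buf = StringIO()
buf.write(u'captured output\n')
assert buf.getvalue() == u'captured output\n'
buf.close()
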
72d905e1e4098cf929213f59662c0c3090fd93cf
|
remove debug print
|
pyast/dump/js.py
|
pyast/dump/js.py
|
import json
import pyast
from collections import OrderedDict
import sys
if sys.version >= '3':
basestring = str
else:
pass
def _dump_node_name(node):
return node.__class__.__name__.lower()
def _dump_node(node, name=None, indent=0):
if isinstance(node, basestring):
return node
elif isinstance(node, bool):
return node
struct = OrderedDict({'type': None})
if isinstance(node, pyast.Node):
struct['type'] = _dump_node_name(node)
for field in node._fields:
struct[field] = _dump_node(getattr(node, field))
elif isinstance(node, pyast.TypedList):
struct = []
for elem in node:
struct.append(_dump_node(elem))
elif isinstance(node, pyast.TypedDict):
struct = {}
for elem, key in node.items():
struct[key] =_dump_node(elem)
return struct
def dump(ast):
struct = _dump_node(ast)
print(json)
o = json.dumps(struct, indent=2)
return o
|
Python
| 0.000008
|
@@ -920,24 +920,8 @@
st)%0A
- print(json)%0A
|
c5b7db285508b14cd19c79b46ddbde8dfcc92acb
|
Update type mappings
|
pyathena/util.py
|
pyathena/util.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import functools
import logging
import threading
import re
import uuid
import tenacity
from past.builtins import xrange
from tenacity import (after_log, retry_if_exception,
stop_after_attempt, wait_exponential)
from pyathena import DataError, OperationalError
from pyathena.model import AthenaCompression
_logger = logging.getLogger(__name__)
PATTERN_OUTPUT_LOCATION = re.compile(r'^s3://(?P<bucket>[a-zA-Z0-9.\-_]+)/(?P<key>.+)$')
def parse_output_location(output_location):
match = PATTERN_OUTPUT_LOCATION.search(output_location)
if match:
return match.group('bucket'), match.group('key')
else:
raise DataError('Unknown `output_location` format.')
def get_chunks(df, chunksize=None):
rows = len(df)
if rows == 0:
return
if chunksize is None:
chunksize = rows
elif chunksize == 0:
raise ValueError('Chunksize argument should be non-zero')
chunks = int(rows / chunksize) + 1
for i in xrange(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, rows)
if start_i >= end_i:
break
yield df[start_i:end_i]
def reset_index(df, index_label=None):
df.index.name = index_label if index_label else 'index'
try:
df.reset_index(inplace=True)
except ValueError as e:
raise ValueError('Duplicate name in index/columns: {0}'.format(e))
def as_pandas(cursor, coerce_float=False):
from pandas import DataFrame
names = [metadata[0] for metadata in cursor.description]
return DataFrame.from_records(cursor.fetchall(), columns=names,
coerce_float=coerce_float)
def to_sql_type_mappings(col):
import pandas as pd
col_type = pd._lib.infer_dtype(col, skipna=True)
if col_type == 'datetime64' or col_type == 'datetime':
return 'TIMESTAMP'
elif col_type == "timedelta64":
return 'BIGINT'
elif col_type == 'floating':
if col.dtype == 'float32':
return 'FLOAT'
else:
return 'DOUBLE'
elif col_type == 'integer':
if col.dtype == 'int32':
return 'INT'
else:
return 'BIGINT'
elif col_type == 'boolean':
return 'BOOLEAN'
elif col_type == "date":
return 'DATE'
elif col_type == 'complex':
raise ValueError('Complex datatype not supported')
return 'VARCHAR'
def to_sql(df, name, conn, location, schema='default',
index=False, index_label=None, chunksize=None,
if_exists='fail', compression=None, flavor='spark',
type_mappings=to_sql_type_mappings):
# TODO Supports orc, avro, json, csv or tsv format
# TODO Supports partitioning
if if_exists not in ('fail', 'replace', 'append'):
raise ValueError('`{0}` is not valid for if_exists'.format(if_exists))
if compression is not None and not AthenaCompression.is_valid(compression):
raise ValueError('`{0}` is not valid for compression'.format(compression))
import pyarrow as pa
import pyarrow.parquet as pq
bucket_name, key_prefix = parse_output_location(location)
bucket = conn.session.resource('s3', region_name=conn.region_name,
**conn._client_kwargs).Bucket(bucket_name)
cursor = conn.cursor()
retry_config = conn.retry_config
table = cursor.execute("""
SELECT table_name
FROM information_schema.tables
WHERE table_schema = '{schema}'
AND table_name = '{table}'
""".format(schema=schema, table=name)).fetchall()
if if_exists == 'fail':
if table:
raise OperationalError('Table `{0}.{1}` already exists.'.format(schema, name))
elif if_exists == 'replace':
if table:
cursor.execute("""
DROP TABLE {schema}.{table}
""".format(schema=schema, table=name))
objects = bucket.objects.filter(Prefix=key_prefix)
if list(objects.limit(1)):
objects.delete()
if index:
reset_index(df, index_label)
for chunk in get_chunks(df, chunksize):
table = pa.Table.from_pandas(chunk)
buf = pa.BufferOutputStream()
pq.write_table(table, buf,
compression=compression,
flavor=flavor)
retry_api_call(bucket.put_object,
config=retry_config,
Body=buf.getvalue().to_pybytes(),
Key=key_prefix + str(uuid.uuid4()))
ddl = generate_ddl(df=df,
name=name,
location=location,
schema=schema,
compression=compression,
type_mappings=type_mappings)
cursor.execute(ddl)
def get_column_names_and_types(df, type_mappings):
return [
(str(df.columns[i]), type_mappings(df.iloc[:, i]))
for i in xrange(len(df.columns))
]
def generate_ddl(df, name, location, schema='default', compression=None,
type_mappings=to_sql_type_mappings):
ddl = 'CREATE EXTERNAL TABLE IF NOT EXISTS `{0}`.`{1}` (\n'.format(schema, name)
ddl += ',\n'.join([
'`{0}` {1}'.format(c[0], c[1])
for c in get_column_names_and_types(df, type_mappings)
])
ddl += '\n)\n'
ddl += 'STORED AS PARQUET\n'
ddl += "LOCATION '{0}'\n".format(location)
if compression:
ddl += "TBLPROPERTIES ('parquet.compress'='{0}')\n".format(compression.upper())
return ddl
def synchronized(wrapped):
"""The missing @synchronized decorator
https://git.io/vydTA"""
_lock = threading.RLock()
@functools.wraps(wrapped)
def _wrapper(*args, **kwargs):
with _lock:
return wrapped(*args, **kwargs)
return _wrapper
class RetryConfig(object):
def __init__(self, exceptions=('ThrottlingException', 'TooManyRequestsException'),
attempt=5, multiplier=1, max_delay=100, exponential_base=2):
self.exceptions = exceptions
self.attempt = attempt
self.multiplier = multiplier
self.max_delay = max_delay
self.exponential_base = exponential_base
def retry_api_call(func, config, logger=None,
*args, **kwargs):
retry = tenacity.Retrying(
retry=retry_if_exception(
lambda e: getattr(e, 'response', {}).get(
'Error', {}).get('Code', None) in config.exceptions
if e else False),
stop=stop_after_attempt(config.attempt),
wait=wait_exponential(multiplier=config.multiplier,
max=config.max_delay,
exp_base=config.exponential_base),
after=after_log(logger, logger.level) if logger else None,
reraise=True
)
return retry(func, *args, **kwargs)
|
Python
| 0
|
@@ -1976,16 +1976,71 @@
ESTAMP'%0A
+ elif col_type == 'timedelta':%0A return 'INT'%0A
elif
@@ -2494,16 +2494,80 @@
== '
-complex'
+bytes':%0A return 'BINARY'%0A elif col_type in %5B'complex', 'time'%5D
:%0A
@@ -2594,15 +2594,11 @@
or('
-Complex
+%7B0%7D
dat
@@ -2617,16 +2617,33 @@
pported'
+.format(col_type)
)%0A re
@@ -2652,15 +2652,14 @@
rn '
-VARCHAR
+STRING
'%0A%0A%0A
|
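
The mapping function keys off pandas' dtype inference. A small illustration through the public pandas.api.types.infer_dtype (the module above reaches the same routine via an older private path), assuming pandas is installed:

import pandas as pd
from pandas.api.types import infer_dtype

df = pd.DataFrame({
    'n': [1, 2, 3],             # 'integer'  -> INT / BIGINT
    'x': [0.5, 1.5, 2.5],       # 'floating' -> FLOAT / DOUBLE
    'ok': [True, False, True],  # 'boolean'  -> BOOLEAN
})
for col in df.columns:
    print(col, infer_dtype(df[col], skipna=True))
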
c46e472755c7b7dd450626e136f31a29ca9a5321
|
Fix a regression in accessing the username for the session.
|
rbtools/utils/users.py
|
rbtools/utils/users.py
|
from __future__ import unicode_literals
import getpass
import logging
import sys
from six.moves import input
from rbtools.api.errors import AuthorizationError
from rbtools.commands import CommandError
def get_authenticated_session(api_client, api_root, auth_required=False):
"""Return an authenticated session.
None will be returned if the user is not authenticated, unless the
'auth_required' parameter is True, in which case the user will be prompted
to log in.
"""
session = api_root.get_session(expand='user')
if not session.authenticated:
if not auth_required:
return None
logging.warning('You are not authenticated with the Review Board '
'server at %s, please login.' % api_client.url)
sys.stderr.write('Username: ')
username = input()
password = getpass.getpass(b'Password: ')
api_client.login(username, password)
try:
session = session.get_self()
except AuthorizationError:
raise CommandError('You are not authenticated.')
return session
def get_user(api_client, api_root, auth_required=False):
"""Return the user resource for the current session."""
session = get_authenticated_session(api_client, api_root, auth_required)
if session:
return session.user
def get_username(api_client, api_root, auth_required=False):
"""Return the username for the current session."""
session = get_authenticated_session(api_client, api_root, auth_required)
if session:
return session.links.user.title
|
Python
| 0.003157
|
@@ -1347,16 +1347,33 @@
n.user%0A%0A
+ return None%0A%0A
%0Adef get
@@ -1489,43 +1489,23 @@
+u
se
-ssion
+r
= get_
-authenticated_session
+user
(api
@@ -1538,39 +1538,36 @@
quired)%0A%0A if
+u
se
-ssion
+r
:%0A return
@@ -1571,29 +1571,35 @@
urn
+u
se
-ssion.links.user.titl
+r.username%0A%0A return Non
e%0A
|
73a6ad619448c3b3be44538e2f1853479208ac62
|
Remove vestiges of python2 prints in docstrings
|
pyee/__init__.py
|
pyee/__init__.py
|
# -*- coding: utf-8 -*-
"""
pyee supplies an ``EventEmitter`` object similar to the ``EventEmitter``
from Node.js. It supports both synchronous callbacks and asyncio coroutines.
Example
-------
::
In [1]: from pyee import EventEmitter
In [2]: ee = EventEmitter()
In [3]: @ee.on('event')
...: def event_handler():
...: print 'BANG BANG'
...:
In [4]: ee.emit('event')
BANG BANG
In [5]:
"""
try:
from asyncio import iscoroutine, ensure_future
except ImportError:
iscoroutine = None
ensure_future = None
from collections import defaultdict
__all__ = ['EventEmitter', 'PyeeException']
class PyeeException(Exception):
"""An exception internal to pyee."""
pass
class EventEmitter():
"""The EventEmitter class.
For interoperation with asyncio, one can specify the scheduler and
the event loop. The scheduler defaults to ``asyncio.ensure_future``,
and the loop defaults to ``None``---in other words, the default
asyncio event loop.
Most events are registered with EventEmitter via the ``on`` and ``once``
methods. However, pyee EventEmitters have two *special* events:
- 'new_listener': Fires whenever a new listener is created. Listeners for
this event do not fire upon their own creation.
- 'error': When emitted raises an Exception by default, behavior can be
overridden by attaching a callback to the event.
For example::
@ee.on('error')
def onError(message):
logging.err(message)
ee.emit('error', Exception('something blew up'))
"""
def __init__(self, scheduler=ensure_future, loop=None):
self._events = defaultdict(list)
self._schedule = scheduler
self._loop = loop
def on(self, event, f=None):
"""Registers the function (or optionally an asyncio coroutine function)
``f`` to the event name ``event``.
If ``f`` isn't provided, this method returns a function that
takes ``f`` as a callback; in other words, you can use this method
as a decorator, like so::
@ee.on('data')
def data_handler(data):
print(data)
As mentioned, this method can also take an asyncio coroutine function::
@ee.on('data')
async def data_handler(data)
await do_async_thing(data)
This will automatically schedule the coroutine using the configured
scheduling function (defaults to ``asyncio.ensure_future``) and the
configured event loop (defaults to ``asyncio.get_event_loop()``).
"""
def _on(f):
# Fire 'new_listener' *before* adding the new listener!
self.emit('new_listener', event, f)
# Add the necessary function
self._events[event].append(f)
# Return original function so removal works
return f
if f is None:
return _on
else:
return _on(f)
def emit(self, event, *args, **kwargs):
"""Emit ``event``, passing ``*args`` and ``**kwargs`` to each attached
function. Returns ``True`` if any functions are attached to ``event``;
otherwise returns ``False``.
Example::
ee.emit('data', '00101001')
Assuming ``data`` is an attached function, this will call
``data('00101001')'``.
For coroutine event handlers, calling emit is non-blocking. In other
words, you do not have to await any results from emit, and the
coroutine is scheduled in a fire-and-forget fashion.
"""
handled = False
# Copy the events dict first. Avoids a bug if the events dict gets
# changed in the middle of the following for loop.
events_copy = list(self._events[event])
# Pass the args to each function in the events dict
for f in events_copy:
result = f(*args, **kwargs)
if iscoroutine and iscoroutine(result):
self._schedule(result, loop=self._loop)
handled = True
if not handled and event == 'error':
if len(args):
raise args[0]
else:
raise PyeeException("Uncaught, unspecified 'error' event.")
return handled
def once(self, event, f=None):
"""The same as ``ee.on``, except that the listener is automatically
removed after being called.
"""
def _once(f):
def g(*args, **kwargs):
f(*args, **kwargs)
self.remove_listener(event, g)
return g
def _wrapper(f):
self.on(event, _once(f))
return f
if f is None:
return _wrapper
else:
_wrapper(f)
def remove_listener(self, event, f):
"""Removes the function ``f`` from ``event``."""
self._events[event].remove(f)
def remove_all_listeners(self, event=None):
"""Remove all listeners attached to ``event``.
If ``event`` is ``None``, remove all listeners on all events.
"""
if event is not None:
self._events[event] = []
else:
self._events = None
self._events = defaultdict(list)
def listeners(self, event):
"""Returns the list of all listeners registered to the ``event``.
"""
return self._events[event]
|
Python
| 0.000005
|
@@ -353,17 +353,17 @@
print
-
+(
'BANG BA
@@ -365,16 +365,17 @@
NG BANG'
+)
%0A
|
2beb589edc2f7b57be0d6a559e2f29471490bc91
|
FIX py2 support!
|
pyfaker/utils.py
|
pyfaker/utils.py
|
import re
import random
import os
import json
from string import Formatter
class BaseFake(object):
pass
class CallFormatter(Formatter):
def get_field(self, field_name, *args, **kwargs):
obj, used_key = Formatter.get_field(self, field_name, *args, **kwargs)
return obj(), used_key
'''
class CallFormatter(Formatter):
def get_field(field_name, *args, **kwargs):
used_key = Formatter.get_field(field_name, *args, **kwargs)
return (used_key[0](),) + used_key[1:]
class CallFormatter(Formatter):
def get_field(self, field_name, *args, **kwargs):
if kwargs is None:
kwargs = kwargs.update(args[1])
else:
kwargs.update(args[1])
obj, used_key = Formatter.get_field(self, field_name, args[0:1], kwargs)
return obj(kwargs['cls']()), used_key
'''
call_fmt = CallFormatter()
def get_locales():
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
fpath = os.path.join(curpath(), 'locales.json')
with open(fpath, 'r') as f:
return json.load(f)
_all_locales = get_locales()
def to_camel(s):
"""returns string to camel caps
Example
to_camel('foo_bar') == 'FooBar'
"""
try:
return s.title().replace('_', '') # assume the titles are ASCII, else the class name will fail
except Exception: # TODO specify which kind of error
raise ValueError(
"%s doesn't convert to a good string for a class name" % s)
def update_loc(loc1, loc2):
loc1.update(loc2)
'''
def format_(s, current, fake_=None):
namespace = dict(current.__dict__, **{'cls': current}) # and fake_ ?
# first replace #s with digits then fill in rest using _locals
def callback(matchobj):
return '%s' % random.randrange(10)
s = re.sub(r'#', callback, s)
return s
fmt = CallFormatter()
return fmt.format(s, **namespace)
'''
|
Python
| 0
|
@@ -1262,16 +1262,20 @@
return
+str(
s.title(
@@ -1292,16 +1292,17 @@
'_', '')
+)
# assu
|
cb4a35ac7ff107a99d5d637a193715776918597d
|
Fix dictionary merging for Python 3
|
kafka_influxdb/config/loader.py
|
kafka_influxdb/config/loader.py
|
from . import default_config
import yaml
import logging
import argparse
import collections
import sys
class ObjectView(object):
def __init__(self, d):
self.__dict__ = d
def load_config():
"""
Load settings from default config and optionally
overwrite with config file and commandline parameters
(in that order).
Note: Commandline parameters are of the form
--kafka_host="localhost"
to make them easy to enter from the cli
while the config file parameters are stored in a dict
{kafka: { host: localhost }}
to avoid redundancy in the key name.
So to merge them, we flatten all keys.
"""
# We start with the default config
config = flatten(default_config.DEFAULT_CONFIG)
# Read commandline arguments
cli_config = flatten(parse_args())
if "configfile" in cli_config:
print("Reading config file {}".format(cli_config['configfile']))
configfile = flatten(parse_configfile(cli_config['configfile']))
config = dict(config.items() + configfile.items())
# Parameters from commandline take precedence over all others
config = overwrite_config(config, cli_config)
# Set verbosity level
if 'verbose' in config:
if config['verbose'] == 1:
logging.getLogger().setLevel(logging.INFO)
elif config['verbose'] > 1:
logging.getLogger().setLevel(logging.DEBUG)
return ObjectView(config)
def overwrite_config(old_values, new_values):
return dict(old_values.items() + new_values.items())
def parse_configfile(configfile):
"""
Read settings from file
"""
with open(configfile) as f:
try:
return yaml.safe_load(f)
except Exception as e:
logging.fatal("Could not load default config file: ", e)
exit(-1)
def flatten(d, parent_key='', sep='_'):
"""
Flatten keys in a dictionary
Example:
flatten({'a': 1, 'c': {'a': 2, 'b': {'x': 5, 'y' : 10}}, 'd': [1, 2, 3]})
=> {'a': 1, 'c_a': 2, 'c_b_x': 5, 'd': [1, 2, 3], 'c_b_y': 10}
"""
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, collections.MutableMapping):
items.extend(flatten(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
def parse_args(args=sys.argv[1:]):
parser = argparse.ArgumentParser(description='A Kafka consumer for InfluxDB',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--kafka_host', type=str, default=argparse.SUPPRESS,
help="Hostname or IP of Kafka message broker (default: localhost)")
parser.add_argument('--kafka_port', type=int, default=argparse.SUPPRESS,
help="Port of Kafka message broker (default: 9092)")
parser.add_argument('--kafka_topic', type=str, default=argparse.SUPPRESS,
help="Topic for metrics (default: my_topic)")
parser.add_argument('--kafka_group', type=str, default=argparse.SUPPRESS,
help="Kafka consumer group (default: my_group)")
parser.add_argument('--influxdb_host', type=str, default=argparse.SUPPRESS,
help="InfluxDB hostname or IP (default: localhost)")
parser.add_argument('--influxdb_port', type=int, default=argparse.SUPPRESS,
help="InfluxDB API port (default: 8086)")
parser.add_argument('--influxdb_user', type=str, default=argparse.SUPPRESS,
help="InfluxDB username (default: root)")
parser.add_argument('--influxdb_password', type=str, default=argparse.SUPPRESS,
help="InfluxDB password (default: root)")
parser.add_argument('--influxdb_dbname', type=str, default=argparse.SUPPRESS,
help="InfluxDB database to write metrics into (default: metrics)")
parser.add_argument('--influxdb_use_ssl', default=argparse.SUPPRESS, action="store_true",
help="Use SSL connection for InfluxDB (default: False)")
parser.add_argument('--influxdb_verify_ssl', default=argparse.SUPPRESS, action="store_true",
help="Verify the SSL certificate before connecting (default: False)")
parser.add_argument('--influxdb_timeout', type=int, default=argparse.SUPPRESS,
help="Max number of seconds to establish a connection to InfluxDB (default: 5)")
parser.add_argument('--influxdb_use_udp', default=argparse.SUPPRESS, action="store_true",
help="Use UDP connection for InfluxDB (default: False)")
parser.add_argument('--influxdb_retention_policy', type=str, default=argparse.SUPPRESS,
help="Retention policy for incoming metrics (default: default)")
parser.add_argument('--influxdb_time_precision', type=str, default=argparse.SUPPRESS,
help="Precision of incoming metrics. Can be one of 's', 'm', 'ms', 'u' (default: s)")
parser.add_argument('--encoder', type=str, default=argparse.SUPPRESS,
help="Input encoder which converts an incoming message to dictionary "
"(default: collectd_graphite_encoder)")
parser.add_argument('--buffer_size', type=int, default=argparse.SUPPRESS,
help="Maximum number of messages that will be collected before flushing to the backend "
"(default: 1000)")
parser.add_argument('-c', '--configfile', type=str, default=argparse.SUPPRESS,
help="Configfile path (default: None)")
parser.add_argument('-s', '--statistics', default=argparse.SUPPRESS, action="store_true",
help="Show performance statistics (default: True)")
parser.add_argument('-b', '--benchmark', default=argparse.SUPPRESS, action="store_true",
help="Run benchmark (default: False)")
parser.add_argument('-v', '--verbose', action='count', default=argparse.SUPPRESS,
help="Set verbosity level. Increase verbosity by adding a v: -v -vv -vvv (default: 0)")
cli_args = parser.parse_args(args)
# Convert config from argparse Namespace to dict
return vars(cli_args)
|
Python
| 0.000144
|
@@ -1516,17 +1516,17 @@
items()
-+
+%7C
new_val
|
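
On Python 3, dict .items() returns a view that cannot be concatenated with +, which is what broke here; the diff switches to | on the views. A sketch of the breakage and of dict unpacking, which, unlike the set union the diff uses, keeps the right-hand dict's precedence deterministic when keys collide:

old_values = {'kafka_host': 'localhost', 'kafka_port': 9092}
new_values = {'kafka_port': 9093}

# Python 2 only: dict(old_values.items() + new_values.items())
# Python 3: views support | (set union), but which duplicate key's value
# survives is unspecified. Unpacking makes new_values win explicitly.
merged = {**old_values, **new_values}
assert merged == {'kafka_host': 'localhost', 'kafka_port': 9093}
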
960816cb708c6bd6b1de8aa9c29758e327784f13
|
kill an io_loop check
|
pyfire/server.py
|
pyfire/server.py
|
from datetime import datetime
import errno
import fcntl
import os
import socket
import sys
import traceback
import threading
import xml.etree.ElementTree as ET
from zmq.eventloop import ioloop
from tornado import iostream
from pyfire import configuration as config
from pyfire.errors import XMPPProtocolError
from pyfire.logger import Logger
from pyfire.stream import processor
from pyfire.stream.stanzas import TagHandler
log = Logger(__name__)
class XMPPServer(object):
"""A non-blocking, single-threaded XMPP server."""
def __init__(self, io_loop=None):
self.io_loop = io_loop
self._sockets = {} # fd -> socket object
self._started = False
self._connections = {}
self.checker = ioloop.PeriodicCallback(
self.check_for_closed_connections, 30000)
def listen(self, port, address=""):
"""Binds to the given port and starts the server in a single process.
This method is a shortcut for:
server.bind(port, address)
server.start()
"""
self.bind(port, address)
self.start()
def bind(self, port, address=None, family=socket.AF_UNSPEC):
"""Binds this server to the given port on the given address.
To start the server, call start(). You can call listen() as
a shortcut to the sequence of bind() and start() calls.
Address may be either an IP address or hostname. If it's a hostname,
the server will listen on all IP addresses associated with the
name. Address may be an empty string or None to listen on all
available interfaces. Family may be set to either socket.AF_INET
or socket.AF_INET6 to restrict to ipv4 or ipv6 addresses, otherwise
both will be used if available.
This method may be called multiple times prior to start() to listen
on multiple ports or interfaces.
"""
if address == "":
address = None
for res in socket.getaddrinfo(address, port, family,
socket.SOCK_STREAM, 0,
socket.AI_PASSIVE | socket.AI_ADDRCONFIG):
af, socktype, proto, canonname, sockaddr = res
sock = socket.socket(af, socktype, proto)
flags = fcntl.fcntl(sock.fileno(), fcntl.F_GETFD)
flags |= fcntl.FD_CLOEXEC
fcntl.fcntl(sock.fileno(), fcntl.F_SETFD, flags)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if af == socket.AF_INET6:
# On linux, ipv6 sockets accept ipv4 too by default,
# but this makes it impossible to bind to both
# 0.0.0.0 in ipv4 and :: in ipv6. On other systems,
# separate sockets *must* be used to listen for both ipv4
# and ipv6. For consistency, always disable ipv4 on our
# ipv6 sockets and use a separate ipv4 socket when needed.
if hasattr(socket, "IPPROTO_IPV6"):
sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
sock.setblocking(0)
sock.bind(sockaddr)
sock.listen(128)
log.info("Starting to listen on IP %s Port %s for connections" % sockaddr)
self._sockets[sock.fileno()] = sock
if self._started:
self.io_loop.add_handler(sock.fileno(), self._handle_events,
ioloop.IOLoop.READ)
def start(self):
"""Starts this server in the IOLoop."""
assert not self._started
if not self.io_loop:
self.io_loop = ioloop.IOLoop.instance()
for fd in self._sockets.keys():
self.io_loop.add_handler(fd, self._handle_events,
ioloop.IOLoop.READ)
def stop(self):
"""Stops listening for new connections.
Streams currently running may still continue after the
server is stopped.
"""
for fd, sock in self._sockets.iteritems():
self.io_loop.remove_handler(fd)
sock.close()
def _handle_events(self, fd, events):
while True:
try:
connection, address = self._sockets[fd].accept()
except socket.error, e:
if e.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
return
raise
try:
stream = iostream.IOStream(connection, io_loop=self.io_loop)
log.info("Starting new connection for client connection from %s:%s" % address)
self._connections[address] = XMPPConnection(stream, address)
if not self.checker._running:
self.checker.start()
except Exception, e:
exc_type, exc_value, exc_traceback = sys.exc_info()
log.error("Error in connection callback, %s" % str(e))
for line in traceback.format_tb(exc_traceback):
if line.find("\n") >= 0:
for subline in line.split("\n"):
log.error(subline)
else:
log.error(line.rstrip("\n"))
def check_for_closed_connections(self):
log.debug("checking for closed connections")
for address in self._connections.keys():
connection = self._connections[address]
if connection.closed():
log.debug("detected dead stream/connection: %s:%s" % connection.address)
del self._connections[address]
if len(self._connections) == 0:
log.debug("stopping checker")
self.checker.stop()
class XMPPConnection(object):
"""One XMPP connection initiated by class:`XMPPServer`"""
def __init__(self, stream, address):
self.stream = stream
self.address = address
self.connectiontime = self.last_seen = datetime.now()
self.taghandler = TagHandler(self)
self.parser = processor.StreamProcessor(
self.taghandler.streamhandler,
self.taghandler.contenthandler)
self.stream.read_bytes(1, self._read_char)
def _read_char(self, data):
"""Reads from client in byte mode"""
try:
if data == " ":
log.debug("Found whitespace keepalive")
self.stream.read_bytes(1, self._read_char)
else:
log.debug("Processing byte: %s" % data)
self.parser.feed(data)
self.stream.read_until(">", self._read_xml)
self.last_seen = datetime.now()
except IOError:
self.done()
def _read_xml(self, data):
"""Reads from client until closing tag for xml is found"""
try:
self.last_seen = datetime.now()
log.debug("Processing chunk: %s" % data)
self.parser.feed(data)
if self.parser.depth >= 2:
self.stream.read_until(">", self._read_xml)
else:
self.stream.read_bytes(1, self._read_char)
except IOError:
self.done()
def send_string(self, string):
"""Sends a string to client"""
try:
self.stream.write(string)
log.debug("Sent string to client:" + string)
except IOError:
pass
def send_element(self, element):
"""Serializes and send an ET Element"""
string = ET.tostring(element)
log.debug("Sending element to client:" + string)
self.stream.write(string)
def stop_connection(self):
"""Sends stream close, discards stream closed errors"""
try:
self.stream.write("</stream:stream>")
except IOError:
pass
def done(self):
"""Does cleanup work"""
self.stream.close()
def closed(self):
"""Checks if underlying stream is closed"""
return self.stream.closed()
|
Python
| 0.000002
|
@@ -594,16 +594,44 @@
io_loop
+ or ioloop.IOLoop.instance()
%0A
@@ -3637,89 +3637,8 @@
ted%0A
- if not self.io_loop:%0A self.io_loop = ioloop.IOLoop.instance()%0A
|
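
The diff moves the fallback into __init__ with the "x or default" idiom, so the loop is resolved once at construction instead of being checked in start(). A dependency-free sketch of that shape; a plain object stands in for ioloop.IOLoop.instance():

def default_loop():
    # Stand-in for ioloop.IOLoop.instance(); keeps the sketch runnable
    # without tornado/zmq.
    return object()

class Server(object):
    def __init__(self, io_loop=None):
        # Falls back whenever io_loop is None (or otherwise falsy).
        self.io_loop = io_loop or default_loop()

assert Server().io_loop is not None
loop = default_loop()
assert Server(loop).io_loop is loop
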
24d886b97e6ce1636e95f3c1bde7c889cf622a7c
|
Change string to byte conversion
|
pyglet/compat.py
|
pyglet/compat.py
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Compatibility tools
Various tools for simultaneous Python 2.x and Python 3.x support
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import sys
import itertools
if sys.version_info[0] == 2:
if sys.version_info[1] < 6:
#Pure Python implementation from
#http://docs.python.org/library/itertools.html#itertools.izip_longest
def izip_longest(*args, **kwds):
# izip_longest('ABCD', 'xy', fillvalue='-') --> Ax By C- D-
fillvalue = kwds.get('fillvalue')
def sentinel(counter = ([fillvalue]*(len(args)-1)).pop):
yield counter() # yields the fillvalue, or raises IndexError
fillers = itertools.repeat(fillvalue)
iters = [itertools.chain(it, sentinel(), fillers) for it in args]
try:
for tup in itertools.izip(*iters):
yield tup
except IndexError:
pass
else:
izip_longest = itertools.izip_longest
else:
izip_longest = itertools.zip_longest
if sys.version_info[0] >= 3:
import io
def asbytes(s):
if isinstance(s, bytes):
return s
return s.encode("utf-8")
def asstr(s):
if isinstance(s, str):
return s
return s.decode("utf-8")
bytes_type = bytes
BytesIO = io.BytesIO
else:
import StringIO
asbytes = str
asstr = str
bytes_type = str
BytesIO = StringIO.StringIO
|
Python
| 0.000358
|
@@ -2898,31 +2898,117 @@
-return s.encode(%22utf-8%22
+elif isinstance(s, str):%0A return bytes(ord(c) for c in s)%0A else:%0A return bytes(s
)%0A
|
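
A standalone copy of the converter as it reads after the diff, with asserts covering the three paths; note that ord-per-character only round-trips text whose code points fit in a byte, which is the trade-off of dropping the utf-8 encode:

def asbytes(s):
    if isinstance(s, bytes):
        return s                           # already bytes: pass through
    elif isinstance(s, str):
        return bytes(ord(c) for c in s)    # per-char code points (<= 255 only)
    else:
        return bytes(s)                    # e.g. an iterable of ints

assert asbytes(b'abc') == b'abc'
assert asbytes('abc') == b'abc'
assert asbytes([104, 105]) == b'hi'
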
0419012e15fc2f68d90fcaaefda4cee8ea3ef8df
|
Make cookie loading a little more pythonic.
|
pyicloud/base.py
|
pyicloud/base.py
|
import uuid
import hashlib
import json
import logging
import pickle
import requests
import sys
import tempfile
import os
from re import match
from pyicloud.exceptions import PyiCloudFailedLoginException
from pyicloud.services import (
FindMyiPhoneServiceManager,
CalendarService,
UbiquityService,
ContactsService
)
logger = logging.getLogger(__name__)
class PyiCloudService(object):
"""
A base authentication class for the iCloud service. Handles the
validation and authentication required to access iCloud services.
Usage:
from pyicloud import PyiCloudService
pyicloud = PyiCloudService('username@apple.com', 'password')
pyicloud.iphone.location()
"""
def __init__(self, apple_id, password):
self.discovery = None
self.client_id = str(uuid.uuid1()).upper()
self.user = {'apple_id': apple_id, 'password': password}
self._home_endpoint = 'https://www.icloud.com'
self._setup_endpoint = 'https://p12-setup.icloud.com/setup/ws/1'
self._push_endpoint = 'https://p12-pushws.icloud.com'
self._base_login_url = '%s/login' % self._setup_endpoint
self._base_validate_url = '%s/validate' % self._setup_endpoint
self._base_system_url = '%s/system/version.json' % self._home_endpoint
self._base_webauth_url = '%s/refreshWebAuth' % self._push_endpoint
self._cookie_directory = os.path.join(
tempfile.gettempdir(),
'pyicloud',
)
self.session = requests.Session()
self.session.verify = False
self.session.headers.update({
'host': 'setup.icloud.com',
'origin': self._home_endpoint,
'referer': '%s/' % self._home_endpoint,
'User-Agent': 'Opera/9.52 (X11; Linux i686; U; en)'
})
self.params = {}
self.authenticate()
def refresh_validate(self):
"""
Queries the /validate endpoint and fetches two key values we need:
1. "dsInfo" is a nested object which contains the "dsid" integer.
This object doesn't exist until *after* the login has taken place,
the first request will complain about an X-APPLE-WEBAUTH-TOKEN cookie
2. "instance" is an int which is used to build the "id" query string.
In pseudocode: sha1(email + "instance"), uppercased.
"""
req = self.session.get(self._base_validate_url, params=self.params)
resp = req.json()
if 'dsInfo' in resp:
dsid = resp['dsInfo']['dsid']
self.params.update({'dsid': dsid})
instance = resp.get(
'instance',
uuid.uuid4().hex.encode('utf-8')
)
sha = hashlib.sha1(
self.user.get('apple_id').encode('utf-8') + instance
)
self.params.update({'id': sha.hexdigest().upper()})
clientId = str(uuid.uuid1()).upper()
self.params.update({
'clientBuildNumber': '14E45',
'clientId': clientId,
})
def authenticate(self):
"""
Handles the full authentication steps, validating,
authenticating and then validating again.
"""
self.refresh_validate()
# Check if cookies directory exists
if not os.path.exists(self._cookie_directory):
# If not, create it
os.mkdir(self._cookie_directory)
# Set path for cookie file
cookiefile = self.user.get('apple_id')
cookiefile = os.path.join(self._cookie_directory, ''.join([c for c in cookiefile if match(r'\w', c)]))
# Check if cookie file already exists
if os.path.isfile(cookiefile):
# Get cookie data from file
with open(cookiefile, 'rb') as f:
webKBCookie = pickle.load(f)
self.session.cookies = requests.utils.cookiejar_from_dict(webKBCookie)
else:
webKBCookie = None
data = dict(self.user)
data.update({'id': self.params['id'], 'extended_login': False})
req = self.session.post(
self._base_login_url,
params=self.params,
data=json.dumps(data)
)
if not req.ok:
msg = 'Invalid email/password combination.'
raise PyiCloudFailedLoginException(msg)
# Pull X-APPLE-WEB-KB cookie from cookies
NewWebKBCookie = next(({key:val} for key, val in req.cookies.items() if 'X-APPLE-WEB-KB' in key), None)
# Check if cookie changed
if NewWebKBCookie and NewWebKBCookie != webKBCookie:
# Save the cookie in a pickle file
with open(cookiefile, 'wb') as f:
pickle.dump(NewWebKBCookie, f)
self.refresh_validate()
self.discovery = req.json()
self.webservices = self.discovery['webservices']
@property
def devices(self):
""" Return all devices."""
service_root = self.webservices['findme']['url']
return FindMyiPhoneServiceManager(
service_root,
self.session,
self.params
)
@property
def iphone(self):
return self.devices[0]
@property
def files(self):
if not hasattr(self, '_files'):
service_root = self.webservices['ubiquity']['url']
self._files = UbiquityService(
service_root,
self.session,
self.params
)
return self._files
@property
def calendar(self):
service_root = self.webservices['calendar']['url']
return CalendarService(service_root, self.session, self.params)
@property
def contacts(self):
service_root = self.webservices['contacts']['url']
return ContactsService(service_root, self.session, self.params)
def __unicode__(self):
return 'iCloud API: %s' % self.user.get('apple_id')
def __str__(self):
as_unicode = self.__unicode__()
if sys.version_info[0] >= 3:
return as_unicode
else:
return as_unicode.encode('ascii', 'ignore')
def __repr__(self):
return '<%s>' % str(self)
|
Python
| 0
|
@@ -3479,55 +3479,8 @@
e =
-self.user.get('apple_id')%0A cookiefile =
os.p
@@ -3488,16 +3488,29 @@
th.join(
+%0A
self._co
@@ -3524,16 +3524,28 @@
rectory,
+%0A
''.join
@@ -3561,41 +3561,92 @@
in
-cookiefile if match(r'%5Cw', c)%5D))%0A
+self.user.get('apple_id') if match(r'%5Cw', c)%5D)%0A )%0A%0A webKBCookie = None
%0A
@@ -3700,37 +3700,11 @@
-if os.path.isfile(cookiefile)
+try
:%0A
@@ -3906,65 +3906,314 @@
ict(
-webKBCookie)%0A else:%0A webKBCookie = None
+%0A webKBCookie%0A )%0A except IOError:%0A # This just means that the file doesn't exist; that's OK!%0A pass%0A except Exception as e:%0A logger.exception(%0A %22Unexpected error occurred while loading cookies: %25s%22 %25 (e, )%0A )
%0A%0A
|
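
The rewrite above replaces the os.path.isfile check with EAFP: attempt the read, treat a missing file as the normal first-run case, and log anything else. A self-contained sketch of that shape:

import pickle

def load_cookie(path):
    try:
        with open(path, 'rb') as f:
            return pickle.load(f)
    except IOError:
        return None  # no cached cookie yet; expected on first run
    except Exception as exc:
        print('Unexpected error while loading cookies: %s' % (exc,))
        return None

assert load_cookie('/nonexistent/pyicloud-cookie') is None
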
adb0922e8f6b7f69b047c62407429ec33e025320
|
Delete additional "o" in "builder" object
|
litex_boards/targets/digilent_basys3.py
|
litex_boards/targets/digilent_basys3.py
|
#!/usr/bin/env python3
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2020-2021 Xuanyu Hu <xuanyu.hu@whu.edu.cn>
# SPDX-License-Identifier: BSD-2-Clause
from migen import *
from litex_boards.platforms import basys3
from litex.soc.cores.clock import *
from litex.soc.integration.soc import SoCRegion
from litex.soc.integration.soc_core import *
from litex.soc.integration.builder import *
from litex.soc.cores.video import VideoVGAPHY
from litex.soc.cores.led import LedChaser
# CRG ----------------------------------------------------------------------------------------------
class _CRG(Module):
def __init__(self, platform, sys_clk_freq):
self.rst = Signal()
self.clock_domains.cd_sys = ClockDomain()
self.clock_domains.cd_vga = ClockDomain()
self.submodules.pll = pll = S7MMCM(speedgrade=-1)
self.comb += pll.reset.eq(platform.request("user_btnc") | self.rst)
pll.register_clkin(platform.request("clk100"), 100e6)
pll.create_clkout(self.cd_sys, sys_clk_freq)
pll.create_clkout(self.cd_vga, 40e6)
platform.add_false_path_constraints(self.cd_sys.clk, pll.clkin) # Ignore sys_clk to pll.clkin path created by SoC's rst.
#platform.add_platform_command("set_property CLOCK_DEDICATED_ROUTE FALSE [get_nets clk100_IBUF]")
# BaseSoC ------------------------------------------------------------------------------------------
class BaseSoC(SoCCore):
def __init__(self, sys_clk_freq=int(75e6), with_led_chaser=True, with_video_terminal=False, **kwargs):
platform = basys3.Platform()
# CRG --------------------------------------------------------------------------------------
self.submodules.crg = _CRG(platform, sys_clk_freq)
# SoCCore ----------------------------------------------------------------------------------
SoCCore.__init__(self, platform, sys_clk_freq, ident="LiteX SoC on Basys3", **kwargs)
# Video ------------------------------------------------------------------------------------
if with_video_terminal:
self.submodules.videophy = VideoVGAPHY(platform.request("vga"), clock_domain="vga")
if with_video_terminal:
self.add_video_terminal(phy=self.videophy, timings="800x600@60Hz", clock_domain="vga")
# Leds -------------------------------------------------------------------------------------
if with_led_chaser:
self.submodules.leds = LedChaser(
pads = platform.request_all("user_led"),
sys_clk_freq = sys_clk_freq)
# Build --------------------------------------------------------------------------------------------
def main():
from litex.soc.integration.soc import LiteXSoCArgumentParser
parser = LiteXSoCArgumentParser(description="LiteX SoC on Basys3")
target_group = parser.add_argument_group(title="Target options")
target_group.add_argument("--build", action="store_true", help="Build bitstream.")
target_group.add_argument("--load", action="store_true", help="Load bitstream.")
target_group.add_argument("--sys-clk-freq", default=75e6, help="System clock frequency.")
sdopts = target_group.add_mutually_exclusive_group()
sdopts.add_argument("--with-spi-sdcard", action="store_true", help="Enable SPI-mode SDCard support.")
sdopts.add_argument("--with-sdcard", action="store_true", help="Enable SDCard support.")
target_group.add_argument("--sdcard-adapter", type=str, help="SDCard PMOD adapter (digilent or numato).")
viopts = target_group.add_mutually_exclusive_group()
viopts.add_argument("--with-video-terminal", action="store_true", help="Enable Video Terminal (VGA).")
builder_args(parser)
soc_core_args(parser)
args = parser.parse_args()
soc = BaseSoC(
sys_clk_freq = int(float(args.sys_clk_freq)),
with_video_terminal = args.with_video_terminal,
**soc_core_argdict(args)
)
soc.platform.add_extension(basys3._sdcard_pmod_io)
if args.with_spi_sdcard:
soc.add_spi_sdcard()
if args.with_sdcard:
soc.add_sdcard()
if args.with_spi_sdcard:
soc.add_spi_sdcard()
if args.with_sdcard:
soc.add_sdcard()
builder = Builder(soc, **builder_argdict(args))
builder.build(run=args.build)
if args.load:
prog = soc.platform.create_programmer()
prog.load_bitstream(obuilder.get_bitstream_filename(mode="sram"))
if __name__ == "__main__":
main()
|
Python
| 0
|
@@ -4512,17 +4512,16 @@
tstream(
-o
builder.
|
be3f26c6f3401290e1bee726f0977cab78bdd61c
|
Allow unset viewport in lg_earth::client
|
lg_earth/src/lg_earth/client.py
|
lg_earth/src/lg_earth/client.py
|
import os
import shutil
import threading
import xml.etree.ElementTree as ET
from xml.dom import minidom
from tempfile import gettempdir as systmp
import rospy
from lg_common.msg import ApplicationState, WindowGeometry
from lg_common import ManagedApplication, ManagedWindow
from client_config import ClientConfig
TOOLBAR_HEIGHT = 22
class Client:
def __init__(self):
geometry = ManagedWindow.get_viewport_geometry()
geometry.y -= TOOLBAR_HEIGHT
geometry.height += TOOLBAR_HEIGHT
earth_window = ManagedWindow(
geometry=geometry,
w_class='Googleearth-bin',
w_name='Google Earth',
w_instance=self._get_instance()
)
cmd = ['/opt/google/earth/free/googleearth-bin']
args, geplus_config, layers_config, kml_content, view_content = \
self._get_config()
cmd.extend(args)
self.earth_proc = ManagedApplication(cmd, window=earth_window)
self._make_dir()
os.mkdir(self._get_dir() + '/.googleearth')
os.mkdir(self._get_dir() + '/.googleearth/Cache')
if rospy.get_param('~show_google_logo', True):
pass
else:
self._touch_file((self._get_dir() + '/.googleearth/' + 'localdbrootproto'))
os.mkdir(self._get_dir() + '/.config')
os.mkdir(self._get_dir() + '/.config/Google')
self._render_config(geplus_config,
'.config/Google/GoogleEarthPlus.conf')
self._render_config(layers_config,
'.config/Google/GECommonSettings.conf')
self._render_file(kml_content,
'.googleearth/myplaces.kml')
self._render_file(view_content,
'.googleearth/cached_default_view.kml')
os.environ['HOME'] = self._get_dir()
os.environ['BROWSER'] = '/dev/null'
if os.getenv('DISPLAY') is None:
os.environ['DISPLAY'] = ':0'
os.environ['LD_LIBRARY_PATH'] += ':/opt/google/earth/free'
def _touch_file(self, fname):
if os.path.exists(fname):
os.utime(fname, None)
else:
open(fname, 'a').close()
def _get_instance(self):
return '_earth_instance_' + rospy.get_name().strip('/')
def _get_dir(self):
return os.path.normpath(systmp() + '/' + self._get_instance())
def _make_dir(self):
self._clean_dir()
os.mkdir(self._get_dir())
assert os.path.exists(self._get_dir())
rospy.on_shutdown(self._clean_dir)
def _clean_dir(self):
try:
shutil.rmtree(self._get_dir())
except OSError:
pass
def _get_config(self):
config = ClientConfig(self._get_dir(), self._get_instance())
return config.get_config()
def _render_file(self, content, path):
with open(self._get_dir() + '/' + path, 'w') as f:
f.write(content)
def _render_config(self, config, path):
with open(self._get_dir() + '/' + path, 'w') as f:
for section, settings in config.iteritems():
f.write('[' + section + ']\n')
for k, v in settings.iteritems():
r = str(v).lower() if isinstance(v, bool) else str(v)
f.write(k + '=' + r + '\n')
f.write('\n')
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
Python
| 0.000001
|
@@ -425,16 +425,53 @@
metry()%0A
+ if geometry is not None:%0A
@@ -499,16 +499,20 @@
_HEIGHT%0A
+
|
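The patch above makes the toolbar adjustment conditional on the viewport geometry being set. A self-contained sketch of that guard; the Geometry class is a hypothetical stand-in for lg_common's WindowGeometry message:

TOOLBAR_HEIGHT = 22

class Geometry(object):
    # Hypothetical stand-in for lg_common.msg.WindowGeometry.
    def __init__(self, y=0, height=0):
        self.y = y
        self.height = height

def adjust_for_toolbar(geometry):
    """Grow the window upward to hide the toolbar, if a viewport is set."""
    if geometry is not None:
        geometry.y -= TOOLBAR_HEIGHT
        geometry.height += TOOLBAR_HEIGHT
    return geometry

assert adjust_for_toolbar(None) is None
g = adjust_for_toolbar(Geometry(y=100, height=600))
assert (g.y, g.height) == (78, 622)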
cf644a17bd8c2abe436a37159bdf3eec7d2a358d
|
Remove premature optimization
|
luigi/tasks/quickgo/load_annotations.py
|
luigi/tasks/quickgo/load_annotations.py
|
# -*- coding: utf-8 -*-
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from tasks.utils.pgloader import PGLoader
from tasks.ontologies import Ontologies
from .quickgo_data import QuickGoData
CONTROL_FILE = """
LOAD CSV
FROM '{filename}'
WITH ENCODING ISO-8859-14
HAVING FIELDS ({fields})
INTO {db_url}
TARGET COLUMNS ({columns})
SET
search_path = '{search_path}'
WITH
fields escaped by double-quote,
fields terminated by ','
BEFORE LOAD DO
$$
create table if not exists {load_table} (
rna_id varchar(50),
qualifier varchar(30),
assigned_by varchar(50),
extensions jsonb,
ontology_term_id varchar(15),
evidence_code varchar(15)
);
$$,
$$
truncate table {load_table};
$$
AFTER LOAD DO
$$
INSERT INTO {final_table} (
rna_id,
qualifier,
assigned_by,
extensions,
ontology_term_id,
evidence_code
) (
SELECT
rna_id,
qualifier,
assigned_by,
extensions,
ontology_term_id,
evidence_code
FROM {load_table}
)
ON CONFLICT (rna_id, qualifier, assigned_by, ontology_term_id, evidence_code)
DO UPDATE
SET
extensions = excluded.extensions
;
$$,
$$
DROP TABLE {load_table};
$$
;
"""
class QuickGoLoadAnnotations(PGLoader):
def requires(self):
return [
QuickGoData(),
Ontologies(),
]
def control_file(self):
output = self.requires()[0].output()
table = 'go_term_annotations'
load_table = 'load_' + table
fields = ', '.join(output.annotations.headers)
return CONTROL_FILE.format(
filename=output.annotations.filename,
fields=fields,
columns=fields,
final_table=table,
load_table=load_table,
db_url=self.db_url(table=load_table),
search_path=self.db_search_path(),
)
|
Python
| 0.000015
|
@@ -1625,40 +1625,240 @@
-extensions = excluded.extensions
+rna_id = excluded.rna_id,%0A qualifier = excluded.qualifier,%0A assigned_by = excluded.assigned_by,%0A extensions = excluded.extensions,%0A ontology_term_id = excluded.ontology_term_id,%0A evidence_code = excluded.evidence_code
%0A;%0A$
|
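The change enumerates every target column in the ON CONFLICT ... DO UPDATE SET clause instead of updating extensions alone. A small sketch of generating such a clause from the header list; the helper is hypothetical and not part of the PGLoader task:

COLUMNS = [
    'rna_id', 'qualifier', 'assigned_by',
    'extensions', 'ontology_term_id', 'evidence_code',
]

def upsert_set_clause(columns):
    """Build the SET body of an ON CONFLICT ... DO UPDATE statement."""
    return ',\n'.join('%s = excluded.%s' % (col, col) for col in columns)

print(upsert_set_clause(COLUMNS))
# rna_id = excluded.rna_id,
# qualifier = excluded.qualifier,
# ...
# evidence_code = excluded.evidence_code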
625839a284085e92855b52aaa4abbf0f30d66bb2
|
pretty_output().header
|
qaamus/qaamus.py
|
qaamus/qaamus.py
|
import unittest
import requests
from bs4 import BeautifulSoup
from ind_ara_parser import IndAraParser
class Qaamus:
def terjemah(self, layanan, query):
"""
Return the translation, depending on which **layanan** (service) is chosen
and which **query** is used.
As of now, [Qaamus](qaamus.com) offers 3 services (*layanan*):
* Indonesia Arab (Indonesian-Arabic)
* Angka (numbers)
* Terjemah nama (name translation)
while *query* is your search query"""
if layanan == "idar":
url = self.build_idar_url(query)
soup = self._make_soup(url)
parser = IndAraParser(soup)
return {"utama": parser.get_arti_master(),
"berhubungan": parser.get_all_arti_berhub(self._make_soup)}
def _make_soup(self, url):
"""Return BeautifulSoup object."""
resp = requests.get(url)
return BeautifulSoup(resp.content)
def build_idar_url(self, query):
"""Return url pencarian sesuai dengan *query* yang dimasukkan."""
query = "+".join(query.split(" "))
url = "http://qaamus.com/indonesia-arab/" + query + "/1"
return url
class QaamusTest(unittest.TestCase):
def test_building_idar_url(self):
q = Qaamus()
expected_url = "http://qaamus.com/indonesia-arab/capai/1"
this_url = q.build_idar_url("capai")
self.assertEqual(this_url, expected_url)
def test_building_idar_url_with_multiple_words(self):
q = Qaamus()
expected_url = "http://qaamus.com/indonesia-arab/mobil+ambulan+bagus/1"
this_url = q.build_idar_url("mobil ambulan bagus")
self.assertEqual(this_url, expected_url)
def idar(query):
return Qaamus().terjemah("idar", query)
if __name__ == "__main__":
print(idar("memukul"))
unittest.main()
|
Python
| 0.999995
|
@@ -97,16 +97,954 @@
arser%0A%0A%0A
+class pretty_output(object):%0A def __init__(self, dict_obj):%0A self.dict_obj = dict_obj%0A%0A @property%0A def header(self):%0A return %22-= Arti dari %7Bind_utama%7D =-%22.format(%0A ind_utama=self.dict_obj.get(%22utama%22).get(%22ind%22))%0A%0A%0Aclass PrettyOutputTestCase(unittest.TestCase):%0A def setUp(self):%0A self.dict_ = %7B'utama': %7B%22ind%22: %22ind_utama%22,%0A %22ara%22: %22ara_utama%22,%0A %22footer%22: %22footer%22%7D,%0A 'berhubungan': %5B%0A %7B%22ind%22: %22ind_pertama%22,%0A %22ara%22: %22ara_pertama%22%7D,%0A %7B%22ind%22: %22ind_kedua%22,%0A %22ara%22: %22ara_kedua%22%7D%0A %5D%0A %7D%0A%0A def test_pretty_output_header(self):%0A po = pretty_output(self.dict_).header%0A expected = %22-= Arti dari ind_utama =-%22%0A self.assertEqual(po, expected)%0A%0A%0A
class Qa
@@ -2080,24 +2080,55 @@
rab/
-%22 + query + %22/1%22
+%7Bquery%7D/1%22.format(%0A query=query)
%0A
|
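The diff introduces a pretty_output wrapper whose header property renders the main entry of the result dict ("Arti dari" is Indonesian for "Meaning of"). A runnable sketch of the same shape; the sample dict mirrors what terjemah() returns above:

class pretty_output(object):
    """Format a qaamus result dict for terminal display."""

    def __init__(self, dict_obj):
        self.dict_obj = dict_obj

    @property
    def header(self):
        # "Arti dari X" means "Meaning of X".
        return "-= Arti dari {ind_utama} =-".format(
            ind_utama=self.dict_obj.get("utama").get("ind"))

result = {"utama": {"ind": "capai", "ara": "..."}, "berhubungan": []}
assert pretty_output(result).header == "-= Arti dari capai =-"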
4a246381f04b13ed71e098bbeb019b6d1e19dc32
|
Update about.py
|
quantum/about.py
|
quantum/about.py
|
# -*- coding: utf-8 -*-
# This file is part of Quantum.
#
# Copyright (c) 2017, Diego Nicolás Bernal-García
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
#
# The functions in this module were inspired by:
# QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
"""
Command line output of information on Quantum and dependencies.
"""
__all__ = ['about']
import sys
import os
import platform
import numpy
import scipy
import inspect
from qutip.utilities import _blas_info
import qutip.settings
from qutip.hardware_info import hardware_info
from quantum.version import __version__
def about():
"""
About box for Quantum. Gives version numbers for
Quantum, QuTiP, NumPy, SciPy, Cython, and MatPlotLib.
"""
print("")
print("Quantum: Quantum dynamics solver")
print("Copyright (c) 2017 and later.")
print(("D. N. Bernal-Garcia"))
print("")
print("Quantum Version: %s" % __version__)
print("QuTiP Version: %s" % qutip.__version__)
print("Numpy Version: %s" % numpy.__version__)
print("Scipy Version: %s" % scipy.__version__)
try:
import Cython
cython_ver = Cython.__version__
except:
cython_ver = 'None'
print("Cython Version: %s" % cython_ver)
try:
import matplotlib
matplotlib_ver = matplotlib.__version__
except:
matplotlib_ver = 'None'
print("Matplotlib Version: %s" % matplotlib_ver)
print("Python Version: %d.%d.%d" % sys.version_info[0:3])
print("Number of CPUs: %s" % hardware_info()['cpus'])
print("BLAS Info: %s" % _blas_info())
print("OPENMP Installed: %s" % str(qutip.settings.has_openmp))
print("INTEL MKL Ext: %s" % str(qutip.settings.has_mkl))
print("Platform Info: %s (%s)" % (platform.system(),
platform.machine()))
#quantum_install_path = os.path.dirname(inspect.getsourcefile(quantum))
#print("Installation path: %s" % quantum_install_path)
print("")
if __name__ == "__main__":
about()
|
Python
| 0
|
@@ -3988,17 +3988,17 @@
nal-Garc
-i
+%C3%AD
a%22))%0A
|
23ab13f192b58f8b550aa2e16d5861e14535698a
|
Add slot for empty_patch in cli pop command
|
quilt/cli/pop.py
|
quilt/cli/pop.py
|
# vim: fileencoding=utf-8 et sw=4 ts=4 tw=80:
# python-quilt - A Python implementation of the quilt patch system
#
# Copyright (C) 2012 Björn Ricks <bjoern.ricks@googlemail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
import os
from quilt.cli.meta import Command
from quilt.pop import Pop
class PopCommand(Command):
usage = "%prog pop [-a] [patch]"
name = "pop"
def add_args(self, parser):
parser.add_option("-a", "--all", help="remove all applied patches",
action="store_true")
def run(self, options, args):
pop = Pop(os.getcwd(), self.get_pc_dir())
pop.unapplying.connect(self.unapplying)
pop.unapplied.connect(self.unapplied)
if options.all:
pop.unapply_all()
elif not args:
pop.unapply_top_patch()
else:
pop.unapply_patch(args[0])
def unapplying(self, patch):
print "Removing patch %s" % patch.get_name()
def unapplied(self, patch):
if not patch:
print "No patches applied"
else:
print "Now at patch %s" % patch.get_name()
|
Python
| 0
|
@@ -1389,16 +1389,66 @@
applied)
+%0A pop.empty_patch.connect(self.empty_patch)
%0A%0A
@@ -1837,28 +1837,137 @@
atch %25s%22 %25 patch.get_name()%0A
+%0A def empty_patch(self, patch):%0A print %22Patch %25s appears to be empty, removing%22 %25 patch.get_name()%0A
|
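The patch wires one more backend signal (empty_patch) to a CLI callback via connect(). A minimal signal/slot sketch in the same spirit; the Signal class is a hypothetical stand-in for quilt's internal implementation:

class Signal(object):
    # Hypothetical minimal signal; quilt's real implementation differs.
    def __init__(self):
        self._handlers = []

    def connect(self, handler):
        self._handlers.append(handler)

    def emit(self, *args):
        for handler in self._handlers:
            handler(*args)

class Patch(object):
    def __init__(self, name):
        self._name = name

    def get_name(self):
        return self._name

empty_patch = Signal()
empty_patch.connect(lambda patch: print(
    "Patch %s appears to be empty, removing" % patch.get_name()))
empty_patch.emit(Patch("fix-build.patch"))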
8058430bc32b01d7026f0a80c40a83181adcb90f
|
handle utf-8 encodings
|
EsFormatter.py
|
EsFormatter.py
|
import sublime, sublime_plugin, subprocess, threading, json, re, platform, sys, os
ON_WINDOWS = platform.system() is 'Windows'
ST2 = sys.version_info < (3, 0)
NODE = None
if not ON_WINDOWS:
# Extend Path to catch Node installed via HomeBrew
os.environ['PATH'] += ':/usr/local/bin'
class EsformatterCommand(sublime_plugin.TextCommand):
def run(self, edit):
if (NODE.mightWork() == False):
return
# Settings for formatting
settings = sublime.load_settings("EsFormatter.sublime-settings")
format_options = json.dumps(settings.get("format_options"))
if (len(self.view.sel()) == 1 and self.view.sel()[0].empty()):
# Only one caret and no text selected, format the whole file
textContent = self.view.substr(sublime.Region(0, self.view.size()))
thread = NodeCall(textContent, format_options)
thread.start()
self.handle_thread(thread, lambda: self.replaceFile(thread))
else:
# Format each and every selection block
threads = []
for selection in self.view.sel():
# Take everything from the beginning to the end of line
region = self.view.line(selection)
textContent = self.view.substr(region)
thread = NodeCall(textContent, format_options, len(threads), region)
threads.append(thread)
thread.start()
self.handle_threads(threads, lambda process, lastError: self.handleSyntaxErrors(process, lastError, format_options))
def replaceFile(self, thread):
'''Replace the entire file content with the formatted text.'''
self.view.run_command("esformat_update_content", {"text": thread.result})
sublime.status_message("File formatted")
def handleSyntaxErrors(self, threads=None, lastError=None, format_options=''):
'''When formatting whole lines there might be a syntax error because we select
the whole line content. In that case, fall back to the user selection.'''
if (lastError is None and threads is not None):
self.replaceSelections(threads, None)
else:
# Format each and every selection block
threads = []
for selection in self.view.sel():
# Take only the user selection
textContent = self.view.substr(selection)
thread = NodeCall(textContent, format_options, len(threads), selection)
threads.append(thread)
thread.start()
self.handle_threads(threads, lambda process, lastError: self.replaceSelections(process, lastError))
def replaceSelections(self, threads, lastError):
'''Replace the content of a list of selections.
This is called when there are multiple cursors or a selection of text'''
if (lastError):
sublime.error_message("Error (2):" + lastError)
else:
# Modify the selections from top to bottom to account for different text length
offset = 0
regions = []
for thread in sorted(threads, key=lambda t: t.region.begin()):
if offset:
region = [thread.region.begin() + offset, thread.region.end() + offset, thread.result]
else:
region = [thread.region.begin(), thread.region.end(), thread.result]
offset += len(thread.result) - len(thread.code)
regions.append(region)
self.view.run_command("esformat_update_content", {"regions": regions})
def handle_thread(self, thread, callback):
if thread.is_alive():
sublime.set_timeout(lambda: self.handle_thread(thread, callback), 100)
elif thread.result is not False:
callback()
else:
sublime.error_message("Error (1):" + thread.error)
def handle_threads(self, threads, callback, process=False, lastError=None):
next_threads = []
if process is False:
process = []
for thread in threads:
if thread.is_alive():
next_threads.append(thread)
continue
if thread.result is False:
# This thread failed
lastError = thread.error
continue
# Thread completed correctly
process.append(thread)
if len(next_threads):
# Some more threads to wait
sublime.set_timeout(lambda: self.handle_threads(next_threads, callback, process, lastError), 100)
else:
callback(process, lastError)
class NodeCall(threading.Thread):
def __init__(self, code, options, id=0, region=None):
self.code = code
self.region = region
exec_path = os.path.join(sublime.packages_path(), "EsFormatter", "lib", "esformatter.js")
self.cmd = getNodeCommand(exec_path, options)
self.result = None
threading.Thread.__init__(self)
def run(self):
try:
process = subprocess.Popen(self.cmd, bufsize=160*len(self.code), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, startupinfo=getStartupInfo())
if ST2:
stdout, stderr = process.communicate(self.code)
self.result = re.sub(r'(\r|\r\n|\n)\Z', '', stdout)
else:
stdout, stderr = process.communicate(self.code.encode())
self.result = re.sub(r'(\r|\r\n|\n)\Z', '', str(stdout, encoding='utf-8'))
if stderr:
self.result = False
self.error = str(stderr)
except Exception as e:
self.result = False
self.error = str(e)
def getStartupInfo():
if ON_WINDOWS:
info = subprocess.STARTUPINFO()
info.dwFlags |= subprocess.STARTF_USESHOWWINDOW
info.wShowWindow = subprocess.SW_HIDE
return info
return None
def getNodeCommand(libPath, options=None):
if (options):
return ["node", libPath, options]
else:
return ["node", libPath]
class NodeCheck:
'''This class checks whether node.js is installed and available in the path.
The check is done only once, when mightWork() is called for the first time.
Being a tri-state class, it's better not to access its properties directly; only call mightWork()'''
def __init__(self):
self.works = False
self.checkDone = False
def mightWork(self):
if (self.checkDone):
return self.works
# Run node version to know if it's in the path
try:
subprocess.Popen(getNodeCommand("--version"), bufsize=1, stdin=None, stdout=None, stderr=None, startupinfo=getStartupInfo())
self.works = True
except OSError as e:
sublime.error_message("It looks like node is not installed.\nPlease make sure that node.js is installed and in your PATH")
return self.works
NODE = NodeCheck()
class EsformatUpdateContent(sublime_plugin.TextCommand):
def run(self, edit, text=None, regions=None):
if text:
self.view.replace(edit, sublime.Region(0, self.view.size()), text)
else:
for region in regions:
self.view.replace(edit, sublime.Region(region[0], region[1]), region[2])
|
Python
| 0.000002
|
@@ -857,32 +857,48 @@
Call(textContent
+.encode('utf-8')
, format_options
@@ -1351,32 +1351,48 @@
Call(textContent
+.encode('utf-8')
, format_options
@@ -5445,16 +5445,32 @@
stdout)
+.decode('utf-8')
%0A
@@ -5546,17 +5546,8 @@
code
-.encode()
)%0A
|
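The fix encodes the buffer to UTF-8 before piping it to the node process and decodes stdout on the way back, so non-ASCII source survives the round trip. A standalone sketch of that round trip; cat is a POSIX stand-in for the node/esformatter command:

import subprocess

def pipe_through(cmd, text):
    """Send unicode text through a subprocess, decoding its stdout as UTF-8."""
    process = subprocess.Popen(
        cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = process.communicate(text.encode('utf-8'))
    if stderr:
        raise RuntimeError(stderr.decode('utf-8'))
    return stdout.decode('utf-8')

# 'cat' simply echoes stdin (POSIX only); the plugin pipes through node instead.
assert pipe_through(['cat'], u'var caf\u00e9 = 1;') == u'var caf\u00e9 = 1;'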
2c386cc3e81caffd906b68a6d527bd8bdd1d5ae5
|
Replace nltk.model.NgramModel with nltk.ngrams
|
marmot/features/lm_feature_extractor.py
|
marmot/features/lm_feature_extractor.py
|
from nltk.model import NgramModel
from marmot.features.feature_extractor import FeatureExtractor
from marmot.util.simple_corpus import SimpleCorpus
def check_lm_recursive(words, lm, low_order='left'):
if len(words) < lm._n:
return check_lm_recursive(words, lm._backoff, low_order=low_order)
if tuple(words) in lm._ngrams:
return lm._n
elif lm._n > 1:
if low_order == 'left':
return check_lm_recursive(words[1:], lm._backoff, low_order=low_order)
elif low_order == 'right':
return check_lm_recursive(words[:-1], lm._backoff, low_order=low_order)
else:
return 0
# Class that extracts various LM features
# Calling an external LM is very slow, so a new lm is constructed with nltk
class LMFeatureExtractor(FeatureExtractor):
def __init__(self, corpus_file, order=3):
# load the corpus
corp = SimpleCorpus(corpus_file)
# nltk LM requires all words in one list
all_words = [w for sent in [line for line in corp.get_texts()] for w in sent]
self.lm = NgramModel(order, all_words)
def check_lm_recursive(words, lm, low_order='left'):
if len(words) < lm._n:
return check_lm_recursive(words, lm._backoff, low_order=low_order)
if tuple(words) in lm._ngrams:
return lm._n
elif lm._n > 1:
if low_order == 'left':
return check_lm_recursive(words[1:], lm._backoff, low_order=low_order)
elif low_order == 'right':
return check_lm_recursive(words[:-1], lm._backoff, low_order=low_order)
else:
return 0
# returns a set of features related to LM
# currently extracting: highest order ngram including the word and its LEFT context,
# highest order ngram including the word and its RIGHT context
def get_features(self, context_obj):
left_ngram = check_lm_recursive(context_obj['target'][max(0, context_obj['index']-self.lm._n):context_obj['index']], self.lm, low_order='left')
right_ngram = check_lm_recursive(context_obj['target'][context_obj['index']:min(len(context_obj['target']),context_obj['index']+self.lm._n)], self.lm, low_order='right')
return (left_ngram, right_ngram)
|
Python
| 0.999998
|
@@ -1,12 +1,52 @@
+from nltk import ngrams, word_tokenize%0A#
from nltk.mo
@@ -187,468 +187,8 @@
us%0A%0A
-def check_lm_recursive(words, lm, low_order='left'):%0A if len(words) %3C lm._n:%0A return check_lm_recursive(words, lm._backoff, low_order=low_order) %0A%0A if tuple(words) in lm._ngrams:%0A return lm._n%0A elif lm._n %3E 1:%0A if low_order == 'left': %0A return check_lm_recursive(words%5B1:%5D, lm._backoff, low_order=low_order)%0A elif low_order == 'right':%0A return check_lm_recursive(words%5B:-1%5D, lm._backoff, low_order=low_order)%0A else:%0A return 0%0A%0A
%0A# C
@@ -349,16 +349,18 @@
ctor):%0A%0A
+
def __
@@ -399,53 +399,110 @@
3):%0A
+%0A
-# load the corpus%0A corp = SimpleCorpus
+ self.order = order%0A self.lm = %5B set() for i in range(order) %5D%0A for line in open
(cor
@@ -514,275 +514,243 @@
ile)
+:
%0A
-# nltk LM requires all words in one list%0A all_words = %5Bw for sent in %5Bline for line in corp.get_texts()%5D for w in sent%5D%0A self.lm = NgramModel(order, all_words)%0A%0A%0A def check_lm_recursive(words, lm, low_order='left'):%0A if len(words) %3C lm._n:%0A
+ words = word_tokenize(line%5B:-1%5D.decode('utf-8'))%0A for i in range(1,order):%0A self.lm%5Bi%5D = self.lm%5Bi%5D.union( ngrams( words, i+1 ) )%0A self.lm%5B0%5D = self.lm%5B0%5D.union(words)%0A
+%0A
-return
+def
che
@@ -758,139 +758,85 @@
k_lm
-_recursive(words, lm._backoff, low_order=low_order)
+(self, ngram, side='left'):%0A
-%0A%0A
-if tuple(words) in lm._ngrams:%0A return lm._n%0A elif lm._n %3E 1:%0A
+for i in range(self.order, 0, -1):%0A
@@ -840,25 +840,20 @@
if
-low_or
+si
de
-r
== 'lef
@@ -855,18 +855,16 @@
'left':
-
%0A
@@ -868,99 +868,70 @@
-return check_lm_recursive(words%5B1:%5D, lm._backoff, low_order=low_order)%0A elif low_or
+ cur_ngram = ngram%5Blen(ngram)-i:%5D%0A elif si
de
-r
==
@@ -951,90 +951,114 @@
-return check_lm_recursive(words%5B:-1%5D, lm._backoff, low_order=low_order)%0A else:%0A
+ cur_ngram = ngram%5B:i%5D%0A if tuple(cur_ngram) in self.lm%5Bi-1%5D:%0A return i%0A
@@ -1288,16 +1288,18 @@
context%0A
+
def ge
@@ -1329,16 +1329,20 @@
t_obj):%0A
+
left
@@ -1346,32 +1346,37 @@
eft_ngram =
+self.
check_lm
_recursive(c
@@ -1359,35 +1359,26 @@
elf.check_lm
-_recursive
(
+
context_obj%5B
@@ -1391,47 +1391,8 @@
t'%5D%5B
-max(0, context_obj%5B'index'%5D-self.lm._n)
:con
@@ -1412,42 +1412,35 @@
ex'%5D
++1
%5D, s
-elf.lm, low_or
+i
de
-r
='left'
+
)%0A
+
+
righ
@@ -1449,24 +1449,29 @@
ngram =
+self.
check_lm
_recursi
@@ -1466,19 +1466,10 @@
k_lm
-_recursive
(
+
cont
@@ -1511,100 +1511,23 @@
x'%5D:
-min(len(context_obj%5B'target'%5D),context_obj%5B'index'%5D+self.lm._n)%5D, self.lm, low_or
+%5D, si
de
-r
='right'
)%0A
@@ -1522,18 +1522,23 @@
='right'
+
)%0A
+
retu
|
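The rewrite drops nltk.model.NgramModel in favour of plain sets of tuples built with nltk.ngrams, with lm[i] holding all (i+1)-grams seen in the corpus. A small sketch of that lookup structure; it requires nltk plus its punkt tokenizer data (nltk.download('punkt')):

from nltk import ngrams, word_tokenize

def build_ngram_sets(lines, order=3):
    """Return a list of sets: lm[0] holds unigrams, lm[i] holds (i+1)-grams."""
    lm = [set() for _ in range(order)]
    for line in lines:
        words = word_tokenize(line)
        lm[0].update(words)
        for i in range(1, order):
            lm[i].update(ngrams(words, i + 1))
    return lm

lm = build_ngram_sets(["the cat sat on the mat", "the cat ran"])
assert "cat" in lm[0]
assert ("the", "cat") in lm[1]
assert ("the", "cat", "sat") in lm[2]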
e7885d37503888f0b42007622ef1af31f7302ebe
|
fix typo
|
corehq/apps/export/const.py
|
corehq/apps/export/const.py
|
"""
Some of these constants correspond to constants set in corehq/apps/export/static/export/js/const.js
so if changing a value, ensure that both places reflect the change
"""
from couchexport.deid import (
deid_ID,
deid_date
)
from corehq.apps.export.transforms import (
case_id_to_case_name,
user_id_to_username,
owner_id_to_display,
workflow_transform,
doc_type_transform,
case_or_user_id_to_name
)
# When fixing a bug that requires existing schemas to be rebuilt,
# bump the version number.
FORM_DATA_SCHEMA_VERSION = 9
CASE_DATA_SCHEMA_VERSION = 8
SMS_DATA_SCHEMA_VERSION = 1
DEID_ID_TRANSFORM = "deid_id"
DEID_DATE_TRANSFORM = "deid_date"
DEID_TRANSFORM_FUNCTIONS = {
DEID_ID_TRANSFORM: deid_ID,
DEID_DATE_TRANSFORM: deid_date,
}
CASE_NAME_TRANSFORM = "case_name_transform"
USERNAME_TRANSFORM = "username_transform"
OWNER_ID_TRANSFORM = "owner_id_transform"
WORKFLOW_TRANSFORM = "workflow_transform"
DOC_TYPE_TRANSFORM = "doc_type_transform"
CASE_OR_USER_ID_TRANSFORM = "case_or_user_id_transform"
TRANSFORM_FUNCTIONS = {
CASE_NAME_TRANSFORM: case_id_to_case_name,
USERNAME_TRANSFORM: user_id_to_username,
OWNER_ID_TRANSFORM: owner_id_to_display,
WORKFLOW_TRANSFORM: workflow_transform,
DOC_TYPE_TRANSFORM: doc_type_transform,
CASE_OR_USER_ID_TRANSFORM: case_or_user_id_to_name,
}
PLAIN_USER_DEFINED_SPLIT_TYPE = 'plain'
MULTISELCT_USER_DEFINED_SPLIT_TYPE = 'multi-select'
USER_DEFINED_SPLIT_TYPES = [
PLAIN_USER_DEFINED_SPLIT_TYPE,
MULTISELCT_USER_DEFINED_SPLIT_TYPE,
]
PROPERTY_TAG_NONE = None
PROPERTY_TAG_INFO = 'info'
PROPERTY_TAG_CASE = 'case'
PROPERTY_TAG_UPDATE = 'update'
PROPERTY_TAG_SERVER = 'server'
PROPERTY_TAG_DELETED = 'deleted'
PROPERTY_TAG_ROW = 'row'
PROPERTY_TAG_APP = "app"
PROPERTY_TAG_STOCK = 'stock'
# Yeah... let's not hard code this list everywhere
# This list comes from casexml.apps.case.xml.parser.CaseActionBase.from_v2
KNOWN_CASE_PROPERTIES = ["type", "name", "external_id", "user_id", "owner_id", "opened_on"]
# Attributes found on a case block. <case case_id="..." date_modified="..." ...>
CASE_ATTRIBUTES = ['@case_id', '@date_modified', '@user_id']
# Elements that are found in a case create block
# <case>
# <create>
# <case_name>
# ...
# </create>
# </case>
CASE_CREATE_ELEMENTS = ['case_name', 'owner_id', 'case_type']
FORM_EXPORT = 'form'
CASE_EXPORT = 'case'
SMS_EXPORT = 'sms'
MAX_EXPORTABLE_ROWS = 100000
CASE_SCROLL_SIZE = 10000
# When a question is missing completely from a form/case this should be the value
MISSING_VALUE = '---'
# When a question has been answered, but is blank, this shoudl be the value
EMPTY_VALUE = ''
UNKNOWN_INFERRED_FROM = 'unknown'
SKIPPABLE_PROPERTIES = frozenset([
'initial_processing_complete',
'_rev',
'computed_modified_on_',
'server_modified_on',
'domain',
'form.#type',
'form.@uiVersion',
'openrosa_headers.HTTP_X_OPENROSA_VERSION',
'openrosa_headers.HTTP_ACCEPT_LANGUAGE',
'openrosa_headers.HTTP_DATE',
'problem',
'doc_type',
'path',
'version',
'date_header',
'migrating_blobs_from_couch',
'orig_id',
'edited_on',
'deprecated_date',
'deprecated_form_id',
'auth_context.authenticated',
'auth_context.doc_type',
'auth_context.domain',
'auth_context.user_id',
])
SAVED_EXPORTS_QUEUE = 'saved_exports_queue'
# The maximum file size of one DataFile
MAX_DATA_FILE_SIZE = 104857600 # 100 MB
# The total space allowance of a domain for DataFiles
MAX_DATA_FILE_SIZE_TOTAL = 2147483648 # 2 GB
|
Python
| 0.999991
|
@@ -2635,10 +2635,10 @@
shou
-d
l
+d
be
|
7a5abe865ca66c3cc13d830f3d967217aca6db68
|
Improve listener organization
|
microcosm_postgres/encryption/models.py
|
microcosm_postgres/encryption/models.py
|
"""
Encryption-related models.
"""
from typing import Sequence, Tuple
from sqlalchemy import Column, LargeBinary, String
from sqlalchemy.event import contains, listen, remove
from sqlalchemy.dialects.postgresql import ARRAY
from microcosm_postgres.encryption.encryptor import Encryptor
def on_init(target: "EncryptableMixin", args, kwargs):
"""
Intercept SQLAlchemy's instance init event.
SQLALchemy allows callback to intercept ORM instance init functions. The calling arguments
will be an empty instance of the `target` model, plus the arguments passed to `__init__`.
The `kwargs` dictionary is mutable (which is why it is not passed as `**kwargs`). We leverage
this callback to conditionally remove the `__plaintext__` value and set the `ciphertext` property.
"""
encryptor = target.__encryptor__
# encryption context may be nullable
try:
encryption_context_key = str(kwargs[target.__encryption_context_key__])
except KeyError:
return
# do not encrypt targets that are not configured for it
if encryption_context_key not in encryptor:
return
plaintext = kwargs.pop(target.__plaintext__)
ciphertext, key_ids = encryptor.encrypt(encryption_context_key, plaintext)
target.ciphertext = (ciphertext, key_ids)
def on_load(target: "EncryptableMixin", context):
"""
Intercept SQLAlchemy's instance load event.
"""
encryptor = target.__encryptor__
# encryption context may be nullable
if target.encryption_context_key is None:
return
encryption_context_key = str(target.encryption_context_key)
# do not decrypt targets that are not configured for it
if encryption_context_key not in encryptor:
return
ciphertext, key_ids = target.ciphertext
target.plaintext = encryptor.decrypt(encryption_context_key, ciphertext)
class EncryptableMixin:
"""
A (conditionally) encryptable model.
Using SQLAlchemy ORM events to intercept instance construction and loading to
enforce encryption (if appropriate for the `encryption_context_key`). Should be
combined with database constraints to enforce that the instance is *either* encrypted
or un-encrypted, but *not* both.
Must define:
- An `encryption_context_key` property (defaults to `self.key`)
- A settable, `plaintext` property (defaults to `self.value`)
- A settable, `ciphertext` property (not defaulted)
"""
__encryptor__ = None
__encryption_context_key__ = "key"
__plaintext__ = "value"
@property
def encryption_context_key(self) -> str:
return getattr(self, self.__encryption_context_key__)
@property
def plaintext(self) -> str:
return getattr(self, self.__plaintext__)
@plaintext.setter
def plaintext(self, value: str) -> None:
return setattr(self, self.__plaintext__, value)
@property
def ciphertext(self) -> Tuple[bytes, Sequence[str]]:
raise NotImplementedError("Encryptable must implement `ciphertext` property")
@ciphertext.setter
def ciphertext(self, value: Tuple[bytes, Sequence[str]]) -> None:
raise NotImplementedError("Encryptable must implement `ciphertext` property")
@classmethod
def register(cls, encryptor: Encryptor):
"""
Register this encryptable with an encryptor.
Instances of this encryptor will be encrypted on initialization and decrypted on load.
"""
# save the current encryptor statically
cls.__encryptor__ = encryptor
# remove any existing registrations for the same function
if contains(cls, "init", on_init):
remove(cls, "init", on_init)
if contains(cls, "load", on_load):
remove(cls, "load", on_load)
# register the above functions; it's quite important that these are not closures,
# hence the __encryptor__ hack
listen(cls, "init", on_init)
listen(cls, "load", on_load)
class EncryptedMixin:
"""
A mixin that includes ciphertext and an array of key ids.
"""
# save the encrypted data as unindexed binary
ciphertext = Column(LargeBinary, nullable=False)
# save the encryption key ids in an indexed column for future re-keying
key_ids = Column(ARRAY(String), nullable=False, index=True)
|
Python
| 0.000002
|
@@ -787,16 +787,17 @@
operty.%0A
+%0A
%22%22%22%0A
@@ -3570,63 +3570,199 @@
#
-remove any existing registrations for the same func
+NB: we cannot use the before_insert listener in conjunction with a foreign key relationship%0A # for encrypted data; SQLAlchemy will warn about using 'related attribute set' opera
tion
+ so
%0A
@@ -3770,271 +3770,661 @@
-if contains(cls, %22init%22, on_init):%0A remove(cls, %22init%22, on_init)%0A%0A if contains(cls, %22load%22, on_load):%0A remove(cls, %22load%22, on_load)%0A%0A # register the above functions; it's quite important that these are not closures,%0A # h
+# late in its insert/flush process.%0A listeners = dict(%0A init=on_init,%0A load=on_load,%0A )%0A%0A for name, func in listeners.items():%0A # If we initialize the graph multiple times (as in many unit testing scenarios),%0A # we will accumulate listener functions -- with unpredictable results. As protection,%0A # we need to remove existing listeners before adding new ones; this solution only%0A # works if the id (e.g. memory address) of the listener does not change, which means%0A # they cannot be closures around the %60encryptor%60 reference.%0A #%0A # H
ence
@@ -4428,16 +4428,17 @@
nce the
+%60
__encryp
@@ -4446,51 +4446,110 @@
or__
+%60
hack
-%0A listen(cls, %22init%22, on_init)%0A
+ above...%0A if contains(cls, name, func):%0A remove(cls, name, func)%0A
@@ -4568,23 +4568,18 @@
ls,
-%22load%22, on_load
+name, func
)%0A%0A%0A
|
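The refactor loops over a name-to-function mapping and calls contains()/remove() before listen(), so re-registering after a graph re-init never stacks duplicate listeners. A sketch of the same pattern against a bare declarative model; it assumes SQLAlchemy 1.4+, and the model itself is illustrative:

from sqlalchemy import Column, Integer, String
from sqlalchemy.event import contains, listen, remove
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Secret(Base):
    __tablename__ = "secret"
    id = Column(Integer, primary_key=True)
    value = Column(String)

def on_init(target, args, kwargs):
    pass  # encryption would happen here

def on_load(target, context):
    pass  # decryption would happen here

def register(cls):
    listeners = dict(init=on_init, load=on_load)
    for name, func in listeners.items():
        # Module-level functions keep a stable identity, so contains()
        # can find and drop a prior registration before re-adding it.
        if contains(cls, name, func):
            remove(cls, name, func)
        listen(cls, name, func)

register(Secret)
register(Secret)  # idempotent: no duplicate listeners accumulate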
5d393ff5c007bafb731aaf703a5225081b99f69a
|
Align the add/remove URL with the filter URL
|
cotracker/cotracker/urls.py
|
cotracker/cotracker/urls.py
|
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.contrib import admin
from checkouts.views import (
PilotList,
PilotDetail,
AirstripList,
AirstripDetail,
BaseList,
BaseAttachedDetail,
BaseUnattachedDetail,
FilterFormView,
CheckoutUpdateFormView,
)
admin.autodiscover()
urlpatterns = patterns('',
url(r'^emerald/', include(admin.site.urls)),
)
urlpatterns += patterns('',
url(
regex=r'^pilots/$',
view=PilotList.as_view(),
name='pilot_list',
),
url(
regex=r'^pilots/(?P<username>\w+)/$',
view=PilotDetail.as_view(),
name='pilot_detail',
),
url(
regex=r'^airstrips/$',
view=AirstripList.as_view(),
name='airstrip_list',
),
url(
regex=r'^airstrips/(?P<ident>\w+)/$',
view=AirstripDetail.as_view(),
name='airstrip_detail',
),
url(
regex=r'^bases/$',
view=BaseList.as_view(),
name='base_list',
),
url(
regex=r'^bases/(?P<ident>\w+)/attached/$',
view=BaseAttachedDetail.as_view(),
name='base_attached_detail',
),
url(
regex=r'^bases/(?P<ident>\w+)/unattached/$',
view=BaseUnattachedDetail.as_view(),
name='base_unattached_detail',
),
url(
regex=r'^checkouts/$',
view=FilterFormView.as_view(),
name='checkout_filter',
),
url(
regex=r'^update/$',
view=CheckoutUpdateFormView.as_view(),
name='checkout_update',
),
)
if settings.SERVE_STATIC:
urlpatterns += patterns('',
url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT,})
)
|
Python
| 0.00003
|
@@ -1453,22 +1453,30 @@
egex=r'%5E
-update
+checkouts/edit
/$',%0A%09vi
|
a40ae461472559e2b8740ff1be0b1205254520a1
|
Add a manager to centralize webhook API calls
|
shopify/webhooks/models.py
|
shopify/webhooks/models.py
|
from __future__ import unicode_literals
import logging
import uuid
from django.contrib.sites.models import Site
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
import requests
from .utils import shopify_api
logger = logging.getLogger(__name__)
@python_2_unicode_compatible
class Webhook(models.Model):
TOPIC_CHOICES = (
('orders/create', 'Order creation'),
('orders/delete', 'Order deletion'),
('orders/updated', 'Order update'),
('orders/paid', 'Order payment'),
('orders/cancelled', 'Order cancellation'),
('orders/fulfilled', 'Order fulfillment'),
('carts/create', 'Cart creation'),
('carts/update', 'Cart update'),
('checkouts/create', 'Checkout creation'),
('checkouts/update', 'Checkout update'),
('checkouts/delete', 'Checkout deletion'),
('refunds/create', 'Refund create'),
('products/create', 'Product creation'),
('products/update', 'Product update'),
('products/delete', 'Product deletion'),
('collections/create', 'Collection creation'),
('collections/update', 'Collection update'),
('collections/delete', 'Collection deletion'),
('customer_groups/create', 'Customer group creation'),
('customer_groups/update', 'Customer group update'),
('customer_groups/delete', 'Customer group deletion'),
('customers/create', 'Customer creation'),
('customers/enable', 'Customer enable'),
('customers/disable', 'Customer disable'),
('customers/update', 'Customer update'),
('customers/delete', 'Customer deletion'),
('fulfillments/create', 'Fulfillment creation'),
('fulfillments/update', 'Fulfillment update'),
('shop/update', 'Shop update'),
)
# Automatically generated GUID for the local webhook. This
# GUID is also used to construct a unique URL.
id = models.CharField(primary_key=True, default=uuid.uuid4,
max_length=36, editable=False)
# An accepted event that will trigger the webhook
topic = models.CharField(max_length=32, choices=TOPIC_CHOICES)
# A unique Shopify ID for the webhook
webhook_id = models.IntegerField(editable=False)
def __str__(self):
return self.path
def save(self, *args, **kwargs):
if not self.webhook_id:
self.create()
super(Webhook, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
if self.webhook_id:
self.remove()
super(Webhook, self).delete(*args, **kwargs)
@property
def path(self):
return "/%s/%s/" % (self.topic, self.id)
def get_absolute_url(self):
base = 'https://%s' % Site.objects.get_current().domain
return base + self.path
def create(self):
payload = {'webhook': {'topic': self.topic,
'address': self.get_absolute_url(),
'format': 'json'}}
try:
resp = requests.post(shopify_api('/admin/webhooks.json'),
json=payload)
resp.raise_for_status()
webhook_id = resp.json()['webhook']['id']
except requests.exceptions.RequestException:
logger.error("Webhook creation returned %s: %s" % (resp.status_code,
resp.text))
else:
self.webhook_id = webhook_id
def remove(self):
try:
resp = requests.delete(shopify_api('/admin/webhooks/%d.json' % self.webhook_id))
resp.raise_for_status()
except requests.exceptions.RequestException:
logger.error("Webhook removal returned %s: %s" % (resp.status_code,
resp.text))
|
Python
| 0.000001
|
@@ -288,16 +288,233 @@
me__)%0A%0A%0A
+class WebhookManager(models.Manager):%0A def register(self):%0A for webhook in self.all():%0A webhook.register()%0A%0A def remove(self):%0A for webhook in self.all():%0A webhook.remove()%0A%0A%0A
@python_
@@ -2049,16 +2049,48 @@
%0A )%0A%0A
+ objects = WebhookManager()%0A%0A
# Au
@@ -2670,22 +2670,24 @@
self.
-c
re
-a
+gis
te
+r
()%0A
@@ -3101,22 +3101,24 @@
def
-c
re
-a
+gis
te
+r
(self):%0A
|
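The new WebhookManager fans register()/remove() out over every stored webhook, giving callers one entry point instead of hand-written loops. A framework-free sketch of that manager pattern; the in-memory collection stands in for Django's models.Manager queryset:

class WebhookManager(object):
    """Fan bulk operations out over every webhook."""

    def __init__(self, webhooks):
        self._webhooks = webhooks

    def all(self):
        return list(self._webhooks)

    def register(self):
        for webhook in self.all():
            webhook.register()

    def remove(self):
        for webhook in self.all():
            webhook.remove()

class FakeWebhook(object):
    def __init__(self, topic):
        self.topic = topic

    def register(self):
        print("registering %s" % self.topic)

    def remove(self):
        print("removing %s" % self.topic)

objects = WebhookManager([FakeWebhook("orders/create"), FakeWebhook("shop/update")])
objects.register()
objects.remove()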
220f4f7d2d5e94760576cddb607478ef7345a901
|
add xtheme plugin to render only products which have a discount
|
shuup/discounts/plugins.py
|
shuup/discounts/plugins.py
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2019, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from django import forms
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from shuup.discounts.models import Discount
from shuup.front.template_helpers.general import get_listed_products
from shuup.xtheme import TemplatedPlugin
from shuup.xtheme.plugins.forms import GenericPluginForm, TranslatableField
class ProductSelectionConfigForm(GenericPluginForm):
"""
A configuration form for the DiscountedProductsPlugin
"""
def populate(self):
"""
A custom populate method to display product choices
"""
for field in self.plugin.fields:
if isinstance(field, tuple):
name, value = field
value.initial = self.plugin.config.get(name, value.initial)
self.fields[name] = value
discounts_qs = Discount.objects.filter(
Q(shops=self.request.shop, active=True),
Q(Q(product__isnull=False) | Q(category__isnull=False, exclude_selected_category=False))
)
self.fields["discounts"] = forms.ModelMultipleChoiceField(
queryset=discounts_qs,
label=_("Discounts"),
help_text=_(
"Select all discounts to render products from. Only active discounts that have "
"product or category linked are available."
),
required=True,
initial=self.plugin.config.get("discounts", None)
)
def clean(self):
"""
A custom clean method to transform selected discounts into a list of ids
"""
cleaned_data = super(ProductSelectionConfigForm, self).clean()
if cleaned_data.get("discounts"):
cleaned_data["discounts"] = [discount.pk for discount in cleaned_data["discounts"]]
return cleaned_data
class DiscountedProductsPlugin(TemplatedPlugin):
identifier = "discount_product"
name = _("Discounted Products")
template_name = "shuup/discounts/product_discount_plugin.jinja"
editor_form_class = ProductSelectionConfigForm
fields = [
("title", TranslatableField(label=_("Title"), required=False, initial="")),
("count", forms.IntegerField(label=_("Count"), min_value=1, initial=4)),
("orderable_only", forms.BooleanField(
label=_("Only show in-stock and orderable items"),
help_text=_(
"Warning: The final number of products can be lower than 'Count' "
"as it will filter out unorderable products from a set of 'Count' products."
),
initial=True, required=False
))
]
def get_context_data(self, context):
count = self.config.get("count", 4)
orderable_only = self.config.get("orderable_only", True)
discounts = self.config.get("discounts")
if discounts:
# make sure to have only available discounts
discounts = Discount.objects.available(shop=context["request"].shop).filter(pk__in=discounts)
extra_filters = Q(
Q(product_discounts__in=discounts) | Q(shop_products__categories__category_discounts__in=discounts)
)
products = get_listed_products(context, count, orderable_only=orderable_only, extra_filters=extra_filters)
else:
products = []
return {
"request": context["request"],
"title": self.get_translated_value("title"),
"products": products
}
|
Python
| 0
|
@@ -3078,16 +3078,38 @@
counts%22)
+%0A products = %5B%5D
%0A%0A
@@ -3570,48 +3570,8 @@
ers)
-%0A else:%0A products = %5B%5D
%0A%0A
|
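Besides wiring up the plugin, the diff hoists the products = [] default above the branch so the else arm disappears. A tiny framework-free sketch of that default-first pattern; fake_fetch stands in for the Q-filtered get_listed_products call:

def get_discounted_products(discount_ids, fetch):
    """Return discounted products; empty input short-circuits to []."""
    products = []  # default first, so no else branch is needed
    if discount_ids:
        products = fetch(discount_ids)
    return products

def fake_fetch(ids):
    return ["product-%d" % i for i in ids]

assert get_discounted_products([], fake_fetch) == []
assert get_discounted_products([1, 2], fake_fetch) == ["product-1", "product-2"]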