| commit | subject | old_file | new_file | old_contents | lang | proba | diff |
|---|---|---|---|---|---|---|---|
de2aab06efd9cc5673ad517d453e5f660ef6fcf7
|
Disable test_socket_ssl timeout test on Windows.
|
Lib/test/test_socket_ssl.py
|
Lib/test/test_socket_ssl.py
|
# Test just the SSL support in the socket module, in a moderately bogus way.
from test import test_support
import socket
# Optionally test SSL support. This requires the 'network' resource as given
# on the regrtest command line.
skip_expected = not (test_support.is_resource_enabled('network') and
hasattr(socket, "ssl"))
def test_basic():
test_support.requires('network')
import urllib
socket.RAND_status()
try:
socket.RAND_egd(1)
except TypeError:
pass
else:
print "didn't raise TypeError"
socket.RAND_add("this is a random string", 75.0)
f = urllib.urlopen('https://sf.net')
buf = f.read()
f.close()
def test_timeout():
test_support.requires('network')
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(30.0)
# connect to service which issues a welcome banner (without need to write anything)
s.connect(("gmail.org", 995))
ss = socket.ssl(s)
# read part of return welcome banner twice
ss.read(1)
ss.read(1)
s.close()
def test_rude_shutdown():
try:
import threading
except ImportError:
return
# Some random port to connect to.
PORT = 9934
listener_ready = threading.Event()
listener_gone = threading.Event()
# `listener` runs in a thread. It opens a socket listening on PORT, and
# sits in an accept() until the main thread connects. Then it rudely
# closes the socket, and sets Event `listener_gone` to let the main thread
# know the socket is gone.
def listener():
s = socket.socket()
s.bind(('', PORT))
s.listen(5)
listener_ready.set()
s.accept()
s = None # reclaim the socket object, which also closes it
listener_gone.set()
def connector():
listener_ready.wait()
s = socket.socket()
s.connect(('localhost', PORT))
listener_gone.wait()
try:
ssl_sock = socket.ssl(s)
except socket.sslerror:
pass
else:
raise test_support.TestFailed(
'connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
connector()
t.join()
def test_main():
if not hasattr(socket, "ssl"):
raise test_support.TestSkipped("socket module has no ssl support")
test_rude_shutdown()
test_basic()
test_timeout()
if __name__ == "__main__":
test_main()
|
Python
| 0
|
@@ -71,16 +71,27 @@
s way.%0A%0A
+import sys%0A
from tes
@@ -697,24 +697,67 @@
f.close()%0A%0A
+if not sys.platform.startswith('win'):%0A
def test_tim
@@ -760,24 +760,28 @@
_timeout():%0A
+
test_sup
@@ -806,24 +806,28 @@
work')%0A%0A
+
+
s = socket.s
@@ -872,16 +872,20 @@
AM)%0A
+
+
s.settim
@@ -895,16 +895,20 @@
t(30.0)%0A
+
# co
@@ -988,16 +988,20 @@
ything)%0A
+
s.co
@@ -1026,16 +1026,20 @@
, 995))%0A
+
ss =
@@ -1049,24 +1049,28 @@
cket.ssl(s)%0A
+
# read p
@@ -1147,16 +1147,20 @@
r twice%0A
+
ss.r
@@ -1166,24 +1166,28 @@
read(1)%0A
+
+
ss.read(1)%0A
@@ -1181,24 +1181,28 @@
ss.read(1)%0A
+
s.close(
|
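Decoding the percent-escaped hunks above (`%0A` is a newline; `+` lines are insertions), the patch adds an `import sys` near the top of the file and wraps `test_timeout` in a platform guard so the timeout test never runs on Windows. A rough reconstruction of the patched region, with indentation inferred since the escaped diff trims leading whitespace:

```python
import sys

# ... unchanged code ...

if not sys.platform.startswith('win'):
    def test_timeout():
        test_support.requires('network')
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(30.0)
        # connect to service which issues a welcome banner (without need to write anything)
        s.connect(("gmail.org", 995))
        ss = socket.ssl(s)
        # read part of return welcome banner twice
        ss.read(1)
        ss.read(1)
        s.close()
```

The visible hunks cover only the function definition; the `test_timeout()` call in `test_main` lies outside the diffed region.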
abafd85eaa0179451ed8a26b4d2aed73de38b824
|
Replace QuantityResult class by namedtuple in cart
|
satchless/cart/models.py
|
satchless/cart/models.py
|
# -*- coding: utf-8 -*-
from decimal import Decimal
from django.conf import settings
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ObjectDoesNotExist
import random
from ..item import ItemSet, ItemLine
from ..util.models import DeferredForeignKey
from . import signals
def get_default_currency():
return settings.SATCHLESS_DEFAULT_CURRENCY
class QuantityResult(object):
def __init__(self, cart_item, new_quantity, quantity_delta, reason=None):
self.cart_item = cart_item
self.new_quantity = new_quantity
self.quantity_delta = quantity_delta
self.reason = reason
class Cart(models.Model, ItemSet):
owner = models.ForeignKey(User, null=True, blank=True, related_name='+')
currency = models.CharField(_("currency"), max_length=3,
default=get_default_currency)
token = models.CharField(max_length=32, blank=True, default='')
class Meta:
abstract = True
def __repr__(self):
return 'Cart(owner=%r, currency=%r, token=%r)' % (
self.owner, self.currency, self.token)
def __iter__(self):
for i in self.get_all_items():
yield i
def save(self, *args, **kwargs):
if not self.token:
for i in xrange(100):
token = ''.join(
random.sample('0123456789abcdefghijklmnopqrstuvwxyz', 32))
if not type(self).objects.filter(token=token).exists():
self.token = token
break
return super(Cart, self).save(*args, **kwargs)
def add_item(self, variant, quantity, dry_run=False, **kwargs):
variant = variant.get_subtype_instance()
quantity = variant.product.quantize_quantity(quantity)
try:
item = self.get_item(variant=variant, **kwargs)
old_qty = item.quantity
except ObjectDoesNotExist:
item = None
old_qty = Decimal(0)
quantity += old_qty
result = []
reason = u""
signals.cart_quantity_change_check.send(sender=type(self),
instance=self,
variant=variant,
old_quantity=old_qty,
new_quantity=quantity,
result=result)
assert len(result) <= 1
if len(result) == 1:
quantity, reason = result[0]
if not dry_run:
if not quantity:
if item:
item.delete()
else:
if item:
item.quantity = quantity
item.save()
else:
item = self.items.create(variant=variant, quantity=quantity,
**kwargs)
signals.cart_content_changed.send(sender=type(self), instance=self)
return QuantityResult(item, quantity, quantity - old_qty, reason)
def get_default_currency(self, **kwargs):
return self.currency
def get_item(self, **kwargs):
return self.items.get(**kwargs)
def get_all_items(self):
return list(self.items.all())
def replace_item(self, variant, quantity, dry_run=False, **kwargs):
variant = variant.get_subtype_instance()
quantity = variant.product.quantize_quantity(quantity)
result = []
reason = u""
try:
item = self.get_item(variant=variant, **kwargs)
old_qty = item.quantity
except ObjectDoesNotExist:
item = None
old_qty = Decimal(0)
signals.cart_quantity_change_check.send(sender=type(self),
instance=self,
variant=variant,
old_quantity=old_qty,
new_quantity=quantity,
result=result,
**kwargs)
assert len(result) <= 1
if len(result) == 1:
quantity, reason = result[0]
if not dry_run:
if not quantity:
if item:
item.delete()
else:
if item:
item.quantity = quantity
item.save()
else:
item = self.items.create(variant=variant, quantity=quantity,
**kwargs)
signals.cart_content_changed.send(sender=type(self), instance=self)
return QuantityResult(item, quantity, quantity - old_qty, reason)
def get_quantity(self, variant, **kwargs):
try:
return self.get_item(variant=variant, **kwargs).quantity
except ObjectDoesNotExist:
return Decimal('0')
def is_empty(self):
return not self.items.exists()
class CartItem(models.Model, ItemLine):
cart = DeferredForeignKey('cart', related_name='items', editable=False)
variant = DeferredForeignKey('variant', related_name='+', editable=False)
quantity = models.DecimalField(_("quantity"), max_digits=10,
decimal_places=4)
class Meta:
abstract = True
unique_together = ('cart', 'variant')
def __unicode__(self):
return u"%s × %.10g" % (self.variant, self.quantity)
def get_price_per_item(self, **kwargs):
return self.variant.get_subtype_instance().get_price(**kwargs)
def get_quantity(self, **kwargs):
return self.quantity
def save(self, *args, **kwargs):
assert self.quantity > 0
return super(CartItem, self).save(*args, **kwargs)
|
Python
| 0.000283
|
@@ -45,16 +45,51 @@
Decimal%0A
+from collections import namedtuple%0A
from dja
@@ -494,22 +494,16 @@
RENCY%0A%0A%0A
-class
Quantity
@@ -512,42 +512,42 @@
sult
-(object):%0A%0A def __init__(self,
+ = namedtuple('QuantityResult', %5B'
cart
@@ -551,18 +551,20 @@
art_item
+'
,
+'
new_quan
@@ -571,189 +571,86 @@
tity
-, quantity_delta, reason=None):%0A self.cart_item = cart_item%0A self.new_quantity = new_quantity%0A self.quantity_delta = quantity_delta%0A self.reason = reason
+',%0A 'quantity_delta', 'reason'%5D)
%0A%0A%0Ac
|
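Decoded, the hunks swap the hand-rolled `QuantityResult` class for a `namedtuple` with the same four fields, which preserves attribute access at both `return QuantityResult(...)` call sites while adding tuple unpacking and a free `__repr__`:

```python
from collections import namedtuple

QuantityResult = namedtuple('QuantityResult', ['cart_item', 'new_quantity',
                                               'quantity_delta', 'reason'])
```

One subtle difference: the old class defaulted `reason` to `None`, whereas this `namedtuple` requires all four values at construction; both call sites in the file already pass a `reason`, so behavior is unchanged.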
32ab2353d7a7f64300445688b0bd583fbe1a13fb
|
Improve keystone.conf [endpoint_policy] documentation
|
keystone/conf/endpoint_policy.py
|
keystone/conf/endpoint_policy.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from keystone.conf import utils
enabled = cfg.BoolOpt(
'enabled',
default=True,
deprecated_for_removal=True,
deprecated_reason=utils.fmt("""
The option to enable the OS-ENDPOINT-POLICY extension has been deprecated in
the M release and will be removed in the O release. The OS-ENDPOINT-POLICY
extension will be enabled by default.
"""),
help=utils.fmt("""
Enable endpoint_policy functionality.
"""))
driver = cfg.StrOpt(
'driver',
default='sql',
help=utils.fmt("""
Entrypoint for the endpoint policy backend driver in the
keystone.endpoint_policy namespace.
"""))
GROUP_NAME = __name__.split('.')[-1]
ALL_OPTS = [
enabled,
driver,
]
def register_opts(conf):
conf.register_opts(ALL_OPTS, group=GROUP_NAME)
def list_opts():
return {GROUP_NAME: ALL_OPTS}
|
Python
| 0.000001
|
@@ -771,16 +771,20 @@
-POLICY
+API
extensio
@@ -808,12 +808,12 @@
ated
- in%0A
+%0Ain
the
@@ -883,16 +883,20 @@
-POLICY%0A
+API
extensio
@@ -965,25 +965,25 @@
ble endpoint
-_
+-
policy funct
@@ -990,16 +990,125 @@
ionality
+, which allows policies to be associated%0Awith either specific endpoints, or endpoints of a given service type
.%0A%22%22%22))%0A
@@ -1190,16 +1190,17 @@
%22%22%0AEntry
+
point fo
@@ -1225,16 +1225,8 @@
icy
-backend
driv
@@ -1234,17 +1234,18 @@
r in the
-%0A
+ %60
keystone
@@ -1260,17 +1260,18 @@
t_policy
-
+%60%0A
namespac
@@ -1272,16 +1272,142 @@
mespace.
+ Only a %60sql%60 driver is provided by keystone, so there is no reason%0Ato set this unless you are providing a custom entry point.
%0A%22%22%22))%0A%0A
|
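Applied to the two options, the escaped hunks yield roughly the following definitions (whitespace inferred). The changes rename the deprecated feature to the "OS-ENDPOINT-POLICY API extension", explain what the `enabled` flag actually does, and document that only a `sql` driver ships with keystone:

```python
enabled = cfg.BoolOpt(
    'enabled',
    default=True,
    deprecated_for_removal=True,
    deprecated_reason=utils.fmt("""
The option to enable the OS-ENDPOINT-POLICY API extension has been deprecated
in the M release and will be removed in the O release. The OS-ENDPOINT-POLICY
API extension will be enabled by default.
"""),
    help=utils.fmt("""
Enable endpoint-policy functionality, which allows policies to be associated
with either specific endpoints, or endpoints of a given service type.
"""))

driver = cfg.StrOpt(
    'driver',
    default='sql',
    help=utils.fmt("""
Entry point for the endpoint policy driver in the `keystone.endpoint_policy`
namespace. Only a `sql` driver is provided by keystone, so there is no reason
to set this unless you are providing a custom entry point.
"""))
```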
46070ee78862c18563297ceef4892394d3c065b6
|
Fix EC2 script to write to /dev/xvdb
|
fedimg/services/ec2.py
|
fedimg/services/ec2.py
|
#!/bin/env python
# -*- coding: utf8 -*-
import os
import subprocess
from libcloud.compute.base import NodeImage
from libcloud.compute.deployment import MultiStepDeployment
from libcloud.compute.deployment import ScriptDeployment, SSHKeyDeployment
from libcloud.compute.providers import get_driver
from libcloud.compute.types import Provider, DeploymentException
import fedimg
import fedimg.messenger
class EC2ServiceException(Exception):
""" Custom exception for EC2Service. """
pass
class EC2Service(object):
""" A class for interacting with an EC2 connection. """
def __init__(self):
# Will be a list of dicts. Dicts will contain AMI info.
self.amis = list()
for line in fedimg.AWS_AMIS.split('\n'):
""" Each line in AWS_AMIS has pipe-delimited attributes at these indicies:
0: region (ex. eu-west-1)
1: OS (ex. Fedora)
2: version (ex. 20)
3: arch (i386 or x86_64)
4: ami name (ex. ami-68e3d32d) """
# strip line to avoid any newlines or spaces from sneaking in
attrs = line.strip().split('|')
info = {'region': attrs[0],
'prov': self._region_to_provider(attrs[0]),
'os': attrs[1],
'ver': attrs[2],
'arch': attrs[3],
'ami': attrs[4]}
self.amis.append(info)
def _region_to_provider(self, region):
""" Takes a region name (ex. 'eu-west-1') and returns
the appropriate libcloud provider value. """
providers = {'ap-northeast-1': Provider.EC2_AP_NORTHEAST,
'ap-southeast-1': Provider.EC2_AP_SOUTHEAST,
'ap-southeast-2': Provider.EC2_AP_SOUTHEAST2,
'eu-west-1': Provider.EC2_EU_WEST,
'sa-east-1': Provider.EC2_SA_EAST,
'us-east-1': Provider.EC2_US_EAST,
'us-west-1': Provider.EC2_US_WEST,
'us-west-2': Provider.EC2_US_WEST_OREGON}
return providers[region]
def upload(self, raw_url):
""" Takes a URL to a .raw.xz file and registers it as an AMI in each
EC2 region. """
ami = self.amis[0] # DEBUG (us east x86_64)
cls = get_driver(ami['prov'])
driver = cls(fedimg.AWS_ACCESS_ID, fedimg.AWS_SECRET_KEY)
# select the desired node attributes
sizes = driver.list_sizes()
size_id = 'm1.large'
# check to make sure we have access to that size node
size = [s for s in sizes if s.id == size_id][0]
image = NodeImage(id=ami['ami'], name=None, driver=driver)
# deploy node
name = 'fedimg AMI builder' # TODO: will add raw image title
# TODO: Make automatically-created /dev/sda be deleted on termination
mappings = [{'VirtualName': None,
'Ebs': {'VolumeSize': 12, # 12 GB should be enough
'VolumeType': 'standard',
'DeleteOnTermination': 'true'},
'DeviceName': '/dev/sdb'}]
# read in ssh key
with open(fedimg.AWS_PUBKEYPATH, 'rb') as f:
key_content = f.read()
# Add key to authorized keys for root user
step_1 = SSHKeyDeployment(key_content)
# Add script for deploymentA
script = "sudo curl {0} | xzcat > /dev/sdb".format(raw_url)
step_2 = ScriptDeployment(script)
# Create deployment object
msd = MultiStepDeployment([step_1, step_2])
# Fedmsg info
file_name = raw_url.split('/')[-1]
destination = 'EC2 ({region})'.format(region=ami['region'])
fedimg.messenger.message(file_name, destination,
'started')
try:
# Must be EBS-backed for AMI registration to work.
node = driver.deploy_node(name=name, image=image, size=size,
ssh_username='fedora',
ssh_alternate_usernames=['root'],
ssh_key=fedimg.AWS_KEYPATH,
deploy=msd,
ex_keyname=fedimg.AWS_KEYNAME,
ex_security_groups=['ssh'],
ex_ebs_optimized=True,
ex_blockdevicemappings=mappings)
fedimg.messenger.message(file_name, destination,
'completed')
except DeploymentException as e:
fedimg.messenger.message(file_name, destination,
'failed')
print "Problem deploying node: {}".format(e.value)
print "Terminating instance."
driver.destroy_node(e.node)
# register that volume as an AMI, possibly after snapshotting it
# emit a fedmsg, etc
|
Python
| 0
|
@@ -3387,17 +3387,91 @@
ployment
-A
+%0A # Device becomes /dev/xvdb on instance due to recent kernel change
%0A
@@ -3510,17 +3510,18 @@
%3E /dev/
-s
+xv
db%22.form
|
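Decoded, the patch fixes the stray `A` at the end of the deployment comment and, more importantly, retargets the write: the block-device mapping is requested as `/dev/sdb`, but on instances with recent kernels the volume surfaces as `/dev/xvdb`, so the streamed image must be written there:

```python
# Add script for deployment
# Device becomes /dev/xvdb on instance due to recent kernel change
script = "sudo curl {0} | xzcat > /dev/xvdb".format(raw_url)
step_2 = ScriptDeployment(script)
```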
5dcef138bf9addd53a9abc80c5436ea948bb22d0
|
Fix loading scheduler from subfolder (#1319)
|
src/diffusers/schedulers/scheduling_utils_flax.py
|
src/diffusers/schedulers/scheduling_utils_flax.py
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import os
from dataclasses import dataclass
from typing import Any, Dict, Optional, Tuple, Union
import jax.numpy as jnp
from ..utils import _COMPATIBLE_STABLE_DIFFUSION_SCHEDULERS, BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"
_FLAX_COMPATIBLE_STABLE_DIFFUSION_SCHEDULERS = ["Flax" + c for c in _COMPATIBLE_STABLE_DIFFUSION_SCHEDULERS]
@dataclass
class FlaxSchedulerOutput(BaseOutput):
"""
Base class for the scheduler's step function output.
Args:
prev_sample (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
"""
prev_sample: jnp.ndarray
class FlaxSchedulerMixin:
"""
Mixin containing common functions for the schedulers.
Class attributes:
- **_compatibles** (`List[str]`) -- A list of classes that are compatible with the parent class, so that
`from_config` can be used from a class different than the one used to save the config (should be overridden
by parent class).
"""
config_name = SCHEDULER_CONFIG_NAME
_compatibles = []
has_compatibles = True
@classmethod
def from_pretrained(
cls,
pretrained_model_name_or_path: Dict[str, Any] = None,
subfolder: Optional[str] = None,
return_unused_kwargs=False,
**kwargs,
):
r"""
Instantiate a Scheduler class from a pre-defined JSON-file.
Parameters:
pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):
Can be either:
- A string, the *model id* of a model repo on huggingface.co. Valid model ids should have an
organization name, like `google/ddpm-celebahq-256`.
- A path to a *directory* containing model weights saved using [`~SchedulerMixin.save_pretrained`],
e.g., `./my_model_directory/`.
subfolder (`str`, *optional*):
In case the relevant files are located inside a subfolder of the model repo (either remote in
huggingface.co or downloaded locally), you can specify the folder name here.
return_unused_kwargs (`bool`, *optional*, defaults to `False`):
Whether kwargs that are not consumed by the Python class should be returned or not.
cache_dir (`Union[str, os.PathLike]`, *optional*):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
resume_download (`bool`, *optional*, defaults to `False`):
Whether or not to delete incompletely received files. Will attempt to resume the download if such a
file exists.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
output_loading_info(`bool`, *optional*, defaults to `False`):
Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
local_files_only(`bool`, *optional*, defaults to `False`):
Whether or not to only look at local files (i.e., do not try to download the model).
use_auth_token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
when running `transformers-cli login` (stored in `~/.huggingface`).
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
<Tip>
It is required to be logged in (`huggingface-cli login`) when you want to use private or [gated
models](https://huggingface.co/docs/hub/models-gated#gated-models).
</Tip>
<Tip>
Activate the special ["offline-mode"](https://huggingface.co/transformers/installation.html#offline-mode) to
use this method in a firewalled environment.
</Tip>
"""
config, kwargs = cls.load_config(
pretrained_model_name_or_path=pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
)
scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)
if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
state = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
"""
Save a scheduler configuration object to the directory `save_directory`, so that it can be re-loaded using the
[`~FlaxSchedulerMixin.from_pretrained`] class method.
Args:
save_directory (`str` or `os.PathLike`):
Directory where the configuration JSON file will be saved (will be created if it does not exist).
"""
self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)
@property
def compatibles(self):
"""
Returns all schedulers that are compatible with this scheduler
Returns:
`List[SchedulerMixin]`: List of compatible schedulers
"""
return self._get_compatibles()
@classmethod
def _get_compatibles(cls):
compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
diffusers_library = importlib.import_module(__name__.split(".")[0])
compatible_classes = [
getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
]
return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
assert len(shape) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
|
Python
| 0
|
@@ -5517,16 +5517,61 @@
or_path,
+%0A subfolder=subfolder,%0A
return_
@@ -5581,32 +5581,44 @@
sed_kwargs=True,
+%0A
**kwargs%0A
@@ -5610,16 +5610,17 @@
**kwargs
+,
%0A
|
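Decoded, the fix forwards the previously ignored `subfolder` argument into `cls.load_config`, so a scheduler config stored under a repo subfolder is actually looked up there:

```python
config, kwargs = cls.load_config(
    pretrained_model_name_or_path=pretrained_model_name_or_path,
    subfolder=subfolder,
    return_unused_kwargs=True,
    **kwargs,
)
```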
06cc18240422b042d87d05029f5a429be166cb59
|
Remove overeager trailing commas
|
tools/scenario-player/scenario_player/tasks/channels.py
|
tools/scenario-player/scenario_player/tasks/channels.py
|
from scenario_player.exceptions import ScenarioAssertionError
from .raiden_api import RaidenAPIActionTask
class OpenChannelTask(RaidenAPIActionTask):
_name = 'open_channel'
_url_template = '{protocol}://{target_host}/api/1/channels'
_method = 'put'
@property
def _request_params(self):
if isinstance(self._config['to'], str) and len(self._config['to']) == 42:
partner_address = self._config['to']
else:
partner_address = self._runner.get_node_address(self._config['to'])
params = dict(
token_address=self._runner.token_address,
partner_address=partner_address,
)
total_deposit = self._config.get('total_deposit')
if total_deposit is not None:
params['total_deposit'] = total_deposit
return params
class ChannelActionTask(RaidenAPIActionTask):
_url_template = '{protocol}://{target_host}/api/1/channels/{token_address}/{partner_address}'
_method = 'patch'
@property
def _url_params(self):
if isinstance(self._config['to'], str) and len(self._config['to']) == 42:
partner_address = self._config['to']
else:
partner_address = self._runner.get_node_address(self._config['to'])
return dict(
token_address=self._runner.token_address,
partner_address=partner_address,
)
class CloseChannelTask(ChannelActionTask):
_name = 'close_channel'
@property
def _request_params(self):
return dict(state='closed')
class DepositTask(ChannelActionTask):
_name = 'deposit'
@property
def _request_params(self):
return dict(total_deposit=self._config['total_deposit'])
class TransferTask(ChannelActionTask):
_name = 'transfer'
_url_template = '{protocol}://{target_host}/api/1/payments/{token_address}/{partner_address}'
_method = 'post'
@property
def _request_params(self):
return dict(amount=self._config['amount'])
class AssertTask(ChannelActionTask):
_name = 'assert'
_method = 'get'
def _process_response(self, response_dict: dict):
response_dict = super()._process_response(response_dict)
for field in ['balance', 'total_deposit', 'state']:
if field not in self._config:
continue
if field not in response_dict:
raise ScenarioAssertionError(
f'Field "{field}" is missing in channel: {response_dict}',
)
if response_dict[field] != self._config[field]:
raise ScenarioAssertionError(
f'Value mismatch for "{field}". '
f'Should: "{self._config[field]}" '
f'Is: "{response_dict[field]}" '
f'Channel: {response_dict}',
)
class AssertAllTask(ChannelActionTask):
_name = 'assert_all'
_url_template = '{protocol}://{target_host}/api/1/channels/{token_address}'
_method = 'get'
@property
def _url_params(self):
return dict(token_address=self._runner.token_address)
def _process_response(self, response_dict: dict):
response_dict = super()._process_response(response_dict)
channel_count = len(response_dict)
for field in ['balance', 'total_deposit', 'state']:
# The task parameter field names are the plural of the channel field names
assert_field = f'{field}s'
if assert_field not in self._config:
continue
try:
channel_field_values = [channel[field] for channel in response_dict]
except KeyError:
raise ScenarioAssertionError(
f'Field "{field}" is missing in at least one channel: {response_dict}',
)
assert_field_value_count = len(self._config[assert_field])
if assert_field_value_count != channel_count:
direction = ['many', 'few'][assert_field_value_count < channel_count]
raise ScenarioAssertionError(
f'Assertion field "{field}" has too {direction} values. '
f'Have {channel_count} channels but {assert_field_value_count} values.',
)
channel_field_values_all = channel_field_values[:]
for value in self._config[assert_field]:
try:
channel_field_values.remove(value)
except ValueError:
channel_field_values_str = ", ".join(
str(val) for val in channel_field_values_all,
)
assert_field_values_str = ', '.join(
str(val) for val in self._config[assert_field],
)
raise ScenarioAssertionError(
f'Expected value "{value}" for field "{field}" not found in any channel. '
f'Existing values: {channel_field_values_str} '
f'Expected values: {assert_field_values_str}'
f'Channels: {response_dict}',
) from None
if len(channel_field_values) != 0:
raise ScenarioAssertionError(
f'Value mismatch for field "{field}". '
f'Not all values consumed, remaining: {channel_field_values}',
)
|
Python
| 0.998924
|
@@ -4637,17 +4637,16 @@
lues_all
-,
%0A
@@ -4787,17 +4787,16 @@
t_field%5D
-,
%0A
|
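Decoded, both hunks delete a trailing comma after a bare generator expression passed to `str.join`. That comma is more than style: a generator expression must be parenthesized unless it is the sole argument, and the trailing comma makes it no longer sole, which newer Python 3 releases reject as a `SyntaxError`. The fixed calls:

```python
channel_field_values_str = ", ".join(
    str(val) for val in channel_field_values_all
)
assert_field_values_str = ', '.join(
    str(val) for val in self._config[assert_field]
)
```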
29384b927b620b7e943343409f62511451bb3059
|
Fix problem with Hopfield energy function for Python 2.7
|
neupy/algorithms/memory/utils.py
|
neupy/algorithms/memory/utils.py
|
from numpy import where
__all__ = ('sign2bin', 'bin2sign', 'hopfield_energy')
def sign2bin(matrix):
return where(matrix == 1, 1, 0)
def bin2sign(matrix):
return where(matrix == 0, -1, 1)
def hopfield_energy(weight, input_data, output_data):
energy_output = -0.5 * input_data.dot(weight).dot(output_data.T)
return energy_output.item(0)
|
Python
| 0.001066
|
@@ -16,16 +16,66 @@
rt where
+, inner%0Afrom numpy.core.umath_tests import inner1d
%0A%0A%0A__all
@@ -308,31 +308,22 @@
-energy_output =
+return
-0.5 *
inpu
@@ -318,16 +318,24 @@
-0.5 *
+inner1d(
input_da
@@ -348,21 +348,18 @@
(weight)
-.dot(
+,
output_d
@@ -365,41 +365,6 @@
data
-.T)%0A return energy_output.item(0
)%0A
|
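Decoded, the energy calculation collapses into a single row-wise inner product via `inner1d`, replacing the matrix product plus `.item(0)` that the subject line says misbehaved under Python 2.7:

```python
from numpy import where, inner
from numpy.core.umath_tests import inner1d

def hopfield_energy(weight, input_data, output_data):
    return -0.5 * inner1d(input_data.dot(weight), output_data)
```

Note the hunk also imports `inner`, which the visible code never uses; `numpy.core.umath_tests` was the usual home of `inner1d` at the time, though NumPy later deprecated that private module.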
2bd75e14d351ebd076cbcd035145ffb394f90a44
|
Use async memcopy
|
cgi-bin/paint_x2_unet/cgi_exe.py
|
cgi-bin/paint_x2_unet/cgi_exe.py
|
#!/usr/bin/env python
import numpy as np
import chainer
import chainer.functions as F
import chainer.links as L
import six
import os
import cv2
from chainer import cuda, optimizers, serializers, Variable
from chainer import training
from chainer.training import extensions
#from train import Image2ImageDataset
from img2imgDataset import ImageAndRefDataset
import unet
import lnet
class Painter:
def __init__(self, gpu=0):
print("start")
self.root = "./static/images/"
self.batchsize = 1
self.outdir = self.root + "out/"
self.outdir_min = self.root + "out_min/"
self.gpu = gpu
print("load model")
if self.gpu >= 0:
cuda.get_device(self.gpu).use()
cuda.set_max_workspace_size(64 * 1024 * 1024) # 64MB
chainer.Function.type_check_enable = False
self.cnn_128 = unet.UNET()
self.cnn = unet.UNET()
if self.gpu >= 0:
self.cnn_128.to_gpu()
self.cnn.to_gpu()
lnn = lnet.LNET()
#serializers.load_npz("./cgi-bin/wnet/models/model_cnn_128_df_4", cnn_128)
#serializers.load_npz("./cgi-bin/paint_x2_unet/models/model_cnn_128_f3_2", cnn_128)
serializers.load_npz(
"./cgi-bin/paint_x2_unet/models/unet_128_standard", self.cnn_128)
#serializers.load_npz("./cgi-bin/paint_x2_unet/models/model_cnn_128_ua_1", self.cnn_128)
#serializers.load_npz("./cgi-bin/paint_x2_unet/models/model_m_1.6", self.cnn)
serializers.load_npz(
"./cgi-bin/paint_x2_unet/models/unet_512_standard", self.cnn)
#serializers.load_npz("./cgi-bin/paint_x2_unet/models/model_p2_1", self.cnn)
#serializers.load_npz("./cgi-bin/paint_x2_unet/models/model_10000", self.cnn)
#serializers.load_npz("./cgi-bin/paint_x2_unet/models/liner_f", lnn)
def save_as_img(self, array, name):
array = array.transpose(1, 2, 0)
array = array.clip(0, 255).astype(np.uint8)
array = cuda.to_cpu(array)
img = cv2.cvtColor(array, cv2.COLOR_YUV2BGR)
cv2.imwrite(name, img)
def liner(self, id_str):
if self.gpu >= 0:
cuda.get_device(self.gpu).use()
image1 = cv2.imread(path1, cv2.IMREAD_GRAYSCALE)
image1 = np.asarray(image1, self._dtype)
if image1.ndim == 2:
image1 = image1[:, :, np.newaxis]
img = image1.transpose(2, 0, 1)
x = np.zeros((1, 3, img.shape[1], img.shape[2]), dtype='f')
if self.gpu >= 0:
x = cuda.to_gpu(x)
y = lnn.calc(Variable(x, volatile='on'), test=True)
self.save_as_img(y.data[0], self.root + "line/" + id_str + ".jpg")
def colorize_s(self, id_str, blur=0, s_size=128):
if self.gpu >= 0:
cuda.get_device(self.gpu).use()
dataset = ImageAndRefDataset(
[id_str + ".png"], self.root + "line/", self.root + "ref/")
test_in_s, test_in = dataset.get_example(
0, minimize=True, blur=blur, s_size=s_size)
x = np.zeros((1, 4, test_in_s.shape[1], test_in_s.shape[2]), dtype='f')
x[0, :] = test_in_s
if self.gpu >= 0:
x = cuda.to_gpu(x)
y = self.cnn_128.calc(Variable(x, volatile='on'), test=True)
self.save_as_img(y.data[0], self.outdir_min + id_str + ".png")
def colorize_l(self, id_str):
if self.gpu >= 0:
cuda.get_device(self.gpu).use()
dataset = ImageAndRefDataset(
[id_str + ".png"], self.root + "line/", self.root + "out_min/")
test_in, test_in_ = dataset.get_example(0, minimize=False)
x = np.zeros((1, 4, test_in.shape[1], test_in.shape[2]), dtype='f')
x[0, :] = test_in
if self.gpu >= 0:
x = cuda.to_gpu(x)
y = self.cnn.calc(Variable(x, volatile='on'), test=True)
self.save_as_img(y.data[0], self.outdir + id_str + ".jpg")
def colorize(self, id_str, blur=0, s_size=128):
if self.gpu >= 0:
cuda.get_device(self.gpu).use()
dataset = ImageAndRefDataset(
[id_str + ".png"], self.root + "line/", self.root + "ref/")
line, line2 = dataset.get_example(0, minimize=True)
# 1st fixed to 128*128
x = np.zeros((1, 4, line.shape[1], line.shape[2]), dtype='f')
input_bat = np.zeros((1, 4, line2.shape[1], line2.shape[2]), dtype='f')
print(input_bat.shape)
x[0, :] = line
input_bat[0, 0, :] = line2
if self.gpu >= 0:
x = cuda.to_gpu(x)
y = self.cnn_128.calc(Variable(x, volatile='on'), test=True)
del x # release memory
output = cuda.to_cpu(y.data[0])
del y # release memory
self.save_as_img(output, self.outdir_min + id_str + "_0.png")
for ch in range(3):
input_bat[0, 1 + ch, :] = cv2.resize(
output[ch, :], (line2.shape[2], line2.shape[1]), interpolation=cv2.INTER_CUBIC)
if self.gpu >= 0:
x = cuda.to_gpu(input_bat)
else:
x = input_bat
y = self.cnn.calc(Variable(x, volatile='on'), test=True)
del x # release memory
self.save_as_img(y.data[0], self.outdir + id_str + "_0.jpg")
if __name__ == '__main__':
for n in range(1):
print(n)
colorize(n * batchsize)
|
Python
| 0.000001
|
@@ -4530,32 +4530,50 @@
= cuda.to_gpu(x
+, cuda.Stream.null
)%0A y = se
@@ -5042,16 +5042,34 @@
nput_bat
+, cuda.Stream.null
)%0A
|
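Decoded, both host-to-device copies gain `cuda.Stream.null` as a second positional argument. In the Chainer version this code targets, passing a stream to `cuda.to_gpu` makes the memcopy asynchronous, which is what the commit subject refers to:

```python
if self.gpu >= 0:
    x = cuda.to_gpu(x, cuda.Stream.null)
y = self.cnn_128.calc(Variable(x, volatile='on'), test=True)
# ... and later, for the full-size batch:
if self.gpu >= 0:
    x = cuda.to_gpu(input_bat, cuda.Stream.null)
```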
bf28376f252fd474d594e5039d0b2f2bb1afc26a
|
Add proper warnings on use of the backwards compatibility shim.
|
IPython/frontend.py
|
IPython/frontend.py
|
import sys
import types
class ShimModule(types.ModuleType):
def __getattribute__(self, key):
exec 'from IPython import %s' % key
return eval(key)
sys.modules['IPython.frontend'] = ShimModule('frontend')
|
Python
| 0
|
@@ -1,172 +1,2033 @@
-import sys%0Aimport types%0A%0A%0Aclass ShimModule(types.ModuleType):%0A%0A def __getattribute__(self, key):%0A exec 'from IPython import %25s' %25 key%0A return eval(key)
+%22%22%22%0AShim to maintain backwards compatibility with old frontend imports.%0A%0AWe have moved all contents of the old %60frontend%60 subpackage into top-level%0Asubpackages (%60html%60, %60qt%60 and %60terminal%60). This will let code that was making%0A%60from IPython.frontend...%60 calls continue working, though a warning will be%0Aprinted.%0A%22%22%22%0A%0A#-----------------------------------------------------------------------------%0A# Copyright (c) 2013, IPython Development Team.%0A#%0A# Distributed under the terms of the Modified BSD License.%0A#%0A# The full license is in the file COPYING.txt, distributed with this software.%0A#-----------------------------------------------------------------------------%0A%0A#-----------------------------------------------------------------------------%0A# Imports%0A#-----------------------------------------------------------------------------%0Afrom __future__ import print_function%0Aimport sys%0Aimport types%0A%0A#-----------------------------------------------------------------------------%0A# Class declarations%0A#-----------------------------------------------------------------------------%0A%0Aclass ShimModule(types.ModuleType):%0A%0A def __getattribute__(self, key):%0A m = (%22*** WARNING*** : The top-level %60frontend%60 module has been deprecated.%5Cn%22%0A %22Please import %25s directly from the %60IPython%60 level.%22 %25 key)%0A%0A # FIXME: I don't understand why, but if the print statement below is%0A # redirected to stderr, this shim module stops working. It seems the%0A # Python import machinery has problem with redirected prints happening%0A # during the import process. If we can't figure out a solution, we may%0A # need to leave it to print to default stdout.%0A print(m)%0A %0A # FIXME: this seems to work fine, but we should replace it with an%0A # __import__ call instead of using exec/eval.%0A exec 'from IPython import %25s' %25 key%0A return eval(key)%0A%0A%0A# Unconditionally insert the shim into sys.modules so that further import calls%0A# trigger the custom attribute access above
%0A%0Asy
|
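The replacement blob decodes to a fully documented shim module. Trimmed of its banner comments and two inline FIXME notes, the new `IPython/frontend.py` reads (still Python 2, hence the `exec` statement):

```python
"""
Shim to maintain backwards compatibility with old frontend imports.

We have moved all contents of the old `frontend` subpackage into top-level
subpackages (`html`, `qt` and `terminal`). This will let code that was making
`from IPython.frontend...` calls continue working, though a warning will be
printed.
"""
from __future__ import print_function
import sys
import types


class ShimModule(types.ModuleType):

    def __getattribute__(self, key):
        m = ("*** WARNING*** : The top-level `frontend` module has been deprecated.\n"
             "Please import %s directly from the `IPython` level." % key)
        print(m)
        exec 'from IPython import %s' % key
        return eval(key)

# Unconditionally insert the shim into sys.modules so that further import
# calls trigger the custom attribute access above.
sys.modules['IPython.frontend'] = ShimModule('frontend')
```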
f8a6e99980075384572e445c83065be538df583a
|
refactor accuracy score
|
model/building_type.py
|
model/building_type.py
|
"""
This script executes the task of estimating the building type, based solely on the geometry for that building.
The data for this script can be generated by running the prep/get-data.sh and prep/preprocess-buildings.py scripts,
which will take about an hour or two.
"""
import os
from datetime import datetime
import numpy as np
from keras import Input
from keras.callbacks import TensorBoard, EarlyStopping
from keras.engine import Model
from keras.layers import LSTM, Dense, Flatten
from keras.optimizers import Adam
from topoml_util import geom_scaler
from topoml_util.slack_send import notify
SCRIPT_VERSION = '0.2.27'
SCRIPT_NAME = os.path.basename(__file__)
TIMESTAMP = str(datetime.now()).replace(':', '.')
SIGNATURE = SCRIPT_NAME + ' ' + TIMESTAMP
DATA_FOLDER = '../files/buildings/'
FILENAME_PREFIX = 'buildings-train'
# Hyperparameters
BATCH_SIZE = int(os.getenv('BATCH_SIZE', 384))
TRAIN_VALIDATE_SPLIT = float(os.getenv('TRAIN_VALIDATE_SPLIT', 0.1))
REPEAT_DEEP_ARCH = int(os.getenv('REPEAT_DEEP_ARCH', 0))
LSTM_SIZE = int(os.getenv('LSTM_SIZE', 256))
DENSE_SIZE = int(os.getenv('DENSE_SIZE', 64))
EPOCHS = int(os.getenv('EPOCHS', 200))
LEARNING_RATE = float(os.getenv('LEARNING_RATE', 3e-4))
GEOM_SCALE = float(os.getenv('GEOM_SCALE', 0)) # Default 0, overridden when data is known
OPTIMIZER = Adam(lr=LEARNING_RATE)
PATIENCE = 40
RECURRENT_DROPOUT = 0.05
message = 'running {0} with ' \
'batch size: {1} ' \
'train/validate split: {2} ' \
'repeat deep: {3} ' \
'lstm size: {4} ' \
'dense size: {5} ' \
'epochs: {6} ' \
'learning rate: {7}' \
.format(
SIGNATURE,
BATCH_SIZE,
TRAIN_VALIDATE_SPLIT,
REPEAT_DEEP_ARCH,
LSTM_SIZE,
DENSE_SIZE,
EPOCHS,
LEARNING_RATE)
print(message)
# Load training data
train_geoms = []
train_building_type = []
for file in os.listdir(DATA_FOLDER):
if file.startswith(FILENAME_PREFIX) and file.endswith('.npz'):
train_loaded = np.load(DATA_FOLDER + file)
if len(train_geoms):
train_geoms = np.append(train_geoms, train_loaded['geoms'], axis=0)
train_building_type = np.append(train_building_type, train_loaded['building_type'], axis=0)
else:
train_geoms = train_loaded['geoms']
train_building_type = train_loaded['building_type']
# Normalize
GEOM_SCALE = GEOM_SCALE or geom_scaler.scale(train_geoms)
train_geoms = geom_scaler.transform(train_geoms, GEOM_SCALE)
# Map building types to one-hot vectors
train_targets = np.zeros((len(train_building_type), train_building_type.max() + 1))
for index, building_type in enumerate(train_building_type):
train_targets[index, building_type] = 1
# Shape determination
geom_max_points, geom_vector_len = train_geoms.shape[1:]
output_seq_length = train_targets.shape[-1]
# Build model
inputs = Input(shape=(geom_max_points, geom_vector_len))
model = LSTM(LSTM_SIZE, return_sequences=True, recurrent_dropout=RECURRENT_DROPOUT)(inputs)
for layer in range(REPEAT_DEEP_ARCH):
model = LSTM(LSTM_SIZE, return_sequences=True, recurrent_dropout=RECURRENT_DROPOUT)(model)
# model = TimeDistributed(Dense(DENSE_SIZE, activation='relu'))(model)
model = Dense(DENSE_SIZE, activation='relu')(model)
model = Flatten()(model)
model = Dense(output_seq_length, activation='softmax')(model)
model = Model(inputs=inputs, outputs=model)
model.compile(
loss='categorical_crossentropy',
metrics=['accuracy'],
optimizer=OPTIMIZER),
model.summary()
# Callbacks
callbacks = [
TensorBoard(log_dir='./tensorboard_log/' + SIGNATURE, write_graph=False),
# EarlyStopping(patience=PATIENCE, min_delta=0.001)
]
history = model.fit(
x=train_geoms,
y=train_targets,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
validation_split=TRAIN_VALIDATE_SPLIT,
callbacks=callbacks).history
# Run on unseen test data
TEST_DATA_FILE = '../files/buildings/buildings-test.npz'
test_loaded = np.load(TEST_DATA_FILE)
test_geoms = test_loaded['geoms']
test_building_types = test_loaded['building_type']
# Normalize
test_geoms = geom_scaler.transform(test_geoms, GEOM_SCALE) # re-use variance from training
test_pred = model.predict(test_geoms)
# Map test targets to one-hot vectors
test_targets = np.zeros((len(test_building_types), test_building_types.max() + 1))
for index, building_type in enumerate(test_building_types):
test_targets[index, building_type] = 1
correct = 0
for prediction, expected in zip(test_pred, test_targets):
if np.argmax(prediction) == np.argmax(expected):
correct += 1
accuracy = correct / len(test_pred)
message = 'test accuracy of {0} with ' \
'batch size: {1} ' \
'train/validate split: {2} ' \
'repeat deep: {3} ' \
'lstm size: {4} ' \
'dense size: {5} ' \
'epochs: {6} ' \
'learning rate: {7}' \
.format(
str(accuracy),
BATCH_SIZE,
TRAIN_VALIDATE_SPLIT,
REPEAT_DEEP_ARCH,
LSTM_SIZE,
DENSE_SIZE,
len(history['val_loss']),
LEARNING_RATE)
notify(SIGNATURE, message)
print(SCRIPT_NAME, 'finished successfully')
|
Python
| 0.000019
|
@@ -515,16 +515,59 @@
ort Adam
+%0Afrom sklearn.metrics import accuracy_score
%0A%0Afrom t
@@ -662,17 +662,17 @@
= '0.2.2
-7
+8
'%0ASCRIPT
@@ -934,10 +934,11 @@
E',
-38
+102
4))%0A
@@ -3703,18 +3703,16 @@
se),%0A
- #
EarlySt
@@ -4252,16 +4252,17 @@
raining%0A
+%0A
test_pre
@@ -4269,430 +4269,123 @@
d =
-model.predict(test_geoms)%0A%0A# Map test targets to one-hot vectors%0Atest_targets = np.zeros((len(test_building_types), test_building_types.max() + 1))%0Afor index, building_type in enumerate(test_building_types):%0A test_targets%5Bindex, building_type%5D = 1%0A%0Acorrect = 0%0Afor prediction, expected in zip(test_pred, test_targets):%0A if np.argmax(prediction) == np.argmax(expected):%0A correct += 1%0A%0Aaccuracy = correct / len(
+%5Bnp.argmax(prediction) for prediction in model.predict(test_geoms)%5D%0Aaccuracy = accuracy_score(test_building_types,
test
|
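Decoded, the refactor swaps the manual argmax-and-count loop for `sklearn.metrics.accuracy_score`; the surrounding hunks also bump `SCRIPT_VERSION` to '0.2.28', raise the default `BATCH_SIZE` to 1024, and uncomment the `EarlyStopping` callback:

```python
from sklearn.metrics import accuracy_score

# ...

test_pred = [np.argmax(prediction) for prediction in model.predict(test_geoms)]
accuracy = accuracy_score(test_building_types, test_pred)
```

Since `accuracy_score` compares integer class labels directly, the one-hot `test_targets` construction for the test set disappears along with the loop.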
d8ceaa6cc9e475292633dae3927cf29f3783f9af
|
Update some comments
|
conda_tools/updater.py
|
conda_tools/updater.py
|
#!/usr/bin/env python
#
# updater.py
#
# Updates all packages in all installed conda environments.
# This script should be run under the root conda environment.
import os.path
import subprocess
import conda_api
from utils import get_root_prefix
def update_all(update_root=True, *blacklist_envs):
"""Updates all conda packages in all installed conda environments.
Required arguments:
update_root -- A Boolean flag that specifies whether the root conda
environment should be updated (default True).
Optional arguments:
*blacklist_envs -- Names of environments you don't want updated.
Example usage:
update_all(True, 'special_env1', 'special_env2')
This will update all conda environments (including root) but excluding
special_env1 and special_env2.
"""
# Before we do anything, set the ROOT_PREFIX
# variable so conda_api knows where to work from.
conda_api.set_root_prefix(get_root_prefix())
# Get all active environments, excluding the ones in the blacklist.
envs = [
os.path.basename(env) for env in conda_api.get_envs()
if os.path.basename(env) not in blacklist_envs
][1:]
print('ROOT_PREFIX is set to: {0}'.format(conda_api.ROOT_PREFIX))
if update_root:
root_update_result = conda_api.update(use_local=True, all=True, env='base')
print('Result from environment root:\n{0}'.format(root_update_result))
for env_name in envs:
# Update all packages in the environment.
env_update_result = conda_api.update(env=env_name, all=True)
print('Result from environment {0}:\n{1}'.format(env_name, env_update_result))
def pip_update(**pip_package_specs):
"""Updates pip packages in their respective conda environments.
Keyword arguments:
**pip_package_specs -- The key is the name of the environment, and the
value is an iterable of the pip package names
in that environment you want to update.
Example usage:
pip_package_specs = {'conda_env1':('autobahn','six','txaio',),
'conda_env2':('pika',)}
pip_update(**pip_package_specs)
This will update autobahn, six, and txaio in the conda environment
'conda_env1', and pika in the environment 'conda_env2'.
"""
if pip_package_specs:
conda_api.set_root_prefix(get_root_prefix())
for env, packages in pip_package_specs.items():
pip_args = ['install', '-U']
pip_args.extend(packages)
# Equivalent of running 'pip install -q -U package1 package2 ...',
# but runs it inside the appropriate conda environment.
p = conda_api.process(
name=env,
cmd='pip',
args=pip_args,
stdout=subprocess.PIPE
)
stdout, _ = p.communicate()
print('Pip update result from environment {0}:\n{1}'.format(env, stdout))
if __name__ == '__main__':
update_all()
|
Python
| 0
|
@@ -1036,16 +1036,111 @@
cklist.%0A
+ # The root environment will be the first element in this list,%0A # so exclude that also.%0A
envs
@@ -2682,11 +2682,8 @@
all
--q
-U p
|
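Decoded, both hunks are comment-only: one explains why `envs` slices off its first element (the root environment leads the `get_envs()` list), the other drops a stale `-q` from the comment describing the pip invocation, which `pip_args` never actually included:

```python
# Get all active environments, excluding the ones in the blacklist.
# The root environment will be the first element in this list,
# so exclude that also.
envs = [
    os.path.basename(env) for env in conda_api.get_envs()
    if os.path.basename(env) not in blacklist_envs
][1:]
```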
7f800ee03790d28a25f0f5989c8f6a15401af172
|
remove print statement
|
django/crashreport/symbols/views.py
|
django/crashreport/symbols/views.py
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
from django.shortcuts import render
from django import forms
from django.http import HttpResponse, HttpResponseNotAllowed, HttpResponseServerError
from django.views.decorators.csrf import csrf_exempt
from .handler import SymbolsUploadHandler
import os
class UploadSymbolsForm(forms.Form):
symbols = forms.FileField()
comment = forms.CharField()
def handle_uploaded_file(f):
# TODO: moggi: get the symbols localtion from the configuration
file_path = os.path.join('/tmp/symbols_upload', f.name)
print(file_path)
with open(file_path, 'wb+') as destination:
for chunk in f.chunks():
destination.write(chunk)
return file_path
# TODO: this needs to be limited to logged in users
@csrf_exempt
def upload_symbols(request):
if request.method != 'POST':
return HttpResponseNotAllowed('Only POST here')
form = UploadSymbolsForm(request.POST, request.FILES)
print(form.fields)
if not form.is_valid():
return HttpResponseNotAllowed('Invalid data')
path = handle_uploaded_file(request.FILES['symbols'])
upload = SymbolsUploadHandler()
upload.process(form.cleaned_data, path)
# TODO: moggi: maybe report the zipfile.BadZipfile exception
return HttpResponse("Success")
# vim:set shiftwidth=4 softtabstop=4 expandtab: */
|
Python
| 0.999999
|
@@ -797,29 +797,8 @@
me)%0A
- print(file_path)%0A
@@ -1182,32 +1182,8 @@
S)%0A%0A
- print(form.fields)%0A%0A
|
6833fbd6f29c5fd8477c6545d708af6d61c55473
|
Fix filtering on preference type
|
UM/VersionUpgradeManager.py
|
UM/VersionUpgradeManager.py
|
# Copyright (c) 2015 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.
from UM.PluginRegistry import PluginRegistry
from UM.Preferences import Preferences
from UM.Settings.MachineInstance import MachineInstance
from UM.Settings.Profile import Profile
import collections #For deque, for breadth-first search.
## Regulates the upgrading of preferences from one application version to the
# next.
#
# The process of upgrading will take a look at all profiles, preferences and
# machine instances and check their version numbers. If they are older than
# the current version number of their respective type of file, an upgrade path
# will be planned for it in order to upgrade the file to the current version
# in as few conversions as possible.
#
# To this end, the upgrade manager will maintain the shortest paths to the
# current version for each of the types of profiles and each old version it
# encounters. Once a shortest path is found, it is cached and can be re-used
# for all nodes along this path. This minimises the extra start-up time
# required for the conversions.
#
# Old versions of the preferences are not deleted, but put in a folder next to
# the current (upgraded) versions, where they are never loaded again unless
# the user manually retrieves the files.
class VersionUpgradeManager:
## Initialises the version upgrade manager.
#
# This initialises the cache for shortest upgrade paths, and registers the
# version upgrade plug-ins.
def __init__(self):
#Initialise the caches for shortest upgrade paths.
#These dictionaries are keyed by the version number for which it is the shortest path.
#The value indicates the version upgrade plug-in to use to upgrade to the next version.
self._machine_instance_upgrade_paths = {} #The shortest paths to upgrade machine instances to the current version.
self._preferences_upgrade_paths = {} #The shortest paths to upgrade preferences to the current version.
self._profile_upgrade_paths = {} #The shortest paths to upgrade profiles to the current version.
self._versionUpgrades = [] #All upgrade plug-ins.
PluginRegistry.addType("version_upgrade", self._addVersionUpgrade)
## Performs the version upgrades of all preference files to the most recent
# version.
#
# The upgrade plug-ins must all be loaded at this point, or no upgrades
# can be performed.
def upgrade(self):
paths = self._findShortestUpgradePaths("machine_instance", MachineInstance.MachineInstanceVersion)
#TODO: Find all machine instances.
#TODO: Upgrade all machine instances to the most recent version.
paths = self._findShortestUpgradePaths("preferences", Preferences.PreferencesVersion)
#TODO: Find all preference files.
#TODO: Upgrade all preference files to the most recent version.
paths = self._findShortestUpgradePaths("profile", Profile.ProfileVersion)
#TODO: Find all profiles.
#TODO: Upgrade all profiles to the most recent version.
# private:
## Adds a version upgrade plug-in.
#
# \param version_upgrade_plugin The plug-in object of the version upgrade
# plug-in.
def _addVersionUpgrade(self, version_upgrade_plugin):
self._versionUpgrades.append(version_upgrade_plugin)
## For each version of a preference type, finds the next step to take to
# upgrade as quickly as possible to the most recent version.
#
# The preference type should be either "machine_instance", "preferences"
# or "profile", matching the types listed in the metadata of a plug-in.
# This is abstracted to prevent having to maintain the same code in lots
# of different functions that do basically the same.
#
# This function uses a breadth-first search to get the fewest number of
# steps required to upgrade to the destination version.
#
# \param preference_type The type of preference to compute the shortest
# upgrade paths of.
# \param destination_version The version to compute the shortest paths to.
# \return A dictionary with an entry for each version number from which we
# can reach the destination version, naming the version upgrade plug-in
# with which to convert for the next step.
def _findShortestUpgradePaths(self, preference_type, destination_version):
by_destination_version = self._sortByDestinationVersion(preference_type)
result = {}
#Perform a breadth-first search.
registry = PluginRegistry.getInstance()
front = collections.deque() #Use as a queue for breadth-first iteration: Append right, pop left.
done = {} #Flag explored upgrades as done.
for neighbour in by_destination_version[destination_version]:
front.append(neighbour)
source_version = registry.getMetaData(neighbour.getPluginId())["version_upgrade"][preference_type]["from"]
if source_version not in result: #First time we encounter this version. Due to breadth-first search, this must be part of the shortest path then.
result[source_version] = neighbour
done += neighbour
while len(front) > 0:
upgrade = front.popleft() #To make it a queue, pop on the opposite side of where you append!
for neighbour in by_destination_version[registry.getMetaData(upgrade.getPluginId())["version_upgrade"][preference_type]["to"]]:
if neighbour in done: #Already encountered elsewhere. No need to re-compute.
continue
front.append(neighbour)
source_version = registry.getMetaData(neighbour.getPluginId())["version_upgrade"][preference_type]["from"]
if source_version not in result: #First time we encounter this version. Due to breadth-first search, this must be part of the shortest path then.
result[source_version] = neighbour
done += neighbour
return result
## Creates a look-up table to get plug-ins by what version they upgrade
# to.
#
# \param preference_type The type of preference file the version number
# applies to.
# \return A dictionary with an entry for every version that the upgrade
# plug-ins can convert to, and which plug-ins can convert to that version.
def _sortByDestinationVersion(self, preference_type):
result = {}
registry = PluginRegistry.getInstance()
for plugin in self._versionUpgrades:
destination = registry.getMetaData(plugin.getPluginId())["version_upgrade"][preference_type]["to"]
if not destination in result: #Entry doesn't exist yet.
result[destination] = []
result[destination].append(plugin) #Sort this plug-in under the correct entry.
return result
|
Python
| 0
|
@@ -6645,27 +6645,24 @@
-destination
+metadata
= regis
@@ -6709,32 +6709,168 @@
ersion_upgrade%22%5D
+%0A if preference_type not in metadata: #Filter by preference_type.%0A continue%0A destination = metadata
%5Bpreference_type
|
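Decoded, `_sortByDestinationVersion` now looks up the plug-in's `version_upgrade` metadata once and skips plug-ins that declare nothing for the requested `preference_type`, where the old code would raise `KeyError` on the chained lookup:

```python
for plugin in self._versionUpgrades:
    metadata = registry.getMetaData(plugin.getPluginId())["version_upgrade"]
    if preference_type not in metadata:  # Filter by preference_type.
        continue
    destination = metadata[preference_type]["to"]
    if not destination in result:  # Entry doesn't exist yet.
        result[destination] = []
    result[destination].append(plugin)  # Sort this plug-in under the correct entry.
```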
f782633c857932803283dd9c26621e69f6ccb44e
|
fix version
|
l10n_es_aeat_sii/__manifest__.py
|
l10n_es_aeat_sii/__manifest__.py
|
# -*- coding: utf-8 -*-
# Copyright 2017 Ignacio Ibeas <ignacio@acysos.com>
# (c) 2017 Diagram Software S.L.
# Copyright (c) 2017-TODAY MINORISA <ramon.guiu@minorisa.net>
# (c) 2017 Studio73 - Pablo Fuentes <pablo@studio73.es>
# (c) 2017 Studio73 - Jordi Tolsà <jordi@studio73.es>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "Suministro Inmediato de Información en el IVA",
"version": "11.0.1.3.2",
"category": "Accounting & Finance",
"website": "https://www.acysos.com",
"author": "Acysos S.L.",
"license": "AGPL-3",
"application": False,
"installable": True,
"external_dependencies": {
"python": ["zeep",
"requests"],
},
"depends": [
"account",
"account_invoice_refund_link",
"l10n_es_aeat",
"queue_job",
"account_payment_partner",
],
"data": [
"data/ir_config_parameter.xml",
"data/aeat_sii_mapping_registration_keys_data.xml",
"data/aeat_sii_map_data.xml",
"data/aeat_sii_map_data_1_1.xml",
"data/aeat_sii_mapping_payment_keys_data.xml",
"data/account_fiscal_position_data.xml",
"views/res_company_view.xml",
"views/account_invoice_view.xml",
"views/aeat_sii_view.xml",
"views/aeat_sii_result_view.xml",
"views/aeat_check_sii_result_view.xml",
"wizard/aeat_sii_password_view.xml",
"views/aeat_sii_mapping_registration_keys_view.xml",
"views/aeat_sii_map_view.xml",
"security/ir.model.access.csv",
"security/aeat_sii.xml",
"views/product_view.xml",
"views/account_view.xml",
"views/account_payment_mode_view.xml",
],
'images': ['static/description/banner.jpg'],
"post_init_hook": "post_init_sii_hook",
}
|
Python
| 0.000001
|
@@ -418,17 +418,17 @@
ion%22: %221
-1
+2
.0.1.3.2
|
7d7b12d176a0315057c54eff794fbe7117c7f6da
|
Include the Python version in the support tracking data.
|
reviewboard/admin/support.py
|
reviewboard/admin/support.py
|
from __future__ import unicode_literals
import base64
import time
from datetime import datetime
from hashlib import sha1
from django.conf import settings
from django.contrib.auth.models import User
from djblets.siteconfig.models import SiteConfiguration
from reviewboard import get_package_version
def get_install_key():
"""Returns the installation key for this server."""
return sha1(settings.SECRET_KEY).hexdigest()
def serialize_support_data(request=None, force_is_admin=False):
"""Serializes support data into a base64-encoded string."""
siteconfig = SiteConfiguration.objects.get_current()
is_admin = (force_is_admin or
(request is not None and request.user.is_staff))
return base64.b64encode('\t'.join([
get_install_key(),
'%d' % is_admin,
siteconfig.site.domain,
siteconfig.get('site_admin_name'),
siteconfig.get('site_admin_email'),
get_package_version(),
'%d' % User.objects.filter(is_active=True).count(),
'%d' % int(time.mktime(datetime.now().timetuple())),
siteconfig.get('company'),
]))
def get_default_support_url(request=None, force_is_admin=False):
"""Returns the URL for the default Review Board support page."""
siteconfig = SiteConfiguration.objects.get_current()
if siteconfig.get('send_support_usage_stats'):
support_data = serialize_support_data(request, force_is_admin)
else:
support_data = ''
return settings.DEFAULT_SUPPORT_URL % {
'support_data': support_data,
}
def get_register_support_url(request=None, force_is_admin=False):
"""Returns the URL for registering the Review Board support page."""
siteconfig = SiteConfiguration.objects.get_current()
if siteconfig.get('send_support_usage_stats'):
support_data = serialize_support_data(request, force_is_admin)
else:
support_data = ''
return settings.REGISTER_SUPPORT_URL % {
'support_data': support_data,
}
def get_support_url(request):
"""Returns the URL for the configured support page."""
siteconfig = SiteConfiguration.objects.get_current()
return (siteconfig.get('support_url') or
get_default_support_url(request))
|
Python
| 0
|
@@ -48,16 +48,27 @@
base64%0A
+import sys%0A
import t
@@ -1117,24 +1117,67 @@
'company'),%0A
+ '%25s.%25s.%25s' %25 sys.version_info%5B:3%5D,%0A
%5D))%0A%0A%0Ade
|
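Decoded, the patch imports `sys` and appends one more tab-separated field to the serialized payload: a dotted `major.minor.micro` Python version. The variable name below is illustrative; the patch inlines the expression as the final entry of the `'\t'.join([...])` list:

```python
import sys

python_version = '%s.%s.%s' % sys.version_info[:3]  # e.g. '2.7.6'
```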
64d5847f058dec81a288482665a1d8208f0f4e17
|
Fix security issue in singularity + misc cleanups (#8657)
|
var/spack/repos/builtin/packages/singularity/package.py
|
var/spack/repos/builtin/packages/singularity/package.py
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Singularity(AutotoolsPackage):
"""Singularity is a container platform focused on supporting 'Mobility of
Compute'"""
homepage = "http://singularity.lbl.gov/"
url = "https://github.com/singularityware/singularity/archive/2.4.tar.gz"
version('2.4.5', '9afa903ee019448104b4f40be77a46e7')
version('2.4', 'd357ce68ef2f8149edd84155731531465dbe74148c37719f87f168fc39384377')
version('2.3.1', '292ff7fe3db09c854b8accf42f763f62')
version('develop', git='https://github.com/singularityware/singularity.git', branch='master')
depends_on('m4', type='build')
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
|
Python
| 0
|
@@ -1405,19 +1405,34 @@
= %22http
+s
://
+www.sylabs.io/
singular
@@ -1438,16 +1438,8 @@
rity
-.lbl.gov
/%22%0A
@@ -1504,19 +1504,49 @@
ity/
-archive/2.4
+releases/download/2.5.2/singularity-2.5.2
.tar
@@ -1559,147 +1559,125 @@
-v
+# V
ersion
-('2.4.5', '9afa903ee019448104b4f40be77a46e7')%0A version('2.4', 'd357ce68ef2f8149edd84155731531465dbe74148c37719f87f168fc39384377')
+s before 2.5.2 suffer from a serious security problem.%0A # https://nvd.nist.gov/vuln/detail/CVE-2018-12021
%0A
@@ -1692,47 +1692,47 @@
('2.
-3.1
+5.2
', '2
-92ff7fe3db09c854b8accf42f763f62
+edc1a8ac9a4d7d26fba6244f1c5fd95
')%0A
@@ -1829,16 +1829,127 @@
ster')%0A%0A
+ depends_on('libarchive', when='@2.5.2:')%0A # these are only needed if we're grabbing the unreleased tree%0A
depe
@@ -1971,32 +1971,49 @@
type='build'
+, when='@develop'
)%0A depends_on
@@ -2029,32 +2029,49 @@
f', type='build'
+, when='@develop'
)%0A depends_on
@@ -2095,16 +2095,33 @@
='build'
+, when='@develop'
)%0A de
@@ -2153,10 +2153,174 @@
='build'
+, when='@develop')%0A%0A # When installing as root, the copy has to run before chmod runs%0A def install(self, spec, prefix):%0A make('install', parallel=False
)%0A
|
346e6c45b3c2a49168eac9fd1b703bd5c05503b1
|
Fix SQL: direction start/creation date taken from the study confirmation
|
rmis_integration/sql_func.py
|
rmis_integration/sql_func.py
|
from django.db import connection
from laboratory.settings import TIME_ZONE
def get_confirm_direction(d_s, d_e, limit):
with connection.cursor() as cursor:
cursor.execute(
"""WITH
t_all_direction AS (
SELECT DISTINCT ON (napravleniye_id) napravleniye_id FROM public.directions_issledovaniya
WHERE time_confirmation AT TIME ZONE %(tz)s BETWEEN %(d_start)s AND %(d_end)s),
t_not_confirm_direction AS (
SELECT DISTINCT ON (napravleniye_id) napravleniye_id FROM public.directions_issledovaniya
WHERE napravleniye_id IN (SELECT napravleniye_id FROM t_all_direction) AND time_confirmation IS NULL),
t_only_confirm_direction AS (
SELECT napravleniye_id FROM t_all_direction
WHERE napravleniye_id NOT IN (SELECT napravleniye_id FROM t_not_confirm_direction)),
t_istochnik_f_rmis_auto_send AS (
SELECT id FROM directions_istochnikifinansirovaniya
WHERE rmis_auto_send = false)
SELECT id FROM directions_napravleniya
WHERE id IN (SELECT napravleniye_id FROM t_only_confirm_direction)
AND
rmis_number != ANY(ARRAY['NONERMIS', '', NULL])
AND
result_rmis_send = false
AND
NOT (imported_from_rmis = True and imported_directions_rmis_send = False)
AND
NOT (istochnik_f_id IN (SELECT id FROM t_istochnik_f_rmis_auto_send) and force_rmis_send = False)
ORDER BY data_sozdaniya
LIMIT %(limit)s
""",
params={'d_start': d_s, 'd_end': d_e, 'tz': TIME_ZONE, 'limit': limit},
)
row = cursor.fetchall()
return row
|
Python
| 0.000076
|
@@ -394,31 +394,10 @@
z)s
-BETWEEN %25(d_start)s AND
+%3C=
%25(d
@@ -1178,32 +1178,115 @@
AND %0A
+ data_sozdaniya AT TIME ZONE %25(tz)s %3E= %25(d_start)s%0A AND %0A
|
77ffc94a0439dd7309df0630cddba7daf60bc5ee
|
Add imported requests to Audit context.
|
src/ggrc/converters/requests.py
|
src/ggrc/converters/requests.py
|
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: silas@reciprocitylabs.com
# Maintained By: silas@reciprocitylabs.com
from .base import *
from ggrc.models import Request
from .base_row import *
from collections import OrderedDict
class RequestRowConverter(BaseRowConverter):
model_class = Request
def find_by_slug(self, slug):
return self.model_class.query.filter_by(slug=slug).first()
def setup_object(self):
self.obj = self.setup_object_by_slug(self.attrs)
if self.obj.id is not None:
self.add_warning('slug', "Request already exists and will be updated")
def reify(self):
self.handle('objective_id', ObjectiveHandler)
self.handle('request_type', RequestTypeColumnHandler, is_required=True)
self.handle('status', StatusColumnHandler, valid_states=Request.VALID_STATES, default_value='Draft')
self.handle_date('requested_on', is_required=True)
self.handle_date('due_on', is_required=True)
self.handle_text_or_html('description')
self.handle_text_or_html('test')
self.handle_text_or_html('notes')
self.handle_raw_attr('auditor_contact') # default to audit lead
self.handle(
'assignee', AssigneeHandler, is_required=True,
person_must_exist=True)
def save_object(self, db_session, **options):
if options.get('audit'):
self.obj.audit_id = options.get('audit').id
db_session.add(self.obj)
class RequestsConverter(BaseConverter):
metadata_map = OrderedDict([
('Type', 'type'),
('Program Code', 'slug')
])
object_map = OrderedDict([
('Request Type', 'request_type'),
('Request Description', 'description'),
('Objective Code', 'objective_id'),
('Notes', 'notes'),
('Test', 'test'),
('Assignee', 'assignee'),
('Audit Contact', 'auditor_contact'),
('Requested On', 'requested_on'),
('Due On', 'due_on'),
('Status', 'status'),
])
row_converter = RequestRowConverter
# Overwrite validate functions since they assume a program rather than a directive
def validate_code(self, attrs):
if not attrs.get('slug'):
self.errors.append('Missing Program Code heading')
elif attrs['slug'] != self.program().slug:
self.errors.append('Program Code must be {}'.format(self.program().slug))
def validate_metadata(self, attrs):
self.validate_metadata_type(attrs, "Requests")
self.validate_code(attrs)
def program(self):
return self.options['program']
def do_export_metadata(self):
yield self.metadata_map.keys()
yield ['Requests', self.program().slug]
yield[]
yield[]
yield self.object_map.keys()
|
Python
| 0
|
@@ -278,16 +278,23 @@
s import
+ Audit,
Request
@@ -1409,18 +1409,23 @@
s):%0A
-if
+audit =
options
@@ -1437,16 +1437,29 @@
'audit')
+%0A if audit
:%0A
@@ -1476,37 +1476,55 @@
udit
-_id = options.get('audit').id
+ = audit%0A self.obj.context = audit.context
%0A
|
d4c98e176833a4b37b7edf1d68741bcfa8c50213
|
Return the name_sha of each flaky test in build_flaky_tests
|
changes/api/build_flaky_tests.py
|
changes/api/build_flaky_tests.py
|
from __future__ import absolute_import
from sqlalchemy.orm import joinedload
from changes.api.base import APIView
from changes.config import db
from changes.constants import Result
from changes.models.build import Build
from changes.models.job import Job
from changes.models.phabricatordiff import PhabricatorDiff
from changes.models.source import Source
from changes.models.test import TestCase
# This constant must match MAX_TESTS_TO_ADD in citools' quarantine keeper
MAX_TESTS_TO_ADD = 2
class BuildFlakyTestsAPIView(APIView):
def get(self, build_id):
build = Build.query.get(build_id)
if build is None:
return '', 404
jobs = list(Job.query.filter(
Job.build_id == build.id,
))
if jobs:
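# A test counts as flaky when it ultimately passed but needed
# more than one run (result == passed and reruns > 1).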
flaky_tests_query = db.session.query(
TestCase.id,
TestCase.name,
TestCase.name_sha,
TestCase.job_id
).filter(
TestCase.job_id.in_([j.id for j in jobs]),
TestCase.result == Result.passed,
TestCase.reruns > 1
).order_by(TestCase.name.asc()).all()
else:
flaky_tests_query = []
flaky_tests = []
for test in flaky_tests_query:
item = {
'id': test.id,
'name': test.name,
'job_id': test.job_id,
}
# Quarantine Keeper only needs the author if there are at most
# MAX_TESTS_TO_ADD to add. If there are more, it only sends
# an alert, so we don't want to waste time querying the DB
if len(flaky_tests_query) <= MAX_TESTS_TO_ADD:
first_build = self._get_first_build(build.project_id, test.name_sha)
last_test = self._get_last_testcase(build.project_id, test.name_sha)
possible_authors = [
last_test.owner,
first_build.author.email,
]
for author in possible_authors:
if author:
item['author'] = {'email': author}
break
phab_diff = PhabricatorDiff.query.filter(
Source.id == first_build.source.id,
).first()
if phab_diff:
item['diff_id'] = phab_diff.revision_id
flaky_tests.append(item)
context = {
'projectSlug': build.project.slug,
'repositoryUrl': build.project.repository.url,
'flakyTests': {
'count': len(flaky_tests),
'items': flaky_tests
}
}
return self.respond(context)
@staticmethod
def _get_first_build(project_id, test_name_sha):
"""Get the first build (by date created) containing a test case.
Args:
:param project_id: string
:param test_name_sha: string
Returns:
Build
"""
first_test = TestCase.query.filter(
TestCase.project_id == project_id,
TestCase.name_sha == test_name_sha,
).order_by(TestCase.date_created.asc()).limit(1).first()
if first_test is None:
return None
first_build = Build.query.options(
joinedload('author'),
joinedload('source'),
).filter(
Build.id == first_test.job.build_id,
).first()
return first_build
@staticmethod
def _get_last_testcase(project_id, test_name_sha):
"""Get the most recent TestCase instance for the specified name.
Args:
:param project_id: string
:param test_name_sha: string
Returns:
TestCase
"""
most_recent_test = TestCase.query.filter(
TestCase.project_id == project_id,
TestCase.name_sha == test_name_sha,
).order_by(TestCase.date_created.desc()).limit(1).first()
return most_recent_test
|
Python
| 0.05876
|
@@ -1350,24 +1350,67 @@
test.name,%0A
+ 'name_sha': test.name_sha,%0A
|
3b950782e0f4140715084ebace2820db4f61c600
|
Fix reindent-rst.py: it works on binary files
|
Tools/scripts/reindent-rst.py
|
Tools/scripts/reindent-rst.py
|
#!/usr/bin/env python
# Make a reST file compliant to our pre-commit hook.
# Currently just remove trailing whitespace.
import sys, re, shutil
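# Match trailing whitespace before a line ending; the ending itself is
# captured (group 1) so CRLF endings survive the substitution.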
ws_re = re.compile(r'\s+(\r?\n)$')
def main(argv=sys.argv):
rv = 0
for filename in argv[1:]:
try:
with open(filename, 'rb') as f:
lines = f.readlines()
new_lines = [ws_re.sub(r'\1', line) for line in lines]
if new_lines != lines:
print('Fixing %s...' % filename)
shutil.copyfile(filename, filename + '.bak')
with open(filename, 'wb') as f:
f.writelines(new_lines)
except Exception as err:
print('Cannot fix %s: %s' % (filename, err))
rv = 1
return rv
if __name__ == '__main__':
sys.exit(main())
|
Python
| 0.000008
|
@@ -159,16 +159,17 @@
compile(
+b
r'%5Cs+(%5Cr
@@ -373,16 +373,17 @@
_re.sub(
+b
r'%5C1', l
|
4eb100414a139d15d55bc752965d81e96bf5404d
|
Refactor from review
|
src/globus_cli/login_manager.py
|
src/globus_cli/login_manager.py
|
import functools
import click
from .tokenstore import token_storage_adapter
class LoginManager:
def __init__(self):
self._token_storage = token_storage_adapter()
def has_login(self, resource_server: str):
"""
Determines if the user has a refresh token for the given
resource server
"""
tokens = self._token_storage.get_token_data(resource_server)
if tokens is None or "refresh_token" not in tokens:
return False
return True
def requires_login(*args: str, pass_manager: bool = False):
"""
Command decorator for specifying a resource server that the user must have
tokens for in order to run the command.
Simple usage for commands that have static resource needs: simply list all
needed resource servers as args:
@requires_login("auth.globus.org")
@requires_login("auth.globus.org", "transfer.api.globus.org")
Usage for commands which have dynamic resource servers depending
on the arguments passed to the command (e.g. commands for the GCS API)
@requires_login(pass_manager=True)
def command(login_manager, endpoint_id):
login_manager.<do the thing>(endpoint_id)
"""
resource_servers = args
def inner(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
manager = LoginManager()
# determine the set of resource servers missing logins
missing_servers = set()
for server_name in resource_servers:
if not manager.has_login(server_name):
missing_servers.add(server_name)
# if we are missing logins, assemble error text
# text is slightly different for 1, 2, or 3+ missing servers
if missing_servers:
if len(missing_servers) == 1:
plural_string = ""
server_string = missing_servers.pop()
elif len(missing_servers) == 2:
plural_string = "s"
server_string = "{} and {}".format(
missing_servers.pop(), missing_servers.pop()
)
else:
plural_string = "s"
single_server = missing_servers.pop()
server_string = ", ".join(missing_servers) + ", and {}".format(
single_server
)
raise click.ClickException(
"Missing login{} for {}, please run 'globus login'".format(
plural_string, server_string
)
)
# if pass_manager is True, pass it as an additional positional arg
if pass_manager:
return func(*args, manager, **kwargs)
else:
return func(*args, **kwargs)
return wrapper
return inner
|
Python
| 0
|
@@ -409,18 +409,22 @@
-if
+return
tokens
@@ -430,15 +430,20 @@
is
+not
None
-or
+and
%22re
@@ -454,20 +454,16 @@
h_token%22
- not
in toke
@@ -468,55 +468,8 @@
kens
-:%0A return False%0A%0A return True
%0A%0A%0Ad
|
6f0bd1e6f85bcd58db5cd99eb3dd8a801ec8af62
|
add comment
|
channelguide/cache/middleware.py
|
channelguide/cache/middleware.py
|
from Cookie import SimpleCookie
import time
from django.conf import settings
from django.core.handlers.wsgi import WSGIRequest
from django.template import Context, loader
import client
class CacheTimingMiddleware(object):
def process_request(self, request):
request.start_time = time.time()
def process_response(self, request, response):
if not hasattr(request, 'start_time'):
return response
total = time.time() - request.start_time
f = file('/tmp/page_timing', 'a')
if hasattr(request, '_cache_hit'):
type = 'C'
else:
type = 'R'
line = '%s!%s!%i!%s!%s!%f\n' % (time.asctime(),type, response.status_code, request.path, request.META.get('QUERY_STRING', ''), total)
f.write(line)
f.close()
del request.start_time
return response
class CacheMiddlewareBase(object):
cache_time = 0 # how many seconds to cache for
def get_cache_key_tuple(self, request):
"""Return a tuple that will be used to create the cache key."""
raise NotImplementedError
def response_to_cache_object(self, request, response):
return response
def response_from_cache_object(self, request, cached_object):
return cached_object
def get_cache_key(self, request):
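# Hash the key tuple so arbitrarily long paths, query strings and
# cookies still map to a short, fixed-length cache key.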
prefix = self.__class__.__name__ + ":"
return prefix + hex(hash(self.get_cache_key_tuple(request)))
def can_cache_request(self, request):
return (request.method == 'GET' and
'no-cache' not in request.META.get('HTTP_CACHE_CONTROL', ''))
def process_request(self, request):
if not self.can_cache_request(request):
return None
cached_object = client.get(self.get_cache_key(request))
if cached_object is None or settings.DISABLE_CACHE:
return None
else:
request._cache_hit = True
return self.response_from_cache_object(request, cached_object)
def process_response(self, request, response):
if 'Cache-Control' not in response.headers:
response.headers['Cache-Control'] = 'max-age=0'
if (request.method == 'GET' and response.status_code == 200 and
not hasattr(request, '_cache_hit')):
client.set(self.get_cache_key(request),
self.response_to_cache_object(request, response),
time=self.cache_time)
return response
class CacheMiddleware(CacheMiddlewareBase):
def get_cache_key_tuple(self, request):
cookie = request.META.get('HTTP_COOKIE')
if type(cookie) is SimpleCookie:
# Maybe this is the test browser, which sends the HTTP_COOKIE
# value as an python Cookie object
return (request.path, request.META['QUERY_STRING'],
cookie.output())
else:
return (request.path, request.META['QUERY_STRING'], cookie)
class TableDependentCacheMiddleware(CacheMiddleware):
def __init__(self, *tables):
self.table_keys = ['Table:' + (hasattr(t, 'name') and t.name or t)
for t in tables]
def get_cache_key(self, request):
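# Append a timestamp per dependent table to the cache key; bumping a
# table's timestamp implicitly invalidates every page cached against
# the old value.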
cache_key = CacheMiddlewareBase.get_cache_key(self, request)
if not self.table_keys:
return cache_key
ret = client.get_multi(self.table_keys)
if len(ret) != len(self.table_keys):
for k in (key for key in self.table_keys if key not in ret):
v = time.time()
client.set(k, v)
ret[k] = v
appends = ['%s' % ret[k] for k in self.table_keys]
key = cache_key + ':' + ':'.join(appends)
return key
class AggressiveCacheMiddleware(TableDependentCacheMiddleware):
"""Aggresively Caches a page. This should only be used for pages that
* Don't use any session data, or any cookie data
* Are displayed the same for each user (except the account bar)
* Don't do any authentication
This middleware caches pages without regard to the cookies. When a
request is about to be processed, if there is a page in the cache, it uses
that page, but replaces the account bar with a newly generated account
bar.
"""
account_bar_start = '<!-- START ACCOUNT BAR -->'
account_bar_end = '<!-- END ACCOUNT BAR -->'
def get_cache_key_tuple(self, request):
return (request.path, request.META['QUERY_STRING'])
def response_from_cache_object(self, request, cached_response):
t = loader.get_template("guide/account-bar.html")
new_account_bar = t.render(Context({'user': request.user})).decode('ascii')
content = cached_response.content
start = content.find(self.account_bar_start)
head = content[:start]
end = content.find(self.account_bar_end, start) + len(self.account_bar_end)
tail = content[end:]
cached_response.content = head
cached_response.content += new_account_bar
cached_response.content += tail
return cached_response
|
Python
| 0
|
@@ -824,32 +824,141 @@
uest.start_time%0A
+ footer = '%5Cn%3C!-- %25s --%3E' %25 line%0A response.content = response.content + footer.encode('utf-8')%0A
return r
|
65e0d22cfc340a9df8d1b25e114cf7173198644b
|
Fix incorrect variable used in pressure_fit
|
UConnRCMPy/pressure_traces.py
|
UConnRCMPy/pressure_traces.py
|
from __future__ import print_function
import numpy as np
import cantera as ct
from .constants import (cantera_version,
one_atm_in_bar,
one_atm_in_torr,
one_bar_in_pa,
)
from .utilities import ParsedFilename
class PressureTrace(object):
"""Generic class for pressure traces"""
def file_loader(self, filename):
"""
Load a voltage trace from a text file.
Load a voltage trace from a text file. Check if the file exists
and if not, try again after adding the proper file extension.
"""
self.voltage = None
try:
self.voltage = np.genfromtxt(filename)
except OSError:
filename += '.txt'
self.voltage = np.genfromtxt(filename)
if self.voltage is None:
raise OSError('Data file not found')
def smoothing(self, data, span=21):
window = np.ones(span)/span
return np.convolve(data, window, 'same')
def pressure_fit(self):
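# Fit a straight line to the pre-compression pressure (everything up
# to 0.08 s before EOC); the first few noisy samples are clamped below.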
beg_compress = np.floor(self.p_EOC_idx - 0.08*self.sampfreq)
time = np.linspace(0, (beg_compress - 1)/self.sampfreq, beg_compress)
fit_pres = self.pressure[:beg_compress]
fit_pres[0:9] = fit_pres[10]
self.linear_fit = np.polyfit(time, fit_pres, 1)
def find_EOC(self):
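# Step backwards from the global peak while the pressure is still
# rising, then take EOC as the maximum before that point; if that
# candidate is indistinguishable from the early pressure, fall back
# to the global peak.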
self.max_p = np.amax(self.pressure)
self.max_p_idx = np.argmax(self.pressure)
minpi = self.max_p_idx - 100
while self.pressure[minpi] >= self.pressure[minpi - 100]:
minpi -= 1
self.p_EOC = np.amax(self.pressure[0:minpi])
self.p_EOC_idx = np.argmax(self.pressure[0:minpi])
diff = abs(self.pressure[self.p_EOC_idx] - self.pressure[15])
if diff < 5:
self.p_EOC, self.p_EOC_idx = self.max_p, self.max_p_idx
def derivative(self):
"""
"""
m = len(self.pressure)
self.dpdt = np.zeros(m)
for i in range(m-2):
self.dpdt[i] = (-self.pressure[i+2] + 4*self.pressure[i+1] -
3*self.pressure[i])/(2*(self.time[i+1] -
self.time[i]))
self.dpdt[np.isinf(self.dpdt)] = 0
class PressureFromTemperature(PressureTrace):
"""Class for pressure trace computed from a temperature trace."""
def __init__(self, temperature, P_in):
"""Create a pressure trace given a temperature trace.
The required method to set the temperature and entropy of the
Solution to set the state are not implemented, so this method
is a stub for now.
"""
pass
# gas = ct.Solution('species.cti')
# gas.TP = temperature[0], P_in
# initial_entropy = gas.entropy_mass
# self.pressure = np.zeros((len(temperature)))
# for i, v in enumerate(temperature):
# gas.ST = initial_entropy, temperature[i]
class PressureFromVolume(PressureTrace):
""" Class for pressure trace computed from a volume trace."""
def __init__(self, volume, P_in, T_in=None):
gas = ct.Solution('species.cti')
if cantera_version[2] >= 1 and cantera_version[1] >= 2:
gas.DP = 1.0/volume[0], P_in*one_bar_in_pa
elif T_in is None:
raise OSError
else:
gas.TP = T_in, P_in
initial_volume = gas.volume_mass
initial_entropy = gas.entropy_mass
self.pressure = np.zeros((len(volume)))
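# March along the isentrope: entropy is held at its initial value
# while the specific volume follows the volume trace, and Cantera
# returns the matching pressure at each point.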
for i, v in enumerate(volume):
gas.SV = initial_entropy, v*initial_volume
self.pressure[i] = gas.P/one_bar_in_pa
class ReactivePressureTrace(PressureTrace, ParsedFilename):
"""Class for reactive pressure traces."""
def __init__(self):
filename = input('Filename: ')
self.file_loader(filename)
super().__init__(filename)
initial_pressure_in_bar = self.pin*one_atm_in_bar/one_atm_in_torr
self.pres = (self.voltage[:, 1] - self.voltage[0, 1])*self.factor
self.pres += initial_pressure_in_bar
self.time = self.voltage[:, 0]
self.smoothing()
self.derivative()
self.find_EOC()
class NonReactivePressureTrace(PressureTrace):
"""Class for non-reactive pressure traces."""
def __init__(self, filename):
self.file_loader(filename)
file_info = ParsedFilename(filename)
initial_pressure_in_bar = file_info.pin*one_atm_in_bar/one_atm_in_torr
self.pres = (self.voltage[:, 1] - self.voltage[0, 1])*file_info.factor
self.pres += initial_pressure_in_bar
self.time = self.voltage[:, 0]
self.smoothing()
self.find_EOC()
class SimulatedPressureTrace(PressureTrace):
"""Class for pressure traces derived from simulations."""
def __init__(self, filename='export.csv'):
self.data = np.genfromtxt(filename, delimiter=',', names=True)
self.pres = self.data['Pressure_(bar)']
self.time = self.data['Time_(sec)']
def derivative(self):
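# Three-point Lagrange differentiation: second-order accurate even on
# a non-uniform time grid.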
m = len(self.pres)
self.dpdt = np.zeros(m)
for i in range(1, m-2):
x = self.time[i]
x_min = self.time[i-1]
x_plu = self.time[i+1]
y = self.pres[i]
y_min = self.pres[i-1]
y_plu = self.pres[i+1]
self.dpdt[i] = (y_min*(x - x_plu)/((x_min - x)*(x_min - x_plu)) +
y*(2*x - x_min - x_plu)/((x - x_min)*(x - x_plu)) +
y_plu*(x - x_min)/((x_plu - x_min)*(x_plu - x)))
|
Python
| 0.000004
|
@@ -1117,24 +1117,25 @@
08*self.
-samp
freq
+uency
)%0A
@@ -1186,16 +1186,17 @@
elf.
-samp
freq
+uency
, be
|
7ca9e20b57153e3f42a61b8596ca43d264a89014
|
Bump version number
|
VMEncryption/main/Common.py
|
VMEncryption/main/Common.py
|
#!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2015 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class CommonVariables:
utils_path_name = 'Utils'
extension_name = 'AzureDiskEncryptionForLinux'
extension_version = '0.1.0.999175'
extension_type = extension_name
extension_media_link = 'https://amextpaas.blob.core.windows.net/prod/' + extension_name + '-' + str(extension_version) + '.zip'
extension_label = 'Windows Azure VMEncryption Extension for Linux IaaS'
extension_description = extension_label
"""
disk/file system related
"""
sector_size = 512
luks_header_size = 4096 * 512
default_block_size = 52428800
min_filesystem_size_support = 52428800 * 3
# TODO: for SLES 11, we should use ext3
default_file_system = 'ext4'
default_mount_name = 'encrypted_disk'
dev_mapper_root = '/dev/mapper/'
disk_by_id_root = '/dev/disk/by-id'
"""
parameter key names
"""
PassphraseFileNameKey = 'BekFileName'
KeyEncryptionKeyURLKey = 'KeyEncryptionKeyURL'
KeyVaultURLKey = 'KeyVaultURL'
AADClientIDKey = 'AADClientID'
KeyEncryptionAlgorithmKey = 'KeyEncryptionAlgorithm'
DiskFormatQuerykey = "DiskFormatQuery"
PassphraseKey = 'Passphrase'
"""
value for VolumeType could be OS or Data
"""
VolumeTypeKey = 'VolumeType'
AADClientSecretKey = 'AADClientSecret'
SecretUriKey = 'SecretUri'
VolumeTypeOS = 'OS'
VolumeTypeData = 'Data'
VolumeTypeAll = 'All'
SupportedVolumeTypes = [ VolumeTypeOS, VolumeTypeData, VolumeTypeAll ]
"""
command types
"""
EnableEncryption = 'EnableEncryption'
EnableEncryptionFormat = 'EnableEncryptionFormat'
DisableEncryption = 'DisableEncryption'
QueryEncryptionStatus = 'QueryEncryptionStatus'
"""
encryption config keys
"""
EncryptionEncryptionOperationKey = 'EncryptionOperation'
EncryptionDecryptionOperationKey = 'DecryptionOperation'
EncryptionVolumeTypeKey = 'VolumeType'
EncryptionDiskFormatQueryKey = 'DiskFormatQuery'
"""
crypt ongoing item config keys
"""
OngoingItemMapperNameKey = 'MapperName'
OngoingItemHeaderFilePathKey = 'HeaderFilePath'
OngoingItemOriginalDevNamePathKey = 'DevNamePath'
OngoingItemOriginalDevPathKey = 'DevicePath'
OngoingItemPhaseKey = 'Phase'
OngoingItemHeaderSliceFilePathKey = 'HeaderSliceFilePath'
OngoingItemFileSystemKey = 'FileSystem'
OngoingItemMountPointKey = 'MountPoint'
OngoingItemDeviceSizeKey = 'Size'
OngoingItemCurrentSliceIndexKey = 'CurrentSliceIndex'
OngoingItemFromEndKey = 'FromEnd'
OngoingItemCurrentDestinationKey = 'CurrentDestination'
OngoingItemCurrentTotalCopySizeKey = 'CurrentTotalCopySize'
OngoingItemCurrentLuksHeaderFilePathKey = 'CurrentLuksHeaderFilePath'
OngoingItemCurrentSourcePathKey = 'CurrentSourcePath'
OngoingItemCurrentBlockSizeKey = 'CurrentBlockSize'
"""
encryption phase definitions
"""
EncryptionPhaseBackupHeader = 'BackupHeader'
EncryptionPhaseCopyData = 'EncryptingData'
EncryptionPhaseRecoverHeader = 'RecoverHeader'
EncryptionPhaseEncryptDevice = 'EncryptDevice'
EncryptionPhaseDone = 'Done'
"""
decryption phase constants
"""
DecryptionPhaseCopyData = 'DecryptingData'
DecryptionPhaseDone = 'Done'
"""
logs related
"""
InfoLevel = 'Info'
WarningLevel = 'Warning'
ErrorLevel = 'Error'
"""
error codes
"""
extension_success_status = 'success'
extension_error_status = 'error'
process_success = 0
success = 0
os_not_supported = 1
luks_format_error = 2
scsi_number_not_found = 3
device_not_blank = 4
environment_error = 5
luks_open_error = 6
mkfs_error = 7
folder_conflict_error = 8
mount_error = 9
mount_point_not_exists = 10
passphrase_too_long_or_none = 11
parameter_error = 12
create_encryption_secret_failed = 13
encrypttion_already_enabled = 14
passphrase_file_not_found = 15
command_not_support = 16
volue_type_not_support = 17
copy_data_error = 18
encryption_failed = 19
tmpfs_error = 20
backup_slice_file_error = 21
unmount_oldroot_error = 22
unknown_error = 100
class TestHooks:
search_not_only_ide = False
use_hard_code_passphrase = False
hard_code_passphrase = "Quattro!"
class DeviceItem(object):
def __init__(self):
#NAME,TYPE,FSTYPE,MOUNTPOINT,LABEL,UUID,MODEL
self.name = None
self.type = None
self.file_system = None
self.mount_point = None
self.label = None
self.uuid = None
self.model = None
self.size = None
def __str__(self):
return "name:" + str(self.name) + " type:" + str(self.type) + " fstype:" + str(self.file_system) + " mountpoint:" + str(self.mount_point) + " label:" + str(self.label) + " model:" + str(self.model)
class CryptItem(object):
def __init__(self):
self.mapper_name = None
self.dev_path = None
self.mount_point = None
self.file_system = None
self.luks_header_path = None
self.uses_cleartext_key = None
def __str__(self):
return ("name: " + str(self.mapper_name) + " dev_path:" + str(self.dev_path) +
" mount_point:" + str(self.mount_point) + " file_system:" + str(self.file_system) +
" luks_header_path:" + str(self.luks_header_path) +
" uses_cleartext_key:" + str(self.uses_cleartext_key))
|
Python
| 0.000002
|
@@ -769,17 +769,17 @@
.0.99917
-5
+6
'%0A ex
|
5221630769f53853fea490e04301c2a19711894c
|
Fix #296. Generate name according to profile's sex. Thanks @Dutcho for the report
|
faker/providers/profile/__init__.py
|
faker/providers/profile/__init__.py
|
# coding=utf-8
from .. import BaseProvider
import itertools
class Provider(BaseProvider):
"""
This provider is a collection of functions to generate personal profiles and identities.
"""
def simple_profile(self):
"""
Generates a basic profile with personal information
"""
return {
"username": self.generator.user_name(),
"name": self.generator.name(),
"sex": self.random_element(["M", "F"]),
"address": self.generator.address(),
"mail": self.generator.free_email(),
#"password":self.generator.password()
"birthdate": self.generator.date(),
}
def profile(self, fields=None):
"""
Generates a complete profile.
If "fields" is not empty, only the fields in the list will be returned
"""
if fields is None:
fields = []
d = {
"job": self.generator.job(),
"company": self.generator.company(),
"ssn": self.generator.ssn(),
"residence": self.generator.address(),
"current_location": (self.generator.latitude(), self.generator.longitude()),
"blood_group": "".join(self.random_element(list(itertools.product(["A", "B", "AB", "0"], ["+", "-"])))),
"website": [self.generator.url() for i in range(1, self.random_int(2, 5))]
}
d = dict(d, **self.generator.simple_profile())
#field selection
if len(fields) > 0:
d = dict((k, v) for (k, v) in d.items() if k in fields)
return d
|
Python
| 0.000669
|
@@ -325,16 +325,68 @@
-return %7B
+sex = self.random_element(%5B%22F%22, %22M%22%5D)%0A if sex == 'F':
%0A
@@ -398,19 +398,14 @@
-%22user
name
-%22:
+ =
sel
@@ -420,20 +420,46 @@
tor.
-user_name(),
+name_female()%0A elif sex == 'M':
%0A
@@ -471,15 +471,14 @@
-%22
name
-%22:
+ =
sel
@@ -493,19 +493,40 @@
tor.name
-(),
+_male()%0A return %7B
%0A
@@ -535,19 +535,24 @@
%22
+u
se
-x
+rname
%22: self.
rand
@@ -551,34 +551,79 @@
elf.
-random_element(%5B%22M%22, %22F%22%5D)
+generator.user_name(),%0A %22name%22: name,%0A %22sex%22: sex
,%0A
|
981a74b116081f3ce1d97262c3c88104a953cdf4
|
Use numpy's float supporting range
|
saau/sections/misc/header.py
|
saau/sections/misc/header.py
|
import matplotlib.pyplot as plt
from operator import gt, lt, itemgetter
from lxml.etree import fromstring, XMLSyntaxError
def frange(start, stop, step):
cur = start
op = gt if start > stop else lt
while op(cur, stop):
yield cur
cur += step
def parse_lines(lines):
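# Lines may carry simple inline markup such as <b> or <i>; collect the
# tag names for styling and keep only the innermost text.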
for line in lines:
try:
xml_line = fromstring(line.encode('utf-8'))
except XMLSyntaxError:
attrs = []
else:
attrs = [thing.tag for thing in xml_line.getiterator()]
line = list(xml_line.getiterator())[-1].text
yield line, attrs
def render_header_to(ax, sy, lines, sx=0.5):
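# Lay the lines out top-down in figure coordinates: integer steps from
# sy down to 0, scaled to twentieths of the figure height.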
calc = lambda q: q / 20
y_points = map(calc, frange(sy, 0, -1))
parsed = list(parse_lines(lines))
lines = map(itemgetter(0), parsed)
line_attrs = map(itemgetter(1), parsed)
lines = [
ax.figure.text(sx, y, text, ha='center')
for y, text in zip(y_points, lines)
]
for idx, attrs in enumerate(line_attrs):
if 'b' in attrs:
lines[idx].set_weight('extra bold')
if 'i' in attrs:
lines[idx].set_style('italic')
return ax
class Header:
__init__ = lambda self, _, a: None
has_required_data = lambda _: True
def build_image(self):
ax = plt.axes()
render_header_to(ax)
plt.show()
return ax
|
Python
| 0.000001
|
@@ -49,16 +49,8 @@
port
- gt, lt,
ite
@@ -112,155 +112,27 @@
or%0A%0A
-%0Adef frange(start, stop, step):%0A cur = start%0A op = gt if start %3E stop else lt%0A%0A while op(cur, stop):%0A yield cur%0A cur += step
+import numpy as np%0A
%0A%0A%0Ad
@@ -566,17 +566,20 @@
p(calc,
-f
+np.a
range(sy
|
0100816670ad988e51ffbce1d3c2725f8ed69439
|
change kurisu command to Zoidbot
|
addons/extras.py
|
addons/extras.py
|
import datetime
import discord
import os
import random
import re
import string
from discord.ext import commands
from sys import argv
class Extras:
"""
Extra things.
"""
def __init__(self, bot):
self.bot = bot
print('Addon "{}" loaded'.format(self.__class__.__name__))
prune_key = "nokey"
@commands.command()
async def kurisu(self):
"""About Kurisu"""
embed = discord.Embed(title="Kurisu", color=discord.Color.green())
embed.set_author(name="916253 and ihaveahax")
embed.set_thumbnail(url="http://i.imgur.com/hjVY4Et.jpg")
embed.url = "https://github.com/916253/Kurisu"
embed.description = "Kurisu, the Nintendo Homebrew Discord bot!"
await self.bot.say("", embed=embed)
@commands.command()
async def membercount(self):
"""Prints the member count of the server."""
await self.bot.say("{} has {:,} members!".format(self.bot.server.name, self.bot.server.member_count))
@commands.has_permissions(ban_members=True)
@commands.command(hidden=True)
async def embedtext(self, *, text):
"""Embed content."""
await self.bot.say(embed=discord.Embed(description=text))
@commands.command(hidden=True)
async def timedelta(self, length):
# thanks Luc#5653
units = {
"d": 86400,
"h": 3600,
"m": 60,
"s": 1
}
seconds = 0
match = re.findall("([0-9]+[smhd])", length) # Thanks to 3dshax server's former bot
if match is None:
return None
for item in match:
seconds += int(item[:-1]) * units[item[-1]]
curr = datetime.datetime.now()
diff = datetime.timedelta(seconds=seconds)
# http://stackoverflow.com/questions/2119472/convert-a-timedelta-to-days-hours-and-minutes
days, hours, minutes = diff.days, diff.seconds//3600, (diff.seconds//60)%60
msg = "```\ncurr: {}\nnew: {}\ndiff: {}\n```".format(
curr,
curr + diff,
diff
)
await self.bot.say(msg)
@commands.has_permissions(manage_nicknames=True)
@commands.command()
async def estprune(self, days=30):
"""Estimate count of members that would be pruned based on the amount of days. Staff only."""
if days > 30:
await self.bot.say("Maximum 30 days")
return
if days < 1:
await self.bot.say("Minimum 1 day")
return
msg = await self.bot.say("I'm figuring this out!".format(self.bot.server.name))
count = await self.bot.estimate_pruned_members(server=self.bot.server, days=days)
await self.bot.edit_message(msg, "{:,} members inactive for {} day(s) would be kicked from {}!".format(count, days, self.bot.server.name))
@commands.has_permissions(manage_nicknames=True)
@commands.command(pass_context=True)
async def prune30(self, ctx, key=""):
"""Prune members that are inactive for 30 days. Staff only."""
if self.bot.pruning > 0:
await self.bot.say("Pruning is already in progress.")
return
if key != self.prune_key:
if key != "":
await self.bot.say("That's not the correct key.")
self.prune_key = ''.join(random.sample(string.ascii_letters, 8))
await self.bot.say("Are you sure you want to prune members inactive for 30 days?\nTo see how many members get kicked, use `.estprune`.\nTo confirm the prune, use the command `.prune30 {}`.".format(self.prune_key))
return
self.prune_key = ''.join(random.sample(string.ascii_letters, 8))
await self.bot.say("Starting pruning!")
count = await self.bot.prune_members(self.bot.server, days=30)
self.bot.pruning = count
await self.bot.send_message(self.bot.mods_channel, "{:,} are currently being kicked from {}!".format(count, self.bot.server.name))
msg = "👢 **Prune**: {} pruned {:,} members".format(ctx.message.author.mention, count)
await self.bot.send_message(self.bot.modlogs_channel, msg)
@commands.has_permissions(manage_nicknames=True)
@commands.command()
async def disableleavelogs(self):
"""DEBUG COMMAND"""
self.bot.pruning = True
await self.bot.say("disable")
@commands.has_permissions(manage_nicknames=True)
@commands.command()
async def enableleavelogs(self):
"""DEBUG COMMAND"""
self.bot.pruning = False
await self.bot.say("enable")
@commands.has_permissions(administrator=True)
@commands.command(pass_context=True, hidden=True)
async def dumpchannel(self, ctx, channel_name, limit=100):
"""Dump 100 messages from a channel to a file."""
channel = ctx.message.channel_mentions[0]
await self.bot.say("Dumping {} messages from {}".format(limit, channel.mention))
os.makedirs("#{}-{}".format(channel.name, channel.id), exist_ok=True)
async for message in self.bot.logs_from(channel, limit=limit):
with open("#{}-{}/{}.txt".format(channel.name, channel.id, message.id), "w") as f:
f.write(message.content)
await self.bot.say("Done!")
@commands.command(pass_context=True, hidden=True)
async def togglechannel(self, ctx, channelname):
"""Enable or disable access to specific channels."""
author = ctx.message.author
await self.bot.delete_message(ctx.message)
if channelname == "elsewhere":
if self.bot.elsewhere_role in author.roles:
await self.bot.remove_roles(author, self.bot.elsewhere_role)
await self.bot.send_message(author, "Access to #elsewhere removed.")
else:
await self.bot.add_roles(author, self.bot.elsewhere_role)
await self.bot.send_message(author, "Access to #elsewhere granted.")
else:
await self.bot.send_message(author, "{} is not a valid toggleable channel.".format(channelname))
def setup(bot):
bot.add_cog(Extras(bot))
|
Python
| 0.000017
|
@@ -362,14 +362,15 @@
def
-kurisu
+zoidbot
(sel
@@ -390,22 +390,23 @@
%22%22About
-Kurisu
+Zoidbot
%22%22%22%0A
@@ -438,22 +438,23 @@
(title=%22
-Kurisu
+Zoidbot
%22, color
@@ -472,13 +472,11 @@
lor.
-g
re
-en
+d
())%0A
@@ -510,28 +510,19 @@
me=%22
-916253 and ihaveahax
+T3CHNOLOG1C
%22)%0A
@@ -630,21 +630,27 @@
com/
-916253/Kurisu
+T3CHNOLOG1C/Zoidbot
%22%0A
@@ -680,37 +680,30 @@
= %22
-Kurisu, the Nintendo Homebrew
+Zoidbot, T3CHNOLOG1C's
Dis
|
4e2fa3a249b9027f9e50f0f957163cad3bdc28bf
|
Fix typo
|
src/gramcore/features/points.py
|
src/gramcore/features/points.py
|
"""Functions for extracting interest points.
These are applied to numpy.arrays representing images.
"""
import numpy
from skimage import feature
def harris(parameters):
"""Harris interest point operator.
It wraps `skimage.feature.harris`. The `threshold`, `eps` and
`gaussian_deviation` options are not supported.
This function returns an array of 0s and 1s. Harris points are marked
with 1s. This way the result can be easily transformed to an image. It
works on RGB and greyscale images.
The wrapped function returns a set of point coordinates in a list. For
some reason it is not possible to do something like:
>>> points = feature.harris(data, min_distance=5)
>>> data[points] = 1
Instead a for loop is used.
:param parameters['data'][0]: input array
:type parameters['data'][0]: numpy.array
:param parameters['min_distance']: minimum number of pixels separating
interest points and image boundary,
defaults to 10
:type parameters['min_distance']: float
:return: numpy.array containing 1s where points were found and 0s elsewhere
"""
data = parameters['data'][0]
min_distance = parameters.get('min_distance', 10)
points = feature.harris(data, min_distance=pmin_distance)
result = numpy.zeros((data.shape[0], data.shape[1]), dtype='uint8')
for point in points:
result[point[0], point[1]] = 1
return result
|
Python
| 0.999999
|
@@ -743,34 +743,175 @@
-Instead a for loop is used
+so a for loop is used.%0A%0A .. note::%0A%0A The coordinates returned are not directly on the corner, but a pixel%0A inside the object (TODO: is this expected?)
.%0A%0A
@@ -1471,17 +1471,16 @@
istance=
-p
min_dist
|
0e72816ac15652953c5d02a7543c72567d0bc069
|
Update utils.py
|
django_libs/utils.py
|
django_libs/utils.py
|
"""Additional helpful utility functions."""
import random
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from bs4 import BeautifulSoup
from HTMLParser import HTMLParser
from .loaders import load_member_from_setting
class conditional_decorator(object):
"""
Allows you to use decorators based on a condition.
Useful to require login only if a setting is set::
@conditional_decorator(method_decorator(login_required), settings.FOO)
def dispatch(self, request, *args, **kwargs):
return super(...).dispatch(...)
"""
def __init__(self, dec, condition):
self.decorator = dec
self.condition = condition
def __call__(self, func):
if not self.condition:
# Return the function unchanged, not decorated.
return func
return self.decorator(func)
def create_random_string(length=7, chars='ABCDEFGHJKMNPQRSTUVWXYZ23456789',
repetitions=False):
"""
Returns a random string, based on the provided arguments.
It returns capital letters and numbers by default.
Ambiguous characters are left out, repetitions will be avoided.
"""
if repetitions:
return ''.join(random.choice(chars) for _ in range(length))
return ''.join(random.sample(chars, length))
def get_profile(user):
"""
Makes sure to always return a valid profile for the user.
If none exists, it creates one.
:user: A Django ``User`` instance.
"""
# try if we get a profile via the regular method
try:
return user.get_profile()
except ObjectDoesNotExist:
pass
# check if we set a custom method for profile fetching
setting = getattr(settings, 'GET_PROFILE_METHOD', None)
if setting:
method = load_member_from_setting('GET_PROFILE_METHOD')
return method(user)
app_label, model_name = settings.AUTH_PROFILE_MODULE.split('.')
# the models.get_model method allows to read load the model from the app's
# model cache to allow the setting to be written as 'app_name.ModelName'
profile_cls = models.get_model(app_label, model_name)
return profile_cls.objects.create(user=user)
class HTML2PlainParser(HTMLParser):
"""Custom html parser to convert html code to plain text."""
def __init__(self):
self.reset()
self.text = '' # Used to push the results into a variable
self.links = [] # List of aggregated links
# Settings
self.ignored_elements = getattr(
settings, 'HTML2PLAINTEXT_IGNORED_ELEMENTS',
['html', 'head', 'style', 'meta', 'title', 'img']
)
self.newline_before_elements = getattr(
settings, 'HTML2PLAINTEXT_NEWLINE_BEFORE_ELEMENTS',
['br', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'div', 'p', 'li']
)
self.newline_after_elements = getattr(
settings, 'HTML2PLAINTEXT_NEWLINE_AFTER_ELEMENTS',
['h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'div', 'p', 'td']
)
self.stroke_before_elements = getattr(
settings, 'HTML2PLAINTEXT_STROKE_BEFORE_ELEMENTS',
['tr']
)
self.stroke_after_elements = getattr(
settings, 'HTML2PLAINTEXT_STROKE_AFTER_ELEMENTS',
['tr']
)
self.stroke_text = getattr(settings, 'HTML2PLAINTEXT_STROKE_TEXT',
'------------------------------\n')
def handle_starttag(self, tag, attrs):
"""Handles every start tag like e.g. <p>."""
if (tag in self.newline_before_elements):
self.text += '\n'
if (tag in self.stroke_before_elements
and not self.text.endswith(self.stroke_text)):
# Put a stroke in front of every relevant element, if there is some
# content between it and its predecessor
self.text += self.stroke_text
if tag == 'a':
# If it's a link, append it to the link list
for attr in attrs:
if attr[0] == 'href':
self.links.append((len(self.links) + 1, attr[1]))
def handle_data(self, data):
"""Handles data between tags."""
# Only proceed with unignored elements
if self.lasttag not in self.ignored_elements:
# Remove any predefined linebreaks
text = data.replace('\n', '')
# If there's some text left, proceed!
if text:
if self.lasttag == 'li':
# Use a special prefix for list elements
self.text += ' * '
self.text += text
if self.lasttag in self.newline_after_elements:
# Add a linebreak at the end of the content
self.text += '\n'
def handle_endtag(self, tag):
"""Handles every end tag like e.g. </p>."""
if tag in self.stroke_after_elements:
if self.text.endswith(self.stroke_text):
# Only add a stroke if there isn't already a stroke posted
# In this case, there was no content between the tags, so
# remove the starting stroke
self.text = self.text[:-len(self.stroke_text)]
else:
# If there's no linebreak before the stroke, add one!
if not self.text.endswith('\n'):
self.text += '\n'
self.text += self.stroke_text
if tag == 'a':
# If it's a link, add a footnote
self.text += '[{}]'.format(len(self.links))
elif tag == 'br' and self.text and not self.text.endswith('\n'):
# If it's a break, check if there's no break at the end of the
# content. If there's none, add one!
self.text += '\n'
# Reset the lasttag, otherwise this parser can get confused if the
# next element is not wrapped in a new tag.
if tag == self.lasttag:
self.lasttag = None
def html_to_plain_text(html):
"""Converts html code into formatted plain text."""
# Use BeautifulSoup to normalize the html
soup = BeautifulSoup(html, "lxml")
# Init the parser
parser = HTML2PlainParser()
parser.feed(str(soup))
# Strip the end of the plain text
result = parser.text.rstrip()
# Add footnotes
if parser.links:
result += '\n\n'
for link in parser.links:
result += '[{}]: {}\n'.format(link[0], link[1])
return result
|
Python
| 0.000001
|
@@ -6253,12 +6253,19 @@
l, %22
-lxml
+html.parser
%22)%0A
|
f4bbb244716f9471b520f53ebffaf34a31503cd1
|
Remove unused imports (besides they are Py 2.x only)
|
Web/scripts/CPWeb/__init__.py
|
Web/scripts/CPWeb/__init__.py
|
"""
CPWeb - A collection of commonly used routines to produce CoolProp's online documentation
=====
"""
from __future__ import division, absolute_import, print_function
import codecs
import csv
import cStringIO
def get_version():
return 5.0
if __name__ == "__main__":
print('You are using version %s of the Python package for creating CoolProp\' online documentation.'%(get_version()))
print()
|
Python
| 0
|
@@ -168,52 +168,8 @@
on%0A%0A
-import codecs%0Aimport csv%0Aimport cStringIO%0A%0A%0A
def
|
3b77fbb82d2ba098f00f7221070f9610d0d90809
|
add unknown person
|
game.py
|
game.py
|
import random
from adventurelib import Item, Bag, when, start
import rooms
import characters
from sys import exit
people = '123456'
rooms = 'abcdef'
# murder configuration
# who was where
# who is the murderer
# current configuration
# who was where
# player location
murder_config_people = list(people)
random.shuffle(murder_config_people)
murder_location = random.choice(rooms)
murderer = people[rooms.find(murder_location)]
current_config_people = list(people)
random.shuffle(current_config_people)
current_location = random.choice(rooms)
@when('where am i')
def my_room():
print("I am in: " , current_location)
@when('go to ROOM')
@when('go to the ROOM')
def to_room(room):
if room in rooms:
print("I am now in %s" % room)
global current_location
current_location = room
else:
print("I can't find the %s" % room)
@when('it was M')
def accuse(m):
if m == murderer:
print ("Yes, %s is the murderer!" % m)
exit
else:
print ("%s said: 'How could you!'" % m)
start()
|
Python
| 0.999999
|
@@ -603,17 +603,16 @@
am in: %22
-
, curren
@@ -984,32 +984,60 @@
exit%0A else:%0A
+ if m in people:%0A
print (%22
@@ -1072,11 +1072,85 @@
m)%0A
+ else:%0A print (%22No one has ever heard of '%25s'!%22 %25 m)%0A%0A%0A
start()
+%0A
|
55ff20aa2d2504fb85fa2f63cc9b52934245b849
|
make the subscription also work for new minions, fixes #8
|
saltobserver/redis_stream.py
|
saltobserver/redis_stream.py
|
from saltobserver import app, redis_pool
import gevent
from redis import Redis
from distutils.version import StrictVersion
import json
import time
class RedisStream(object):
def __init__(self):
self.redis = Redis(connection_pool=redis_pool)
actual_version = StrictVersion(self.redis.info()['redis_version'])
minimum_version = StrictVersion("2.8.0")
if actual_version < minimum_version:
raise NotImplementedError
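# 'K' enables keyspace notifications (published on
# __keyspace@<db>__:<key>); 's' restricts them to set-type commands.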
self.redis.config_set('notify-keyspace-events', 'Ks')
self.pubsub = self.redis.pubsub()
# TODO: update subscription on newcomer minions
self.pubsub.psubscribe(["__keyspace@0__:{0}:*.*".format(minion) for minion in self.redis.smembers('minions')])
self.clients = list()
def _generator(self):
for message in self.pubsub.listen():
if message['type'] == 'pmessage':
app.logger.debug("Message received from Redis, building data packet.")
minion_id = message['channel'].split(':')[1]
function = message['channel'].split(':')[2]
jid = self.redis.lindex('{0}:{1}'.format(minion_id, function), 0)
success = True if json.loads(self.redis.get('{0}:{1}'.format(minion_id, jid))).get('retcode') == 0 else False
try:
timestamp = time.strptime(jid, "%Y%m%d%H%M%S%f")
except ValueError:
continue # do not pass info with faked jid's
yield dict(minion_id=minion_id, function=function, jid=jid, success=success, time=time.strftime('%Y-%m-%d, at %H:%M:%S', timestamp))
def register(self, client, function):
self.clients.append((client, function))
app.logger.debug("Client %s (function %s) registered." % (client, function))
def send_or_discard_connection(self, client_tupl, data):
client, function = client_tupl
try:
client.send(json.dumps(data))
app.logger.debug("Data for jid %s sent to %s (function %s)" % (data['jid'], client, function))
except Exception as e: # TODO: this is either a ValueError from json, or some other exception from gevents websocket stuff
self.clients.remove(client_tupl)
app.logger.debug("%s (function %s) removed with reason: %s" % (client, function, e))
def run(self):
for data in self._generator():
sent = 0
for client, function in self.clients:
if data['function'] == function:
gevent.spawn(self.send_or_discard_connection, (client, function), data)
sent = sent + 1
app.logger.debug("Attempted to send data packet sent to %s of %s clients." % (sent, len(self.clients)))
def start(self):
gevent.spawn(self.run)
|
Python
| 0
|
@@ -566,64 +566,8 @@
b()%0A
- # TODO: update subscription on newcomer minions%0A
@@ -593,17 +593,16 @@
bscribe(
-%5B
%22__keysp
@@ -613,78 +613,74 @@
0__:
-%7B0%7D
+*
:*.*%22
-.format(minion) for minion in self.redis.smembers('minions')%5D)
+)%0A # TODO: make redis db number (this %5E) configurable
%0A
|
388eebc7cd168b5fcb31f737741673750bbb2a16
|
rename variable to reflect reality
|
examples/circuit_failure_rates.py
|
examples/circuit_failure_rates.py
|
#!/usr/bin/env python
##
## This example uses ICircuitListener to monitor how many circuits have
## failed since the monitor started up. If this figure is more than 50%,
## a warning-level message is logged.
##
## Like the :ref:`stream_circuit_logger.py` example, we also log all new
## circuits.
##
import os
import sys
import random
import signal
from twisted.internet import reactor, task
from twisted.internet.endpoints import TCP4ClientEndpoint
from twisted.python import usage
from zope.interface import implements
import txtorcon
class Options(usage.Options):
"""
command-line options we understand
"""
optParameters = [
['failed', 'f', 0, 'Starting value for number of failed circuits.'],
['built', 'b', 0, 'Starting value for the total number of built cicuits.']
]
class CircuitFailureWatcher(txtorcon.CircuitListenerMixin):
total_circuits = 0
failed_circuits = 0
percent = 0.0
failed_circuit_ids = []
per_guard_built = {}
per_guard_failed = {}
def print_update(self):
print self.information()
def update_percent(self):
self.percent = 100.0 * (float(self.failed_circuits) / float(self.total_circuits + self.failed_circuits))
if self.percent > 50.0:
print 'WARNING: %02.1f percent of all routes have failed: %d failed, %d built' % (self.percent, self.failed_circuits, self.total_circuits)
def information(self):
rtn = '%02.1f%% of all circuits have failed: %d failed, %d built' % (self.percent, self.failed_circuits, self.total_circuits)
for g in self.per_guard_built.keys():
per_guard_percent = 100.0*(self.per_guard_failed[g]/(self.per_guard_built[g]+self.per_guard_failed[g]))
rtn = rtn + '\n %s: %d built, %d failed: %02.1f%%' % (g, self.per_guard_built[g], self.per_guard_failed[g],
per_guard_percent)
return rtn
def circuit_built(self, circuit):
"""ICircuitListener API"""
if circuit.purpose == 'GENERAL':
if len(circuit.path) > 0 and circuit.path[0] not in self.state.entry_guards.values():
print "WEIRD: first circuit hop not in entry guards:",circuit,circuit.path
return
self.total_circuits += 1
self.update_percent()
if len(circuit.path) != 3 and len(circuit.path) != 4:
print "WEIRD: circuit has odd pathlength:",circuit,circuit.path
try:
self.per_guard_built[circuit.path[0].unique_name] += 1
except KeyError:
self.per_guard_built[circuit.path[0].unique_name] = 1.0
self.per_guard_failed[circuit.path[0].unique_name] = 0.0
def circuit_failed(self, circuit, reason):
"""ICircuitListener API"""
if circuit.purpose == 'GENERAL':
if len(circuit.path) > 0 and circuit.path[0] not in self.state.entry_guards.values():
print "WEIRD: first circuit hop not in entry guards:",circuit,circuit.path
return
self.failed_circuits += 1
print "failed",circuit.id
if not circuit.id in self.failed_circuit_ids:
self.failed_circuit_ids.append(circuit.id)
else:
print "WARNING: duplicate message for",circuit
if len(circuit.path) > 0:
try:
self.per_guard_failed[circuit.path[0].unique_name] += 1
except KeyError:
self.per_guard_failed[circuit.path[0].unique_name] = 1.0
self.per_guard_built[circuit.path[0].unique_name] = 0.0
self.update_percent()
listener = CircuitFailureWatcher()
def setup(state):
print 'Connected to a Tor version %s' % state.protocol.version
global options, listener
if options['failed']:
listener.failed_circuits = int(options['failed'])
if options['built']:
listener.total_circuits = int(options['built'])
listener.state = state # FIXME use ctor (ditto for options, probably)
for circ in filter(lambda x: x.purpose == 'GENERAL', state.circuits.values()):
if circ.state == 'BUILT':
listener.circuit_built(circ)
state.add_circuit_listener(listener)
# print an update every minute
task.LoopingCall(listener.print_update).start(60.0)
def setup_failed(arg):
print "SETUP FAILED",arg
print arg
reactor.stop()
options = Options()
options.parseOptions(sys.argv[1:])
def on_shutdown(*args):
global listener
print 'To carry on where you left off, run:'
print ' %s --failed %d --built %d' % (sys.argv[0], listener.failed_circuits, listener.total_circuits)
reactor.addSystemEventTrigger('before', 'shutdown', on_shutdown)
print "Connecting to localhost:9051 with AUTHCOOKIE authentication..."
d = txtorcon.build_tor_connection(TCP4ClientEndpoint(reactor, "localhost", 9051),
build_state=True)
d.addCallback(setup).addErrback(setup_failed)
reactor.run()
|
Python
| 0.000248
|
@@ -885,21 +885,21 @@
):%0A%0A
-total
+built
_circuit
@@ -1187,29 +1187,29 @@
float(self.
-total
+built
_circuits +
@@ -1390,37 +1390,37 @@
_circuits, self.
-total
+built
_circuits)%0A%0A
@@ -1556,29 +1556,29 @@
cuits, self.
-total
+built
_circuits)%0A
@@ -2321,21 +2321,21 @@
self.
-total
+built
_circuit
@@ -4057,37 +4057,37 @@
listener.
-total
+built
_circuits = int(
@@ -4813,21 +4813,21 @@
istener.
-total
+built
_circuit
|
144a35d639ccd3a60f100793df00fd62aa81766b
|
document no trust algo
|
game.py
|
game.py
|
"""
For player in game:
if current player:
send move
else:
listen for move
receive move
decide winner
"""
|
Python
| 0
|
@@ -1,9 +1,27 @@
%22%22%22%0A
-F
+Play with trust:%0A%0Af
or p
@@ -106,24 +106,24 @@
en for move%0A
-
rece
@@ -145,12 +145,441 @@
winner%0A
+%0A%0APlay trusting no one:%0A%0ASwap hashes:%0A for player in game:%0A if current player:%0A send hasher(move + salt)%0A else:%0A listen for hash%0A receive hash%0A%0ASwap salts:%0A for player in game:%0A if current player:%0A send move + salt%0A else:%0A listen for move + salt%0A receive move + salt%0A verify hasher(move + salt) == hash%0A%0Adecide winner%0A
%22%22%22%0A
|
0e36db47311b936076161ff0d724e6d8733c8726
|
Test accessibility of custom IOLoop subclass via base IOLoop.instance.
|
zmq/tests/test_ioloop.py
|
zmq/tests/test_ioloop.py
|
#-----------------------------------------------------------------------------
# Copyright (c) 2010-2012 Brian Granger, Min Ragan-Kelley
#
# This file is part of pyzmq
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import time
import os
import threading
import zmq
from zmq.tests import BaseZMQTestCase
from zmq.eventloop import ioloop
from zmq.eventloop.minitornado.ioloop import _Timeout
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
def printer():
os.system("say hello")
raise Exception
print (time.time())
class Delay(threading.Thread):
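# A cancellable one-shot timer: wait() blocks for up to `delay`
# seconds, and abort() wakes it early via the condition variable.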
def __init__(self, f, delay=1):
self.f=f
self.delay=delay
self.aborted=False
self.cond=threading.Condition()
super(Delay, self).__init__()
def run(self):
self.cond.acquire()
self.cond.wait(self.delay)
self.cond.release()
if not self.aborted:
self.f()
def abort(self):
self.aborted=True
self.cond.acquire()
self.cond.notify()
self.cond.release()
class TestIOLoop(BaseZMQTestCase):
def test_simple(self):
"""simple IOLoop creation test"""
loop = ioloop.IOLoop()
dc = ioloop.PeriodicCallback(loop.stop, 200, loop)
pc = ioloop.PeriodicCallback(lambda : None, 10, loop)
pc.start()
dc.start()
t = Delay(loop.stop,1)
t.start()
loop.start()
if t.isAlive():
t.abort()
else:
self.fail("IOLoop failed to exit")
def test_timeout_compare(self):
"""test timeout comparisons"""
loop = ioloop.IOLoop()
t = _Timeout(1, 2, loop)
t2 = _Timeout(1, 3, loop)
self.assertEqual(t < t2, id(t) < id(t2))
t2 = _Timeout(2,1, loop)
self.assertTrue(t < t2)
def test_poller_events(self):
"""Tornado poller implementation maps events correctly"""
req,rep = self.create_bound_pair(zmq.REQ, zmq.REP)
poller = ioloop.ZMQPoller()
poller.register(req, ioloop.IOLoop.READ)
poller.register(rep, ioloop.IOLoop.READ)
events = dict(poller.poll(0))
self.assertEqual(events.get(rep), None)
self.assertEqual(events.get(req), None)
poller.register(req, ioloop.IOLoop.WRITE)
poller.register(rep, ioloop.IOLoop.WRITE)
events = dict(poller.poll(1))
self.assertEqual(events.get(req), ioloop.IOLoop.WRITE)
self.assertEqual(events.get(rep), None)
poller.register(rep, ioloop.IOLoop.READ)
req.send(b'hi')
events = dict(poller.poll(1))
self.assertEqual(events.get(rep), ioloop.IOLoop.READ)
self.assertEqual(events.get(req), None)
def test_instance(self):
"""Test IOLoop.instance returns the right object"""
loop = ioloop.IOLoop.instance()
self.assertEqual(loop.__class__, ioloop.IOLoop)
def test_close_all(self):
"""Test close(all_fds=True)"""
loop = ioloop.IOLoop.instance()
req,rep = self.create_bound_pair(zmq.REQ, zmq.REP)
loop.add_handler(req, lambda msg: msg, ioloop.IOLoop.READ)
loop.add_handler(rep, lambda msg: msg, ioloop.IOLoop.READ)
self.assertEqual(req.closed, False)
self.assertEqual(rep.closed, False)
loop.close(all_fds=True)
self.assertEqual(req.closed, True)
self.assertEqual(rep.closed, True)
|
Python
| 0
|
@@ -732,16 +732,163 @@
Timeout%0A
+try:%0A from tornado.ioloop import IOLoop as BaseIOLoop%0Aexcept ImportError:%0A from zmq.eventloop.minitornado.ioloop import IOLoop as BaseIOLoop%0A
%0A%0A#-----
@@ -3515,24 +3515,117 @@
oop.IOLoop)%0A
+ loop = BaseIOLoop.instance()%0A self.assertEqual(loop.__class__, ioloop.IOLoop)%0A
%0A def
|
866e0ec72163debd9f46b1ecb8e4d07b040694b4
|
Fix absolute import
|
sand/cytoscape/themes/ops.py
|
sand/cytoscape/themes/ops.py
|
import sand.cytoscape.themes.colors as c
import sand.cytoscape.themes.label_positions as p
settings = {
# node style
'NODE_TRANSPARENCY': 255,
'NODE_SIZE': 25,
'NODE_BORDER_WIDTH': 4,
'NODE_BORDER_PAINT': c.BRIGHT_GREEN,
'NODE_FILL_COLOR': c.DARK_GREEN,
'NODE_SELECTED_PAINT': c.BRIGHT_YELLOW,
# node label style
'NODE_LABEL_COLOR': c.BRIGHT_GRAY,
'NODE_LABEL_FONT_SIZE': 16,
'NODE_LABEL_POSITION': p.LOWER_RIGHT,
# edge style
'EDGE_TRANSPARENCY': 255,
'EDGE_WIDTH': 2.5,
'EDGE_LINE_TYPE': 'SOLID',
'EDGE_STROKE_SELECTED_PAINT': c.BRIGHT_YELLOW,
'EDGE_STROKE_UNSELECTED_PAINT': c.BRIGHT_GRAY,
'EDGE_TARGET_ARROW_UNSELECTED_PAINT': c.BRIGHT_GRAY,
'EDGE_TARGET_ARROW_SHAPE': 'DELTA',
# network style
'NETWORK_BACKGROUND_PAINT': c.DARK_GRAY
}
|
Python
| 0.000173
|
@@ -1,74 +1,44 @@
-import sand.cytoscape.themes.colors as c%0Aimport sand.cytoscape.themes.
+from . import colors as c%0Afrom . import
labe
|
75635315598ccbcad887bf77f7cdc99772157033
|
Add construct_data function to construct data for the API
|
gist.py
|
gist.py
|
import os
import sys
from parser import parser
args = parser.parse_args()
def process_files(args):
"""
:param args:
The arguments parsed by argparse
:returns:
A dict containing file_names as keys and a
dict containing a key `content` as the value
Example return:
{
"file_name": {
"content": {
# file contents
}
}
}
"""
files = [os.path.abspath(file) for file in args.files]
file_contents = {}
for file in files:
try:
f = open(file)
file_contents[os.path.split(file)[1]] = f.read()
f.close()
except FileNotFoundError:
print('File "{}"\n\tdoes not exist'.format(file))
should_create = input('Create the gist without this file [Y/n]: ') or 'Y'
if not should_create == 'Y':
sys.exit("gist: exiting ...")
return file_contents
def create_gist(data):
"""
:param data:
The JSON data to be posted to the API
:returns:
request object of the POST request made to create the gist
"""
end_point = 'https://api.github.com/gists'
rq = requests.post(end_point, json=data)
return rq
|
Python
| 0
|
@@ -1264,8 +1264,326 @@
turn rq%0A
+%0Adef construct_data(args):%0A %22%22%22%0A :param args:%0A The arguments parsed by argparse%0A :returns:%0A %60data%60 dict to be passed to crete the POST request%0A %22%22%22%0A data = %7B%0A %22public%22: args.secret,%0A %22description%22: args.description,%0A %22files%22: process_files(args)%0A %7D%0A return data%0A
|
4fc918a7f55d8ef5dbcb2cf65f23d2d08a5ed6d0
|
Exclude broken PDF.
|
scrapers/philadelphia.py
|
scrapers/philadelphia.py
|
#!/usr/bin/env python
import datetime
import os
import re
from utils import BaseParser, prevnext
from utils import ParserSpeech as Speech, ParserSection as Section
class PhilaParser(BaseParser):
instance = 'philadelphia'
def get_transcripts(self):
base_url = 'http://legislation.phila.gov/transcripts/Stated%%20Meetings/%d/sm%s.pdf'
# List manually got from http://legislation.phila.gov/council-transcriptroom/transroom_date.aspx
transcripts = [
'2014-03-27', '2014-03-20', '2014-03-13', '2014-03-06',
'2014-02-27', '2014-02-20', '2014-02-06',
'2014-01-30', '2014-01-23',
'2013-12-12', '2013-12-05',
'2013-11-21', '2013-11-14',
'2013-10-31', '2013-10-24', '2013-10-17', '2013-10-10', '2013-10-03',
'2013-09-26', '2013-09-19', '2013-09-12',
# '2013-06-20', Won't download
'2013-06-13', '2013-06-06',
'2013-05-23', '2013-05-16', '2013-05-09', '2013-05-02',
# '2013-04-25', Broken PDF
'2013-04-18', '2013-04-11', '2013-04-04',
'2013-03-21', '2013-03-14', '2013-03-07',
'2013-02-28', '2013-02-21', '2013-02-14', '2013-02-07',
'2013-01-31', '2013-01-24',
]
for date in transcripts:
date = datetime.datetime.strptime(date, '%Y-%m-%d').date()
url = base_url % (date.year, date.strftime('%m%d%y'))
if date.isoformat() == '2013-03-21':
url = base_url % (date.year, '0321b3')
yield { 'date': date, 'url': url, 'text': self.get_pdf(url) }
url = base_url % (date.year, '0321a3')
yield { 'date': date, 'url': url, 'text': self.get_pdf(url) }
def top_section_title(self, data):
return 'Council meeting, %s' % data['date'].strftime('%d %B %Y').lstrip('0')
def parse_transcript(self, data):
print "PARSING %s" % data['url']
page, num = 1, 1
speech = None
state = 'text'
Speech.reset(True)
for prev_line, line, next_line in prevnext(data['text']):
# Page break
if '\014' in line:
page += 1
num = 0
continue
if state == 'skip1':
state = 'text'
continue
# Empty line, or line matching page footer
if re.match('\s*$', line):
continue
if re.match(' *Strehlow & Associates, Inc.$| *\(215\) 504-4622$', line):
continue
# Ignore title page for now
if page == 1:
continue
# Start of certificate/index
if re.match(' *\d+ *(CERTIFICATE|- - -)$', line):
state = 'index'
if state == 'index':
continue
# Each page starts with page number
if num == 0:
m = re.match(' +(\d+)$', line)
assert int(m.group(1)) == page
num += 1
continue
# Heading somewhere within this page, just ignore it
if num == 1:
num += 1
continue
# Let's check we haven't lost a line anywhere...
assert re.match(' *%d( |$)' % num, line), '%s != %s' % (num, line)
line = re.sub('^ *%d( |$)' % num, '', line)
num += 1
# Narrative messages
m = re.match(' +(\(.*\))$', line)
if m:
yield speech
speech = Speech( speaker=None, text=line )
continue
m1 = re.match(' +(\(.*)$', line)
m2 = re.match(' *\d+ +(.*\))$', next_line)
if m1 and m2:
yield speech
speech = Speech( speaker=None, text='%s %s' % (m1.group(1), m2.group(1)) )
state = 'skip1'
num += 1
continue
# Okay, here we have a non-empty, non-page number, non-narrative line of just text
# print page, num, line
# New speaker
m = re.match(" *([A-Z '.]+):(?: (.*)|$)", line)
if m:
yield speech
speaker = self.fix_name(m.group(1))
text = m.group(2) or ''
speech = Speech( speaker=speaker, text=text )
continue
# We must now already have a speech by the time we're here
if not speech:
raise Exception, 'Reached here without a speech - need to deal with "%s"' % line
if re.match(' ', line):
speech.add_para(line.strip())
else:
speech.add_text(line.strip())
yield speech
parser = PhilaParser()
parser.run()
|
Python
| 0
|
@@ -899,32 +899,34 @@
load%0A
+ #
'2013-06-13', '
@@ -919,24 +919,47 @@
2013-06-13',
+ Broken PDF%0A
'2013-06-06
|
8db806d30d7591828528ac937e8f3b334e957ed3
|
remove shim should be symmetric to add_shim
|
_distutils_hack/__init__.py
|
_distutils_hack/__init__.py
|
import sys
import os
import re
import importlib
import warnings
is_pypy = '__pypy__' in sys.builtin_module_names
def enabled():
"""
Allow selection of distutils by environment variable.
"""
which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib')
return which == 'local'
def warn_distutils_present():
if 'distutils' not in sys.modules:
return
if is_pypy and sys.version_info < (3, 7):
# PyPy for 3.6 unconditionally imports distutils, so bypass the warning
# https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
return
warnings.warn(
"Distutils was imported before Setuptools. This usage is discouraged "
"and may exhibit undesirable behaviors or errors. Please use "
"Setuptools' objects directly or at least import Setuptools first.")
def clear_distutils():
if 'distutils' not in sys.modules:
return
warnings.warn("Setuptools is replacing distutils.")
mods = [name for name in sys.modules if re.match(r'distutils\b', name)]
for name in mods:
del sys.modules[name]
def ensure_local_distutils():
clear_distutils()
distutils = importlib.import_module('setuptools._distutils')
distutils.__name__ = 'distutils'
sys.modules['distutils'] = distutils
# sanity check that submodules load as expected
core = importlib.import_module('distutils.core')
assert '_distutils' in core.__file__, core.__file__
def do_override():
"""
Ensure that the local copy of distutils is preferred over stdlib.
See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
for more motivation.
"""
warn_distutils_present()
if enabled():
ensure_local_distutils()
class DistutilsMetaFinder:
def find_spec(self, fullname, path, target=None):
if path is not None or fullname != "distutils":
return None
return self.get_distutils_spec()
def get_distutils_spec(self):
import importlib.util
class DistutilsLoader(importlib.util.abc.Loader):
def create_module(self, spec):
return importlib.import_module('._distutils', 'setuptools')
def exec_module(self, module):
pass
return importlib.util.spec_from_loader('distutils', DistutilsLoader())
DISTUTILS_FINDER = DistutilsMetaFinder()
def add_shim():
sys.meta_path.insert(0, DISTUTILS_FINDER)
def remove_shim():
try:
sys.path.remove(DISTUTILS_FINDER)
except ValueError:
pass
|
Python
| 0.000001
|
@@ -2540,16 +2540,21 @@
sys.
+meta_
path.rem
|
37ab58016e69993b5ab1d63c99d9afcf54bd95af
|
Implement more TGT Neutral Epics
|
fireplace/cards/tgt/neutral_epic.py
|
fireplace/cards/tgt/neutral_epic.py
|
from ..utils import *
##
# Minions
# Kodorider
class AT_099:
inspire = Summon(CONTROLLER, "AT_099t")
|
Python
| 0.000006
|
@@ -37,69 +37,551 @@
%0A%0A#
-Kodorider%0Aclass AT_099:%0A%09inspire = Summon(CONTROLLER, %22AT_099t%22
+Twilight Guardian%0Aclass AT_017:%0A%09play = HOLDING_DRAGON & Buff(SELF, %22AT_017e%22)%0A%0A%0A# Sideshow Spelleater%0Aclass AT_098:%0A%09play = Summon(CONTROLLER, Copy(ENEMY_HERO_POWER))%0A%0A%0A# Kodorider%0Aclass AT_099:%0A%09inspire = Summon(CONTROLLER, %22AT_099t%22)%0A%0A%0A# Master of Ceremonies%0Aclass AT_117:%0A%09play = Find(FRIENDLY_MINIONS + SPELLPOWER) & Buff(SELF, %22AT_117e%22)%0A%0A%0A# Frost Giant%0Aclass AT_120:%0A%09cost = lambda self, i: i - self.controller.times_hero_power_used_this_game%0A%0A%0A# Crowd Favorite%0Aclass AT_121:%0A%09events = Play(CONTROLLER, BATTLECRY).on(Buff(SELF, %22AT_121e%22)
)%0A
|
dfe1213ba9de5e5e5aaf9690a2cf5e3b295869fa
|
Remove Python 3 incompatible print statement
|
examples/graph/degree_sequence.py
|
examples/graph/degree_sequence.py
|
#!/usr/bin/env python
"""
Random graph from given degree sequence.
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
__date__ = "$Date: 2004-11-03 08:11:09 -0700 (Wed, 03 Nov 2004) $"
__credits__ = """"""
__revision__ = "$Revision: 503 $"
# Copyright (C) 2004 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
from networkx import *
z=[5,3,3,3,3,2,2,2,1,1,1]
print is_valid_degree_sequence(z)
print("Configuration model")
G=configuration_model(z) # configuration model
degree_sequence=list(degree(G).values()) # degree sequence
print("Degree sequence %s" % degree_sequence)
print("Degree histogram")
hist={}
for d in degree_sequence:
if d in hist:
hist[d]+=1
else:
hist[d]=1
print("degree #nodes")
for d in hist:
print('%d %d' % (d,hist[d]))
|
Python
| 0.998568
|
@@ -475,17 +475,17 @@
1%5D%0Aprint
-
+(
is_valid
@@ -503,16 +503,17 @@
uence(z)
+)
%0A%0Aprint(
@@ -884,10 +884,8 @@
st%5Bd%5D))%0A
-%0A%0A
|
ed9e55468eb18c31594bb1cbfc5e940ea5b68151
|
Update Si7021.py
|
Adafruit_SI7021/Si7021.py
|
Adafruit_SI7021/Si7021.py
|
# The MIT License (MIT)
# Copyright (c) 2016 John Robinson
# Author: John Robinson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
import math
# Default I2C address for device.
SI7021_I2CADDR_DEFAULT = 0x40
## Commands.
SI7021_CMD_MEAS_RH_MSTR = 0xE5 # Measure Relative Humidity, Hold Master Mode
SI7021_CMD_MEAS_RH_NOMSTR = 0xF5 # Measure Relative Humidity, No Hold Master Mode
SI7021_CMD_MEAS_TEMP_MSTR = 0xE3 # Measure Temperature, Hold Master Mode
SI7021_CMD_MEAS_TEMP_NOMSTR = 0xF3 # Measure Temperature, No Hold Master Mode
SI7021_CMD_READ_RH_TEMP = 0xE3 # Read Temperature Value from Previous RH Measurement
SI7021_CMD_RESET = 0xFE # Reset
SI7021_CMD_WRITE_REG1 = 0xE6 # Write RH/T User Register 1
SI7021_CMD_READ_REG1 = 0xE7 # Read RH/T User Register 1
SI7021_CMD_WRITE_HEATER_REG = 0x51 # Write Heater Control Register
SI7021_CMD_READ_HEATER_REG = 0x11 # Read Heater Control Register
#SI7021_CMD_READ_ID_B1 = 0xFA, 0X0F # Read Electronic ID 1st Byte
#SI7021_CMD_READ_ID_B2 = 0xFC, 0xC9 # Read Electronic ID 2nd Byte
#SI7021_CMD_READ_FIRMWARE_REV = 0x84, 0xB8 # Read Firmware Revision
## Configuration register values.
SI7021_REG1_CONFIG_MEAS_RES = 0x81 # Measurement Resolution:
# 00: RH 12b, Temp 14b
# 01: RH 8b, Temp 12b
# 10: RH 10b, Temp 13b
# 11: RH 11b, Temp 11b
SI7021_REG1_CONFIG_VDDS = 0x40 # VDD Status
# 0: VDD OK, 1: VDD Low
SI7021_REG1_CONFIG_HTRE = 0x04 # Heater Enabled
# 1: Enabled, 0: Disabled
SI7021_REG2_CONFIG_HEATER = 0x0F # Heater Current Values
                                 # 0000 ~= 3.09 mA -> 1111 ~= 94.20 mA (non-linear)
class Si7021(object):
"""Class to represent an Adafruit SI7021 temperature and humidity measurement breakout board.
"""
def __init__(self, address=SI7021_I2CADDR_DEFAULT, i2c=None, **kwargs):
"""Initialize Si7021 device on the specified I2C address and bus number.
Address defaults to 0x40 and bus number defaults to the appropriate bus
for the hardware."""
self._logger = logging.getLogger('Adafruit_Si7021.Si7021')
if i2c is None:
import Adafruit_GPIO.I2C as I2C
i2c = I2C
self._device = i2c.get_i2c_device(address, **kwargs)
def begin(self):
"""Start taking temperature measurements. Returns True if the device is
        initialized, False otherwise."""
## Check manufacturer and device ID match expected values.
#mid = self._device.readU16BE(MCP9808_REG_MANUF_ID)
#did = self._device.readU16BE(MCP9808_REG_DEVICE_ID)
#self._logger.debug('Read manufacturer ID: {0:04X}'.format(mid))
#self._logger.debug('Read device ID: {0:04X}'.format(did))
#return mid == 0x0054 and did == 0x0400
reg1 = self._device.readU8(SI7021_CMD_READ_REG1)
self._logger.debug('Register 1 raw value 0x%02X', reg1)
return True
def readRH(self):
"""Read Relative humidity and return its value in % RH."""
# Send command to read RH
rh = self._device.readU8(SI7021_CMD_MEAS_RH_NOMSTR);
self._logger.debug('Raw relative humidity register value: 0x{0:04X}'.format(rh & 0xFFFF))
# Scale and convert to a percentage value.
rel_humid = 125.0 * rh / 65536.0 - 6.0
return rel_humid
#def readTempC(self):
#"""Read sensor and return its value in degrees celsius."""
## Read temperature register value.
#t = self._device.readU16BE(MCP9808_REG_AMBIENT_TEMP)
#self._logger.debug('Raw ambient temp register value: 0x{0:04X}'.format(t & 0xFFFF))
## Scale and convert to signed value.
#temp = (t & 0x0FFF) / 16.0
#if t & 0x1000:
#temp -= 256.0
#return temp
|
Python
| 0
|
@@ -4680,17 +4680,16 @@
id%0A%0A
-#
def read
@@ -4709,17 +4709,16 @@
-#
%22%22%22Read
@@ -4777,17 +4777,16 @@
#
-#
Read te
@@ -4819,17 +4819,16 @@
-#
t = self
@@ -4850,32 +4850,35 @@
6BE(
-MCP9808_REG_AMBIENT_TEMP
+SI7021_CMD_MEAS_TEMP_NOMSTR
)%0A
@@ -4875,33 +4875,32 @@
NOMSTR)%0A
-#
self._logger.deb
@@ -4976,17 +4976,16 @@
#
-#
Scale a
@@ -5024,17 +5024,16 @@
-#
temp = (
t &
@@ -5032,77 +5032,38 @@
= (
-t & 0x0FFF) / 16.0%0A #if t & 0x1000:%0A #temp -= 256.0
+ 175.72 * t ) / 65536.0 -46.85
%0A
@@ -5063,25 +5063,24 @@
.85%0A
-#
return temp%0A
|
fcb3d026faf4648bbacc73f84e0e6dd6a25eeb6d
|
delete plotting function
|
code/lamost/li_giants/residuals.py
|
code/lamost/li_giants/residuals.py
|
""" Calculate residuals """
import numpy as np
import matplotlib.pyplot as plt
from math import log10, floor
from matplotlib import rc
import matplotlib.gridspec as gridspec
from matplotlib.colors import LogNorm
plt.rc('text', usetex=True)
from matplotlib.ticker import MaxNLocator
import sys
sys.path.insert(0, '/home/annaho/TheCannon')
from TheCannon import model
from TheCannon import dataset
def get_residuals(ds, m):
""" Using the dataset and model object, calculate the residuals and return
Parameters
----------
ds: dataset object
m: model object
Return
------
residuals: array of residuals, spec minus model spec
"""
m.infer_spectra(ds)
resid = ds.test_flux - m.model_spectra
return resid
def load_model():
""" Load the model
Parameters
----------
direc: directory with all of the model files
Returns
-------
m: model object
"""
direc = "/home/annaho/TheCannon/code/lamost/mass_age/cn"
m = model.CannonModel(2)
m.coeffs = np.load(direc + "/coeffs.npz")['arr_0']
m.scatters = np.load(direc + "/scatters.npz")['arr_0']
m.chisqs = np.load(direc + "/chisqs.npz")['arr_0']
m.pivots = np.load(direc + "/pivots.npz")['arr_0']
return m
def load_dataset(date):
""" Load the dataset for a single date
Parameters
----------
date: the date (string) for which to load the data & dataset
Returns
-------
ds: the dataset object
"""
DATA_DIR = "/home/annaho/TheCannon/data/lamost"
WL_DIR = "/home/annaho/TheCannon/code/lamost/mass_age/cn"
wl = np.load(WL_DIR + "/wl_cols.npz")['arr_0']
ds = dataset.Dataset(wl, [], [], [], [], [], [], [])
test_label = np.load("%s/%s_all_cannon_labels.npz" %(DATA_DIR,date))['arr_0']
ds.test_label_vals = test_label
ds.test_flux = np.load("%s/%s_test_flux.npz" %(DATA_DIR,date))['arr_0']
ds.test_ivar = np.load("%s/%s_test_ivar.npz" %(DATA_DIR,date))['arr_0']
return ds
def plot_fit():
plt.plot(wl, resid[ii])
plt.xlim(6400,7000)
plt.ylim(-0.1,0.1)
plt.axvline(x=6707.8, c='r', linestyle='--', linewidth=2)
plt.axvline(x=6103, c='r', linestyle='--', linewidth=2)
plt.show()
plt.savefig("resid_%s.png" %ii)
plt.close()
def run_all_data():
""" Load the data that we're using to search for Li-rich giants.
Store it in dataset and model objects. """
DATA_DIR = "/home/annaho/TheCannon/code/apogee_lamost/xcalib_4labels"
dates = os.listdir("/home/share/LAMOST/DR2/DR2_release")
dates = np.array(dates)
dates = np.delete(dates, np.where(dates=='.directory')[0][0])
dates = np.delete(dates, np.where(dates=='all_folders.list')[0][0])
dates = np.delete(dates, np.where(dates=='dr2.lis')[0][0])
for date in dates:
print ("loading data for %s" %date)
load_date(date)
if __name__=="__main__":
# load a spectrum
ds = load_dataset("20121006")
#m = load_model()
#print(ds.test_flux.shape)
#print(m.coeffs.shape)
#resid = get_residuals(ds, m)
|
Python
| 0.000001
|
@@ -1991,291 +1991,8 @@
s%0A%0A%0A
-%0Adef plot_fit():%0A plt.plot(wl, resid%5Bii%5D)%0A plt.xlim(6400,7000)%0A plt.ylim(-0.1,0.1)%0A plt.axvline(x=6707.8, c='r', linestyle='--', linewidth=2)%0A plt.axvline(x=6103, c='r', linestyle='--', linewidth=2)%0A plt.show()%0A plt.savefig(%22resid_%25s.png%22 %25ii)%0A plt.close()%0A%0A%0A
def
@@ -2665,17 +2665,16 @@
6%22)%0A
-#
m = load
@@ -2690,67 +2690,8 @@
-#print(ds.test_flux.shape)%0A #print(m.coeffs.shape)%0A #
resi
|
72e30b3b881418d40dd0446842176fc5c4468802
|
Add name url converter
|
flask_roots/routing.py
|
flask_roots/routing.py
|
from werkzeug.routing import BaseConverter
class RegexConverter(BaseConverter):
def __init__(self, url_map, *items):
super(RegexConverter, self).__init__(url_map)
self.regex = items[0]
def setup_routing(app):
app.url_map.converters['re'] = RegexConverter
|
Python
| 0.000001
|
@@ -202,16 +202,843 @@
ms%5B0%5D%0A%0A%0A
+def strip_accents(s):%0A s = unicode(s)%0A return ''.join((c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn'))%0A%0A%0Adef _urlify_name(name):%0A %22%22%22Converts a name or title into something we can put into a URI.%0A %0A This is designed to only be for one way usage (ie. we can't use the%0A urlified names to figure out what photo or photoset we are talking about).%0A %22%22%22%0A return re.sub(r'%5CW+', '-', name).strip('-') or 'Untitled'%0A%0A%0Adef urlify_name(name):%0A return _urlify_name(strip_accents(name).encode('ascii', 'ignore'))%0A%0A%0Aclass NameConverter(BaseConverter):%0A%0A def to_python(self, value):%0A return value%0A%0A def to_url(self, value):%0A if not isinstance(value, str) and hasattr(value, 'name'):%0A value = value.name%0A return urlify_name(str(value)).lower()%0A%0A%0A
def setu
@@ -1095,16 +1095,68 @@
RegexConverter%0A
+ app.url_map.converters%5B'name'%5D = NameConverter%0A%0A
|
78b27c492db12ab2ca208e34a342ee86e1a8a307
|
Set correct file headers when uploading to S3
|
flask_admin/contrib/fileadmin/s3.py
|
flask_admin/contrib/fileadmin/s3.py
|
import time
try:
from boto import s3
from boto.s3.prefix import Prefix
from boto.s3.key import Key
except ImportError:
s3 = None
from flask import redirect
from flask_admin.babel import gettext
from . import BaseFileAdmin
class S3Storage(object):
"""
Storage object representing files on an Amazon S3 bucket.
Usage::
from flask_admin.contrib.fileadmin import BaseFileAdmin
from flask_admin.contrib.fileadmin.s3 import S3Storage
class MyS3Admin(BaseFileAdmin):
# Configure your class however you like
pass
fileadmin_view = MyS3Admin(storage=S3Storage(...))
"""
def __init__(self, bucket_name, region, aws_access_key_id,
aws_secret_access_key):
"""
Constructor
:param bucket_name:
Name of the bucket that the files are on.
:param region:
Region that the bucket is located
:param aws_access_key_id:
AWS Access Key ID
:param aws_secret_access_key:
AWS Secret Access Key
Make sure the credentials have the correct permissions set up on
Amazon or else S3 will return a 403 FORBIDDEN error.
"""
if not s3:
raise ValueError('Could not import boto. You can install boto by '
'using pip install boto')
connection = s3.connect_to_region(
region,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
)
self.bucket = connection.get_bucket(bucket_name)
self.separator = '/'
def get_files(self, path, directory):
def _strip_path(name, path):
if name.startswith(path):
return name.replace(path, '', 1)
return name
def _remove_trailing_slash(name):
return name[:-1]
def _iso_to_epoch(timestamp):
dt = time.strptime(timestamp.split(".")[0], "%Y-%m-%dT%H:%M:%S")
return int(time.mktime(dt))
files = []
directories = []
if path and not path.endswith(self.separator):
path += self.separator
for key in self.bucket.list(path, self.separator):
if key.name == path:
continue
if isinstance(key, Prefix):
name = _remove_trailing_slash(_strip_path(key.name, path))
key_name = _remove_trailing_slash(key.name)
directories.append((name, key_name, True, 0, 0))
else:
last_modified = _iso_to_epoch(key.last_modified)
name = _strip_path(key.name, path)
files.append((name, key.name, False, key.size, last_modified))
return directories + files
def _get_bucket_list_prefix(self, path):
parts = path.split(self.separator)
if len(parts) == 1:
search = ''
else:
search = self.separator.join(parts[:-1]) + self.separator
return search
def _get_path_keys(self, path):
search = self._get_bucket_list_prefix(path)
return {key.name for key in self.bucket.list(search, self.separator)}
def is_dir(self, path):
keys = self._get_path_keys(path)
return path + self.separator in keys
def path_exists(self, path):
if path == '':
return True
keys = self._get_path_keys(path)
return path in keys or (path + self.separator) in keys
def get_base_path(self):
return ''
def get_breadcrumbs(self, path):
accumulator = []
breadcrumbs = []
for n in path.split(self.separator):
accumulator.append(n)
breadcrumbs.append((n, self.separator.join(accumulator)))
return breadcrumbs
def send_file(self, file_path):
key = self.bucket.get_key(file_path)
if key is None:
raise ValueError()
return redirect(key.generate_url(3600))
def save_file(self, path, file_data):
key = Key(self.bucket, path)
key.set_contents_from_file(file_data.stream)
def delete_tree(self, directory):
self._check_empty_directory(directory)
self.bucket.delete_key(directory + self.separator)
def delete_file(self, file_path):
self.bucket.delete_key(file_path)
def make_dir(self, path, directory):
dir_path = self.separator.join([path, (directory + self.separator)])
key = Key(self.bucket, dir_path)
key.set_contents_from_string('')
def _check_empty_directory(self, path):
if not self._is_directory_empty(path):
raise ValueError(gettext('Cannot operate on non empty '
'directories'))
return True
def rename_path(self, src, dst):
if self.is_dir(src):
self._check_empty_directory(src)
src += self.separator
dst += self.separator
self.bucket.copy_key(dst, self.bucket.name, src)
self.delete_file(src)
def _is_directory_empty(self, path):
keys = self._get_path_keys(path + self.separator)
return len(keys) == 1
def read_file(self, path):
key = Key(self.bucket, path)
return key.get_contents_as_string()
def write_file(self, path, content):
key = Key(self.bucket, path)
key.set_contents_from_file(content)
class S3FileAdmin(BaseFileAdmin):
"""
Simple Amazon Simple Storage Service file-management interface.
:param bucket_name:
Name of the bucket that the files are on.
:param region:
Region that the bucket is located
:param aws_access_key_id:
AWS Access Key ID
:param aws_secret_access_key:
AWS Secret Access Key
Sample usage::
from flask_admin import Admin
from flask_admin.contrib.fileadmin.s3 import S3FileAdmin
admin = Admin()
admin.add_view(S3FileAdmin('files_bucket', 'us-east-1', 'key_id', 'secret_key')
"""
def __init__(self, bucket_name, region, aws_access_key_id,
aws_secret_access_key, *args, **kwargs):
storage = S3Storage(bucket_name, region, aws_access_key_id,
aws_secret_access_key)
super(S3FileAdmin, self).__init__(*args, storage=storage, **kwargs)
|
Python
| 0
|
@@ -4196,32 +4196,115 @@
f.bucket, path)%0A
+ headers = %7B%0A 'Content-Type' : file_data.content_type,%0A %7D%0A
key.set_
@@ -4338,16 +4338,33 @@
a.stream
+, headers=headers
)%0A%0A d
|
42463351a598d45f2738c894e00d0eceec308f9c
|
Add docstring
|
aegea/billing.py
|
aegea/billing.py
|
"""
View detailed billing reports.
Detailed billing reports can be configured at https://console.aws.amazon.com/billing/home#/preferences.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os, sys, json, zipfile, csv, io
from io import BytesIO, TextIOWrapper
from datetime import datetime
import boto3, requests
from botocore.exceptions import ClientError
from . import register_parser
from .util.printing import format_table, page_output, get_field, get_cell, tabulate
from .util.aws import ARN
def filter_line_items(args):
def filter_fn(item):
if args.min_cost and float(item["Cost"]) < args.min_cost:
return False
return True
return filter_fn
def billing(args):
s3 = boto3.resource("s3")
iam = boto3.resource("iam")
account_id = ARN(iam.CurrentUser().user.arn).account_id
args.detailed_billing_reports_bucket = args.detailed_billing_reports_bucket.format(account_id=account_id)
now = datetime.utcnow()
report = "{account_id}-aws-billing-detailed-line-items-with-resources-and-tags-{year}-{month}.csv.zip"
report = report.format(account_id=account_id, year=args.year or now.year, month="%02d" % (args.month or now.month))
try:
billing_object = s3.Bucket(args.detailed_billing_reports_bucket).Object(report)
billing_object_body = billing_object.get()["Body"]
except ClientError as e:
console_url = "https://console.aws.amazon.com/billing/home#/preferences"
msg = "Can't get detailed billing report {} from bucket {} in account {}: {}. Go to {} to set up detailed billing."
sys.exit(msg.format(report, args.detailed_billing_reports_bucket, account_id, e, console_url))
zbuf = BytesIO(billing_object_body.read())
with zipfile.ZipFile(zbuf) as zfile:
with TextIOWrapper(zfile.open(report.rstrip(".zip"))) as fh:
reader = csv.DictReader(fh)
page_output(tabulate(filter(filter_line_items(args), reader), args))
parser = register_parser(billing, help='List contents of AWS detailed billing reports')
parser.add_argument("--columns", nargs="+")
#parser.add_argument("--sort-by")
parser.add_argument("--year", type=int, help="Year to get billing reports for. Defaults to current year")
parser.add_argument("--month", type=int, help="Month (numeral) to get billing reports for. Defaults to current month")
parser.add_argument("--detailed-billing-reports-bucket", help="Name of S3 bucket to retrieve detailed billing reports from")
parser.add_argument("--min-cost", type=float, help="Omit billing line items below this cost")
|
Python
| 0.000005
|
@@ -2090,16 +2090,37 @@
reports'
+, description=__doc__
)%0Aparser
|
19025b97d38706eda4f425667b69f7803a39ca35
|
add tinyint as a bool type
|
flask_admin/contrib/sqla/filters.py
|
flask_admin/contrib/sqla/filters.py
|
import warnings
from flask.ext.admin.babel import lazy_gettext
from flask.ext.admin.model import filters
from flask.ext.admin.contrib.sqla import tools
class BaseSQLAFilter(filters.BaseFilter):
"""
Base SQLAlchemy filter.
"""
def __init__(self, column, name, options=None, data_type=None):
"""
Constructor.
:param column:
Model field
:param name:
Display name
:param options:
Fixed set of options
:param data_type:
Client data type
"""
super(BaseSQLAFilter, self).__init__(name, options, data_type)
self.column = column
# Common filters
class FilterEqual(BaseSQLAFilter):
def apply(self, query, value):
return query.filter(self.column == value)
def operation(self):
return lazy_gettext('equals')
class FilterNotEqual(BaseSQLAFilter):
def apply(self, query, value):
return query.filter(self.column != value)
def operation(self):
return lazy_gettext('not equal')
class FilterLike(BaseSQLAFilter):
def apply(self, query, value):
stmt = tools.parse_like_term(value)
return query.filter(self.column.ilike(stmt))
def operation(self):
return lazy_gettext('contains')
class FilterNotLike(BaseSQLAFilter):
def apply(self, query, value):
stmt = tools.parse_like_term(value)
return query.filter(~self.column.ilike(stmt))
def operation(self):
return lazy_gettext('not contains')
class FilterGreater(BaseSQLAFilter):
def apply(self, query, value):
return query.filter(self.column > value)
def operation(self):
return lazy_gettext('greater than')
class FilterSmaller(BaseSQLAFilter):
def apply(self, query, value):
return query.filter(self.column < value)
def operation(self):
return lazy_gettext('smaller than')
# Customized type filters
class BooleanEqualFilter(FilterEqual, filters.BaseBooleanFilter):
pass
class BooleanNotEqualFilter(FilterNotEqual, filters.BaseBooleanFilter):
pass
# Base SQLA filter field converter
class FilterConverter(filters.BaseFilterConverter):
strings = (FilterEqual, FilterNotEqual, FilterLike, FilterNotLike)
numeric = (FilterEqual, FilterNotEqual, FilterGreater, FilterSmaller)
bool = (BooleanEqualFilter, BooleanNotEqualFilter)
enum = (FilterEqual, FilterNotEqual)
def convert(self, type_name, column, name, **kwargs):
if type_name.lower() in self.converters:
return self.converters[type_name.lower()](column, name, **kwargs)
return None
@filters.convert('string', 'unicode', 'text', 'unicodetext', 'varchar')
def conv_string(self, column, name, **kwargs):
return [f(column, name, **kwargs) for f in self.strings]
@filters.convert('boolean')
def conv_bool(self, column, name, **kwargs):
return [f(column, name, **kwargs) for f in self.bool]
@filters.convert('integer', 'smallinteger', 'numeric', 'float', 'biginteger')
def conv_int(self, column, name, **kwargs):
return [f(column, name, **kwargs) for f in self.numeric]
@filters.convert('date')
def conv_date(self, column, name, **kwargs):
return [f(column, name, data_type='datepicker', **kwargs) for f in self.numeric]
@filters.convert('datetime')
def conv_datetime(self, column, name, **kwargs):
return [f(column, name, data_type='datetimepicker', **kwargs) for f in self.numeric]
@filters.convert('enum')
def conv_enum(self, column, name, options=None, **kwargs):
if not options:
options = [
(v, v)
for v in column.type.enums
]
return [f(column, name, options, **kwargs) for f in self.enum]
|
Python
| 0.000001
|
@@ -2898,16 +2898,27 @@
boolean'
+, 'tinyint'
)%0A de
|
6bcda3ee17fe75039c1ead65ff6888cdc20b29a3
|
print statements
|
viaduct/models/page.py
|
viaduct/models/page.py
|
import datetime
from viaduct import db
from viaduct.models import Group
page_ancestor = db.Table('page_ancestor',
db.Column('page_id', db.Integer, db.ForeignKey('page.id')),
db.Column('ancestor_id', db.Integer, db.ForeignKey('page.id'))
)
class Page(db.Model):
__tablename__ = 'page'
id = db.Column(db.Integer, primary_key=True)
parent_id = db.Column(db.Integer, db.ForeignKey('page.id'))
path = db.Column(db.String(256), unique=True)
parent = db.relationship('Page',
remote_side=id,
backref=db.backref('children', lazy='dynamic'))
ancestors = db.relationship('Page', secondary=page_ancestor,
primaryjoin=id==page_ancestor.c.page_id,
secondaryjoin=id==page_ancestor.c.ancestor_id,
backref=db.backref('descendants', lazy='dynamic'), lazy='dynamic')
revisions = db.relationship('PageRevision', backref='page', lazy='dynamic')
def __init__(self, path):
self.path = path
def __repr__(self):
return '<Page(%s, "%s")>' % (self.id, self.path)
@staticmethod
def get_by_path(path):
return Page.query.filter(Page.path==path).first()
def has_revisions(self):
return self.revisions.count() > 0
def can_read(self, user):
if(PagePermission.get_user_rights(user, self.id) > 0):
return True
else:
return False
def can_write(self, user):
if(PagePermission.get_user_rights(user, self.id) > 1):
return True
else:
return False
def get_newest_revision(self):
return self.revisions.order_by(PageRevision.timestamp.desc()).first()
class PageRevision(db.Model):
__tablename__ = 'page_revision'
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(128))
filter_html = db.Column(db.Boolean)
content = db.Column(db.Text)
comment = db.Column(db.String(1024))
timestamp = db.Column(db.DateTime)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
page_id = db.Column(db.Integer, db.ForeignKey('page.id'))
author = db.relationship('User', backref=db.backref('page_edits',
lazy='dynamic'))
def __init__(self, page, author, title, content, comment="", filter_html=True,
timestamp=datetime.datetime.utcnow()):
self.title = title
self.content = content
self.comment = comment
self.filter_html = filter_html
self.user_id = author.id if author != None else -1
self.page_id = page.id
self.timestamp = timestamp
class PagePermission(db.Model):
__tablename__ = 'page_permission'
id = db.Column(db.Integer, primary_key=True)
permission = db.Column(db.Integer)
page_id = db.Column(db.Integer, db.ForeignKey('page.id'))
group_id = db.Column(db.Integer, db.ForeignKey('group.id'))
def __init__(self, group_id, page_id, permission=0):
self.permission = permission
self.group_id = group_id
self.page_id = page_id
@staticmethod
def get_user_rights(user, page_id):
rights = 0
if not user or not user.is_active():
groups = [Group.query.filter(Group.name=='all').first()]
else:
groups = user.groups.all()
page = Page.query.filter(Page.id==page_id).first()
if page:
for group in groups:
if group.name == 'administrators':
return 2
permissions = PagePermission.query.filter(PagePermission.page_id==page.id,
PagePermission.group_id==group.id).first()
if permissions:
print permissions.permission
if (permissions.permission >= 2) :
print "permissions1"
return permissions.permission
if (permissions.permission > rights):
print "permissions2"
rights = permissions.permission
return rights
def set_permission(self, permission):
self.permission = permission
|
Python
| 0.999987
|
@@ -3235,24 +3235,47 @@
.permission%0A
+%09%09%09%09%09print permissions%0A
%09%09%09%09%09if (per
|
803fead9cbfa9d2a950e9fa16f42e905f6a942d7
|
add module imports
|
flocker/ca/__init__.py
|
flocker/ca/__init__.py
|
# Copyright ClusterHQ Inc. See LICENSE file for details.
"""
A minimal certificate authority.
"""
__all__ = [
"RootCredential", "ControlCredential", "NodeCredential",
"ComparableKeyPair", "PathError", "CertificateAlreadyExistsError",
"KeyAlreadyExistsError", "EXPIRY_20_YEARS",
"AUTHORITY_CERTIFICATE_FILENAME", "AUTHORITY_KEY_FILENAME",
"CONTROL_CERTIFICATE_FILENAME", "CONTROL_KEY_FILENAME"
]
from ._ca import (
RootCredential, ControlCredential, NodeCredential,
ComparableKeyPair, PathError, CertificateAlreadyExistsError,
KeyAlreadyExistsError, EXPIRY_20_YEARS,
AUTHORITY_CERTIFICATE_FILENAME, AUTHORITY_KEY_FILENAME,
CONTROL_CERTIFICATE_FILENAME, CONTROL_KEY_FILENAME
)
|
Python
| 0.000001
|
@@ -166,16 +166,34 @@
ential%22,
+ %22UserCredential%22,
%0A %22Co
@@ -503,16 +503,32 @@
dential,
+ UserCredential,
%0A Com
|
b45db0476212891dd23934d775bc3082cbcaabdf
|
Fix KLD
|
ws/CSUIBotClass2014/MCL/kldmcl.py
|
ws/CSUIBotClass2014/MCL/kldmcl.py
|
# @obj: implement the standard MCL alg.; table 8.2 on the book Prob. Robotics by S. Thrun
# @author: vektor dewanto
import numpy as np
import CSUIBotClass2014.action_model.model_uas as act_model
import CSUIBotClass2014.perception_model.beam_range_finder_model as obs_model
def normalize_weight(X):
# Normalize all weights, so that they sum up to one
total_w = sum([xw[1] for xw in X])
X = [(xw[0], xw[1]/total_w) for xw in X]
return X
def resample(X_bar):
'''
draw i with probability proportional to w_t^i
'''
X_bar = normalize_weight(X_bar)
X = []
while len(X) < len(X_bar):
candidate_idx = np.random.random_integers(low=0, high= len(X_bar)-1)
candidate_w = X_bar[candidate_idx][1]
sampled = np.random.binomial(n=1, p=candidate_w)# a Bernoulli dist.
if sampled==1:
X.append(X_bar[candidate_idx])
return X
def run(X_past, u, z, m):
'''
\param X: is a list of tuples (x, w)
\param u: the control/action
\param z: the observation
\param m: the given map
'''
X_bar = []
X = []
n_particle = len(X_past)# fixed #particle for ever :(
for i in range(n_particle):
x = act_model.sample_motion_model(u, X_past[i][0], m)
w = 1-obs_model.beam_range_finder_model(z, x, m)
X_bar.append((x, w))
X = resample(X_bar)
return X
|
Python
| 0.000003
|
@@ -266,16 +266,52 @@
bs_model
+%0Afrom scipy import stats%0Aimport math
%0A%0Adef no
@@ -908,17 +908,15 @@
-X.append(
+return
X_ba
@@ -931,17 +931,16 @@
ate_idx%5D
-)
%0A
@@ -1146,29 +1146,118 @@
-X_bar
+epsilon = 0.05%0A delta = 0.01%0A Xt
= %5B%5D
+
%0A
-X
+b
= %5B
-%5D
+%5B0%5D*20%5D*20%0A M = 0%0A Mx = 0%0A Mxmin = 20%0A k = 0
%0A
@@ -1315,50 +1315,145 @@
:(%0A
+
%0A
-for i in range(n_particle):%0A x
+while True:%0A xt1 = resample(X_past)%0A print %22menunggu pagi%22%0A print len(X_past)%0A print xt1%0A xmt
= a
@@ -1488,17 +1488,11 @@
(u,
-X_past%5Bi%5D
+xt1
%5B0%5D,
@@ -1547,16 +1547,18 @@
del(z, x
+mt
, m)%0A
@@ -1563,20 +1563,17 @@
X
-_bar
+t
.append(
@@ -1578,42 +1578,421 @@
d((x
+mt
, w))%0A
+%0A
-%0A X = resample(X_bar)
+ idx = int(math.floor(xmt%5B'x'%5D))%0A idy = int(math.floor(xmt%5B'y'%5D))%0A if(b%5Bidy%5D%5Bidx%5D==0):%0A k += 1%0A b%5Bidy%5D%5Bidx%5D = 1%0A if(k%3E1):%0A var1 = 2.0/(9*(k-1))%0A Mx = ((k-1)/2.0*epsilon*%0A (1 - var1 + math.sqrt(var1)*stats.norm.ppf(1-delta))**3)%0A M+=1%0A if not ((M%3CMx) or (M%3CMxmin)):%0A return Xt
%0A%0A
@@ -1989,21 +1989,22 @@
urn Xt%0A%0A return X
+t
%0A
|
bf476a199492c7966b6a3886da284867622a8b04
|
Update populate_vm_metrics.py
|
perfmetrics/scripts/populate_vm_metrics.py
|
perfmetrics/scripts/populate_vm_metrics.py
|
"""Executes vm_metrics.py by passing appropriate arguments.
To run the script:
>> python3 populate_vm_metrics.py <start_time> <end_time>
"""
import socket
import sys
import time
import os
from vm_metrics import vm_metrics
INSTANCE = socket.gethostname()
metric_data_name = ['start_time_sec', 'cpu_utilization_peak','cpu_utilization_mean',
'network_bandwidth_peak', 'network_bandwidth_mean', 'gcs/ops_latency',
'gcs/read_bytes_count', 'gcs/ops_error_count']
if __name__ == '__main__':
argv = sys.argv
if len(argv) != 3:
raise TypeError('Incorrect number of arguments.\n'
'Usage: '
'python3 populate_vm_metrics.py <start_time> <end_time>')
print('Waiting for 250 seconds for metrics to be updated on VM...')
# It takes up to 240 seconds for sampled data to be visible on the VM metrics graph
# So, waiting for 250 seconds to ensure the returned metrics are not empty
time.sleep(250)
vm_metrics_obj = vm_metrics.VmMetrics()
start_time_sec = int(argv[1])
end_time_sec = int(argv[2])
period = end_time_sec - start_time_sec
print(f'Getting VM metrics for ML model')
vm_metrics_obj.fetch_metrics_and_write_to_google_sheet(start_time_sec, end_time_sec, INSTANCE, period, 'read', 'ml_metrics!')
|
Python
| 0.000004
|
@@ -1294,13 +1294,12 @@
_metrics
-!
')%0A%0A
|
853c6ec8d1c4f518e28f9f14547e2d8999c17ad9
|
Update models.py
|
flask_appbuilder/security/models.py
|
flask_appbuilder/security/models.py
|
from sqlalchemy import Table, Column, Integer, String, Boolean, ForeignKey
from sqlalchemy.orm import relationship, column_property
from flask.ext.appbuilder import Base
class Permission(Base):
__tablename__ = 'ab_permission'
id = Column(Integer, primary_key=True)
name = Column(String(100), unique = True, nullable=False)
def __repr__(self):
return self.name
class ViewMenu(Base):
__tablename__ = 'ab_view_menu'
id = Column(Integer, primary_key=True)
name = Column(String(100), unique = True, nullable=False)
def __eq__(self, other):
return (isinstance(other, self.__class__)) and (self.name == other.name)
def __neq__(self, other):
return self.name != other.name
def __repr__(self):
return self.name
class PermissionView(Base):
__tablename__ = 'ab_permission_view'
id = Column(Integer, primary_key=True)
permission_id = Column(Integer, ForeignKey('ab_permission.id'))
permission = relationship("Permission", backref='permissionview')
view_menu_id = Column(Integer, ForeignKey('ab_view_menu.id'))
view_menu = relationship("ViewMenu")
def __repr__(self):
return str(self.permission).replace('_',' ') + ' on ' + str(self.view_menu)
assoc_permissionview_role = Table('ab_permission_view_role', Base.metadata,
Column('id', Integer, primary_key=True),
Column('permission_view_id', Integer, ForeignKey('ab_permission_view.id')),
Column('role_id', Integer, ForeignKey('ab_role.id'))
)
class Role(Base):
__tablename__ = 'ab_role'
id = Column(Integer, primary_key=True)
name = Column(String(64), unique = True, nullable=False)
permissions = relationship('PermissionView', secondary = assoc_permissionview_role, backref='role')
def __repr__(self):
return self.name
class User(Base):
__tablename__ = 'ab_user'
id = Column(Integer, primary_key=True)
first_name = Column(String(64), nullable = False)
last_name = Column(String(64), nullable = False)
full_name = column_property(first_name + " " + last_name)
username = Column(String(32), unique=True, nullable = False)
password = Column(String(32))
active = Column(Boolean)
email = Column(String(64))
role_id = Column(Integer, ForeignKey('ab_role.id'))
role = relationship("Role")
@staticmethod
def make_unique_nickname(nickname):
if User.query.filter_by(nickname = nickname).first() == None:
return nickname
version = 2
while True:
new_nickname = nickname + str(version)
if User.query.filter_by(nickname = new_nickname).first() == None:
break
version += 1
return new_nickname
def check_password(self, password):
return check_password_hash(self.password, password)
def is_authenticated(self):
return True
def is_active(self):
return self.active
def is_anonymous(self):
return False
def get_id(self):
return unicode(self.id)
def get_full_name(self):
return self.first_name + " " + self.last_name
def __repr__(self):
return (self.get_full_name())
|
Python
| 0
|
@@ -1019,20 +1019,16 @@
rmission
-view
')%0A v
|
c18972be7609b3de061ec41977ad73efccd5213c
|
Fix HTTP Basic authentication decorator
|
agir/lib/http.py
|
agir/lib/http.py
|
import base64
from functools import wraps
from hashlib import sha1
from django.http import HttpResponse
from django.utils.crypto import constant_time_compare
EMPTY_HASH = sha1().digest()
class HttpResponseUnauthorized(HttpResponse):
status_code = 401
def __init__(self, content=b'', realm="api", *args, **kwargs):
super().__init__(content, *args, **kwargs)
self['WWW-Authenticate'] = f'Basic realm="{realm}"'
def check_basic_auth(request, identities):
auth = request.META.get('HTTP_AUTHORIZATION', '').split()
if len(auth) != 2 or auth[0].lower() != 'basic':
return HttpResponseUnauthorized()
try:
user, password = base64.b64decode(auth[1]).decode().split(':')
except:
return HttpResponseUnauthorized()
h = sha1()
h.update(password)
digest = h.digest()
user_exists = user in identities
identical_password = constant_time_compare(digest, identities.get(user, EMPTY_HASH))
if not user_exists or not identical_password:
return HttpResponseUnauthorized()
def with_http_basic_auth(identities):
hashed_identities = {}
for user, password in identities.items():
h = sha1()
h.update(password.encode('utf8'))
hashed_identities[user] = h.digest()
def decorator(view):
if isinstance(view, type):
wrapped_dispatch = type.dispatch
@wraps(wrapped_dispatch)
def wrapper(self, request, *args, **kwargs):
check_basic_auth(request, hashed_identities)
return wrapped_dispatch(self, request, *args, **kwargs)
view.dispatch = wrapper
return view
@wraps(view)
def wrapper(request, *args, **kwargs):
check_basic_auth(request, hashed_identities)
return view(request, *args, **kwargs)
return wrapper
return decorator
|
Python
| 0.000091
|
@@ -434,10 +434,27 @@
lm%7D%22
+, charset=%22UTF-8%22
'%0A
-
%0A%0Ade
@@ -717,23 +717,15 @@
1%5D).
-decode().
split(
+b
':')
@@ -1063,16 +1063,33 @@
ized()%0A%0A
+ return None%0A%0A
%0Adef wit
@@ -1283,16 +1283,31 @@
ies%5Buser
+.encode('utf8')
%5D = h.di
@@ -1521,32 +1521,39 @@
+return
check_basic_auth
@@ -1580,39 +1580,19 @@
ntities)
-%0A return
+ or
wrapped
@@ -1774,16 +1774,23 @@
+return
check_ba
@@ -1825,35 +1825,19 @@
ntities)
-%0A return
+ or
view(re
|
70de505674e5675d969a84339b6bb59431333ed3
|
Revise comments and add blank lines, & revise main()
|
lc0234_palindrome_linked_list.py
|
lc0234_palindrome_linked_list.py
|
"""Leetcode 234. Palindrome Linked List
Easy
URL: https://leetcode.com/problems/palindrome-linked-list/
Given a singly linked list, determine if it is a palindrome.
Example 1:
Input: 1->2
Output: false
Example 2:
Input: 1->2->2->1
Output: true
Follow up:
Could you do it in O(n) time and O(1) space?
"""
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, val):
self.val = val
self.next = None
class LinkedList(object):
def __init__(self):
self.head = None
def append(self, val):
if not self.head:
self.head = ListNode(val)
return None
current = self.head
while current.next:
current = current.next
current.next = ListNode(val)
class Solution1(object):
def isPalindrome(self, head):
"""
:type head: ListNode
:rtype: bool
Time complexity: O(n).
Space complexity: O(n).
"""
stack = []
current = head
while current:
stack.append(current.val)
current = current.next
for i in range(len(stack) // 2):
if stack[i] != stack[len(stack) - 1 - i]:
return False
return True
class Solution2(object):
def isPalindrome(self, head):
"""
:type head: ListNode
:rtype: bool
Time complexity: O(n).
Space complexity: O(1).
"""
# Find the middle node: slow
slow = fast = head
while fast and fast.next:
slow = slow.next
fast = fast.next.next
# Reverse the 2nd half of linked list using slow.
reverse = None
while slow:
nxt = slow.next
slow.next = reverse
reverse = slow
slow = nxt
# Traverse the 1st half and reversed 2nd half at the same time
# and compare their val.
while reverse:
if reverse.val != head.val:
return False
reverse = reverse.next
head = head.next
return True
def main():
import time
# 1->2->2->1: Yes.
a_list = LinkedList()
a_list.append(1)
a_list.append(2)
a_list.append(2)
a_list.append(1)
print a_list.head.val
print a_list.head.next.val
print a_list.head.next.next.val
print a_list.head.next.next.next.val
start_time = time.time()
print 'Naive: {}'.format(Solution1().isPalindrome(a_list.head))
print 'Time: {}'.format(time.time() - start_time)
start_time = time.time()
print 'Optimized: {}'.format(Solution2().isPalindrome(a_list.head))
print 'Time: {}'.format(time.time() - start_time)
# 1->2->3->1: No.
a_list = LinkedList()
a_list.append(1)
a_list.append(2)
a_list.append(3)
a_list.append(1)
print a_list.head.val
print a_list.head.next.val
print a_list.head.next.next.val
print a_list.head.next.next.next.val
start_time = time.time()
print 'Naive: {}'.format(Solution1().isPalindrome(a_list.head))
print 'Time: {}'.format(time.time() - start_time)
start_time = time.time()
print 'Optimized: {}'.format(Solution2().isPalindrome(a_list.head))
print 'Time: {}'.format(time.time() - start_time)
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -453,338 +453,21 @@
ass
-LinkedList(object):%0A def __init__(self):%0A self.head = None%0A%0A def append(self, val):%0A if not self.head:%0A self.head = ListNode(val)%0A return None%0A current = self.head%0A while current.next:%0A current = current.next%0A current.next = ListNode(val)%0A%0A%0Aclass Solution1
+SolutionStack
(obj
@@ -636,32 +636,71 @@
n).%0A %22%22%22%0A
+ # Use stack to collect values.%0A
stack =
@@ -702,16 +702,17 @@
ck = %5B%5D%0A
+%0A
@@ -726,17 +726,16 @@
= head%0A
-%0A
@@ -1206,24 +1206,32 @@
e node: slow
+ + fast.
%0A slo
@@ -1465,32 +1465,33 @@
nxt = slow.next%0A
+%0A
slow
@@ -1533,16 +1533,17 @@
= slow%0A
+%0A
@@ -1910,229 +1910,90 @@
-a_list = LinkedList()%0A a_list.append(1)%0A a_list.append(2)%0A a_list.append(2)%0A a_list.append(1)%0A %0A print a_list.head.val%0A print a_list.head.next.val%0A print a_list.head.next.next.val%0A print a_list.
+head.ListNode(1)%0A head.next = ListNode(2)%0A head.next.next = ListNode(2)%0A
head
@@ -1999,36 +1999,46 @@
d.next.next.next
-.val
+ = ListNode(1)
%0A%0A start_time
@@ -2081,33 +2081,37 @@
.format(Solution
-1
+Stack
().isPalindrome(
@@ -2102,39 +2102,32 @@
().isPalindrome(
-a_list.
head))%0A print
@@ -2251,39 +2251,32 @@
().isPalindrome(
-a_list.
head))%0A print
@@ -2351,229 +2351,90 @@
-a_list = LinkedList()%0A a_list.append(1)%0A a_list.append(2)%0A a_list.append(3)%0A a_list.append(1)%0A %0A print a_list.head.val%0A print a_list.head.next.val%0A print a_list.head.next.next.val%0A print a_list.
+head.ListNode(1)%0A head.next = ListNode(2)%0A head.next.next = ListNode(3)%0A
head
@@ -2448,20 +2448,30 @@
ext.next
-.val
+ = ListNode(1)
%0A%0A st
@@ -2530,17 +2530,21 @@
Solution
-1
+Stack
().isPal
@@ -2543,39 +2543,32 @@
().isPalindrome(
-a_list.
head))%0A print
@@ -2704,15 +2704,8 @@
ome(
-a_list.
head
|
b695762ffdf0f03337368f430e45a2406b7451ad
|
fix url
|
app/remote_rpc.py
|
app/remote_rpc.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by follow on 2016/10/28
from flask import Flask
from flaskext.xmlrpc import XMLRPCHandler, Fault
import jenkins
import time
import base64
import commands
from jinja2.nodes import Output
import os
import MySQLdb
import re
app = Flask(__name__)
handler = XMLRPCHandler('api')
handler.connect(app, '/api')
j = jenkins.Jenkins("http://127.1:8080", 'rpcuser', '2266bcc74441b07e9c50ba468a620199')
manager_host = '10.1.2.49'
@handler.register
def Hello(name='follow'):
if not name:
raise Fault('WTF')
return "hello %s" % name
@handler.register
def GetInfo():
data = j.get_info()
print data
return data
@handler.register
def GetQueueInfo():
data = j.get_queue_info()
print data
return data
@handler.register
def GetJobs():
data = j.get_jobs()
print data
return data
@handler.register
def GetBuildConsoleOutput(name, number):
data = j.get_build_console_output(name, number)
d1 = base64.b64encode(data)
print d1
return d1
@handler.register
def GetJobInfo(n):
data = j.get_job_info(n)
return str(data)
@handler.register
def BuildJob(n):
data = j.build_job(n)
return data
@handler.register
def CleanTheMess():
db = MySQLdb.connect("10.168.2.125", "svn", "svnpassword", "svntool")
cursor = db.cursor()
sql = "update scm_projectstatus set status=0,approve_status=0 where status > 0 and approve_status = 3"
sql2 = "delete from scm_proj_with_user where projectid=(select projectid from scm_projectstatus where status > 0 and approve_status = 3)"
cursor.execute(sql)
cursor.execute(sql2)
db.commit()
db.close()
return
@handler.register
def DoCmd(c):
print "cmd is %s" % c
if c == "start":
command = "su - scm -c /home/scm/apache-tomcat-7.0.39/bin/startup.sh"
status = [os.system(command), "nothing"]
con = False
while not con:
pidinfo = commands.getstatusoutput('netstat -nlp | grep 8080 | awk \'{print $7}\' | cut -d / -f 1')
print "pid is:%s" % str(pidinfo[1])
if not re.match("\d", pidinfo[1]):
print "sleep"
time.sleep(1)
else:
con = True
elif c == "update":
command = "rm -fr /home/scm/apache-tomcat-7.0.39/work/*; rm -fr /home/scm/apache-tomcat-7.0.39/webapps/*; cp -a /root/.jenkins/jobs/Genscript-SCM-QA/workspace/target/scm-test/scm.war /home/scm/apache-tomcat-7.0.39/webapps/scm.war"
time.sleep(2)
status = commands.getstatusoutput(command)
else:
pid = commands.getstatusoutput('netstat -nlp | grep 8080 | awk \'{print $7}\' | cut -d / -f 1')[1]
if re.match("\d", pid):
command = "kill -9 %s" % pid
status = commands.getstatusoutput(command)
print "start to kill %s" % pid
print status
time.sleep(2)
return status
@handler.register
def GetProcessInfo():
status = {}
pidinfo = commands.getstatusoutput('netstat -nlp | grep 8080 | awk \'{print $7}\' | cut -d / -f 1')
status['qa_mtime'] = commands.getoutput(
'stat /home/scm/apache-tomcat-7.0.39/webapps/scm.war | grep \'^Modify\' | cut -d " " -f 2-3 | cut -d . -f1')
status['newest_mtime'] = commands.getoutput(
'stat /opt/scm-manager/wars/*.war | grep \'^Modify\' | cut -d " " -f 2-3 | cut -d . -f1')
if pidinfo[1] == "":
return status
else:
status['pid'] = pidinfo[1]
status['uptime'] = commands.getoutput('ps -p %s -o lstart | sed -n \'2p\'' % pidinfo[1])
return status
@handler.register
def GetBuildInfo(name, number):
data = j.get_build_info(name, number)
t1 = str(data['timestamp'])
duration = data['duration'] / 1000
t2 = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(int(t1[:10])))
data['timestamp'] = t2
data['duration'] = duration
data['changeSet'] = 'changeSet'
    return data # xml-rpc cannot use long integers WTF
@handler.register
def DownloadPackage(path, filename):
file_url = "http://{}:{}/{}/{}".format(manager_host, "80", path, filename)
download_dir = "/opt/scm-manager/wars"
download_command = "aria2c -s 2 -x 2 {} -d {}".format(file_url, download_dir)
commands.getoutput("rm -f {}/*.war".format(download_dir))
output = commands.getoutput(download_command)
return output
app.run('0.0.0.0', port=8085, debug=True)
|
Python
| 0.86565
|
@@ -4088,19 +4088,44 @@
//%7B%7D:%7B%7D/
-%7B%7D/
+uploaded_file/%7B%7D?folder=SCM-
%7B%7D%22.form
@@ -4150,14 +4150,8 @@
80%22,
- path,
fil
@@ -4155,16 +4155,22 @@
filename
+, path
)%0A do
|
dfba61137a76addd4d85cf864505222a66737d8c
|
Use OpenEx when available with visOpenCopy and visOpenRO flags
|
visio2img/visio2img.py
|
visio2img/visio2img.py
|
# -*- coding: utf-8 -*-
# Copyright 2014 Yassu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from math import log
from optparse import OptionParser
def is_pywin32_available():
""" Tests pywin32 is installed """
try:
import win32com # NOQA: import test
return True
except ImportError:
return False
def filter_pages(pages, pagenum, pagename):
""" Choices pages by pagenum and pagename """
if pagenum:
try:
pages = [list(pages)[pagenum - 1]]
except IndexError:
raise IndexError('Invalid page number: %d' % pagenum)
if pagename:
pages = [page for page in pages if page.name == pagename]
if pages == []:
raise IndexError('Page not found: pagename=%s' % pagename)
return pages
class VisioFile(object):
@classmethod
def Open(cls, filename):
obj = cls()
obj.open(filename)
return obj
def __init__(self):
self.app = None
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
return False
def open(self, filename):
assert self.app is None
visio_pathname = os.path.abspath(filename) # visio requires abspath
if not os.path.exists(visio_pathname):
raise IOError('No such visio file: %s', filename)
try:
import win32com.client
self.app = win32com.client.Dispatch('Visio.InvisibleApp')
except:
msg = 'Visio not found. visio2img requires Visio.'
raise OSError(msg)
try:
self.app.Documents.Open(visio_pathname)
except:
self.close()
msg = 'Could not open file (already opend by other process?): %s'
raise IOError(msg % filename)
def close(self):
if self.app:
self.app.Quit()
self.app = None
@property
def pages(self):
if self.app:
return self.app.ActiveDocument.Pages
else:
return []
def export_img(visio_filename, image_filename, pagenum=None, pagename=None):
""" Exports images from visio file """
# visio requires absolute path
image_pathname = os.path.abspath(image_filename)
if not os.path.isdir(os.path.dirname(image_pathname)):
msg = 'Could not write image file: %s' % image_filename
raise IOError(msg)
with VisioFile.Open(visio_filename) as visio:
pages = filter_pages(visio.pages, pagenum, pagename)
try:
if len(pages) == 1:
pages[0].Export(image_pathname)
else:
digits = int(log(len(pages), 10)) + 1
basename, ext = os.path.splitext(image_pathname)
filename_format = "%s%%0%dd%s" % (basename, digits, ext)
for i, page in enumerate(pages):
filename = filename_format % (i + 1)
page.Export(filename)
except:
raise IOError('Could not write image: %s' % image_pathname)
def parse_options(args):
""" Parses command line options """
usage = 'usage: %prog [options] visio_filename image_filename'
parser = OptionParser(usage=usage)
parser.add_option('-p', '--page', action='store',
type='int', dest='pagenum',
help='pick a page by page number')
parser.add_option('-n', '--name', action='store',
type='string', dest='pagename',
help='pick a page by page name')
options, argv = parser.parse_args(args)
if options.pagenum and options.pagename:
parser.error('options --page and --name are mutually exclusive')
if len(argv) != 2:
parser.print_usage(sys.stderr)
parser.exit()
output_ext = os.path.splitext(argv[1])[1].lower()
if output_ext not in ('.gif', '.jpg', '.png'):
parser.error('Unsupported image format: %s' % argv[1])
return options, argv
def main(args=sys.argv[1:]):
""" main funcion of visio2img """
if not is_pywin32_available():
sys.stderr.write('win32com module not found')
return -1
try:
options, argv = parse_options(args)
export_img(argv[0], argv[1], options.pagenum, options.pagename)
return 0
except (IOError, OSError, IndexError) as err:
sys.stderr.write("error: %s" % err)
return -1
|
Python
| 0
|
@@ -2116,32 +2116,479 @@
)%0A%0A try:%0A
+ if hasattr(self.app.Documents, %22OpenEx%22):%0A # Visio %3E= 4.5 supports OpenEx%0A # visOpenCopy + visOpenRO allows opening documents even%0A # if they're open in another visio instance...%0A visOpenCopy = 0x1%0A visOpenRO = 0x2%0A open_flags = visOpenCopy %7C visOpenRO%0A self.app.Documents.OpenEx(visio_pathname, open_flags)%0A else:%0A
self
|
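The diff above uses a common COM feature-detection idiom: probe for the newer method with hasattr and combine the open flags bitwise. A minimal sketch of that pattern, with the constant values taken from the patch itself:

visOpenCopy = 0x1  # open a copy, leaving the original file untouched
visOpenRO = 0x2    # open read-only

def open_document(documents, pathname):
    # Visio >= 4.5 exposes OpenEx; older releases only have Open.
    if hasattr(documents, 'OpenEx'):
        return documents.OpenEx(pathname, visOpenCopy | visOpenRO)
    return documents.Open(pathname)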
a63dea0f7cf448e8423359e5bc0989586fe94af8
|
Fix could not handle error correctly if visio not found
|
visio2img/visio2img.py
|
visio2img/visio2img.py
|
#!/usr/bin/env python3
import sys
from sys import stderr
from os import path
from optparse import OptionParser
from math import log
__all__ = ('export_img')
GEN_IMG_FORMATS = ('.gif', '.jpeg', '.jpg', '.png')
VISIO_FORMATS = ('.vsd', '.vsdx')
def is_pywin32_available():
try:
import win32com # NOQA: import test
return True
except ImportError:
return False
class FileNotFoundError(Exception):
"""
exception represents the input file is not found
"""
class IllegalImageFormatException(TypeError):
"""
This exception means Exceptions for Illegal Image Format.
"""
class VisioNotFoundException(Exception):
"""
This excetion means system has no visio program.
"""
def _get_pages(app, page_num=None):
"""
app -> page
if page_num is None, return all pages.
if page_num is int object, return path_num-th page(from 1).
"""
pages = app.ActiveDocument.Pages
try:
return [list(pages)[page_num - 1]] if page_num else pages
except IndexError:
raise IndexError('This file has no page {}.'.format(page_num))
def _check_format(visio_filename, gen_img_filename):
visio_extension = path.splitext(visio_filename)[1]
gen_img_extension = path.splitext(gen_img_filename)[1]
if visio_extension not in VISIO_FORMATS:
err_str = (
'Input filename is not legal for a visio file. \n'
'This program supports only the vsd extension.'
)
raise IllegalImageFormatException(err_str)
if gen_img_extension not in GEN_IMG_FORMATS:
err_str = (
'Output filename is not legal for an image file. \n'
'This program supports the gif, jpeg, and png extensions.'
)
raise IllegalImageFormatException(err_str)
def export_img(visio_filename, gen_img_filename,
page_num=None, page_name=None):
"""
export as image format
If exported page, return True and else return False.
"""
import win32com.client
from pywintypes import com_error
# to absolute path
visio_filename = path.abspath(visio_filename)
gen_img_filename = path.abspath(gen_img_filename)
# define filename without extension and extension variable
gen_img_filename_without_extension, gen_img_extension = (
path.splitext(gen_img_filename))
_check_format(visio_filename, gen_img_filename)
# if file is not found, exit from program
if not path.exists(visio_filename):
raise FileNotFoundError('Input File is not found.')
gen_img_dir_name = path.dirname(gen_img_filename)
if not path.isdir(gen_img_dir_name):
raise FileNotFoundError('Directory of Output File is not found')
try:
# make instance for visio
_, visio_extension = path.splitext(visio_filename)
application = win32com.client.Dispatch('Visio.InvisibleApp')
# case: system has no visio
if application is None:
raise VisioNotFoundException('System has no Visio.')
application.Visible = False
application.Documents.Open(visio_filename)
# make pages of picture
pages = _get_pages(application, page_num=page_num)
# filter of page names
if page_name is not None:
# generator of page and page names
page_with_names = zip(pages, pages.GetNames())
page_list = list(filter(
lambda pn: pn[1] == page_name,
page_with_names))
pages = [p_w_n[0] for p_w_n in page_list]
# define page_names
if len(pages) == 1:
page_names = [gen_img_filename]
else: # len(pages) >= 2
figure_length = int(log(len(pages), 10)) + 1
page_names = (
(gen_img_filename_without_extension +
("{0:0>" + str(figure_length) + "}").format(page_cnt + 1) +
gen_img_extension
for page_cnt in range(len(pages))))
# Export pages
for page, page_name in zip(pages, page_names):
page.Export(page_name)
if list(pages) == []:
return False
return True # pages is not empty
except com_error:
raise IllegalImageFormatException(
'Output filename is not legal for an image file.')
finally:
application.Quit()
def main(args=sys.argv[1:]):
# define parser
parser = OptionParser()
parser.add_option(
'-p', '--page',
action='store',
type='int',
dest='page',
help='transform only one page (set the number of that page)'
)
parser.add_option(
'-n', '--name',
action='store',
type='string',
dest='page_name',
help='transform only the page with the given name'
)
(options, argv) = parser.parse_args(args)
if (options.page is not None) and (options.page_name is not None):
stderr.write('The page and page name options are mutually exclusive.')
return -1
# if len(arguments) != 2, raise exception
if len(argv) != 2:
stderr.write('Enter only input_filename and output_filename')
return -1
if not is_pywin32_available():
stderr.write('win32com module not found')
return -1
# define input_filename and output_filename
visio_filename = argv[0]
gen_img_filename = argv[1]
try:
is_exported = export_img(visio_filename, gen_img_filename,
page_num=options.page,
page_name=options.page_name)
if is_exported is False:
stderr.write("No page Output")
return -1
return 0
except (FileNotFoundError, IllegalImageFormatException, IndexError) as err:
# expected exception
stderr.write(str(err)) # print message
return -1
except Exception as err:
print('Error')
return -1
|
Python
| 0
|
@@ -1997,35 +1997,8 @@
%22%22%22%0A
- import win32com.client%0A
@@ -2715,41 +2715,38 @@
-# make instance for visio
+import win32com.client
%0A
_,
@@ -2745,79 +2745,17 @@
- _,
visio
-_extension = path.splitext(visio_filename)%0A application
+app
= w
@@ -2804,198 +2804,129 @@
p')%0A
-%0A
- # case: system has no visio%0A if application is None:%0A raise VisioNotFoundException('System has no Visio.')%0A%0A application.Visible = False%0A application
+except:%0A raise VisioNotFoundException('Visio not found. visio2img requires Visio.')%0A%0A try:%0A visioapp
.Doc
@@ -3013,27 +3013,24 @@
t_pages(
-application
+visioapp
, page_n
@@ -4185,19 +4185,16 @@
-application
+visioapp
.Qui
@@ -5551,16 +5551,40 @@
ndError,
+ VisioNotFoundException,
Illegal
|
3e8d113a6fa32c7c9163d3334e484993c29080ba
|
remove split test
|
vlermv/test/test_s3.py
|
vlermv/test/test_s3.py
|
import json
import pytest
from .._s3 import S3Vlermv, split
class FakeBucket:
def __init__(self, name, **db):
self.db = db
self.name = name
def list(self):
for key in self.db:
yield self.new_key(key)
def new_key(self, key):
return FakeKey(self.db, key)
def get_key(self, key):
if key in self.db:
return FakeKey(self.db, key)
def delete_key(self, key):
del(self.db[key])
class FakeKey:
def __init__(self, db, key):
self.db = db
self.key = key
def get_contents_as_string(self):
return self.db[self.key]
def get_contents_to_filename(self, filename):
with open(filename, 'wb') as fp:
fp.write(self.db[self.key])
def set_contents_from_string(self, payload, **kwargs):
self.db[self.key] = payload
def set_contents_from_filename(self, filename, **kwargs):
with open(filename, 'rb') as fp:
self.db[self.key] = fp.read()
CONTRACT = {
'bids': [],
'contract': 'http://search.worldbank.org/wcontractawards/procdetails/OP00032101',
'method.selection': 'QCBS - Quality and Cost-Based Selection',
'price': 'INR 1,96,53,750',
'project': None
}
PAYLOAD = json.dumps(CONTRACT).encode('utf-8')
def test_read():
d = S3Vlermv('contracts', serializer = json,
bucket = FakeBucket('aoeu', OP00032101 = PAYLOAD))
assert d['OP00032101'] == CONTRACT
def test_write():
fakebucket = FakeBucket('aoeu')
d = S3Vlermv('contracts', bucket = fakebucket, serializer = json)
assert fakebucket.db == {}
d['OP00032101'] = CONTRACT
assert fakebucket.db == {'OP00032101': PAYLOAD}
def test_split():
assert split('a/bb/cc') == ('a', 'bb', 'cc')
assert split('one') == ('one',)
def test_delete():
fakebucket = FakeBucket('aoeu')
d = S3Vlermv('contracts', bucket = fakebucket, serializer = json)
d['OP00032101'] = CONTRACT
del(d['OP00032101'])
assert len(fakebucket.db) == 0
|
Python
| 0.000006
|
@@ -51,15 +51,8 @@
ermv
-, split
%0A%0Acl
@@ -1687,112 +1687,8 @@
D%7D%0A%0A
-def test_split():%0A assert split('a/bb/cc') == ('a', 'bb', 'cc')%0A assert split('one') == ('one',)%0A%0A
def
|
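The removed test was the only place the split helper is documented; from its deleted assertions, a hypothetical reconstruction (the real implementation in vlermv._s3 may differ):

def split(key):
    # Break an S3-style key path into its components.
    return tuple(key.split('/'))

assert split('a/bb/cc') == ('a', 'bb', 'cc')
assert split('one') == ('one',)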
d4ffe068638aa1394c1a34eaa43859edb47c0473
|
Update hodograph_inset example to plot the colormap by height.
|
examples/plots/Hodograph_Inset.py
|
examples/plots/Hodograph_Inset.py
|
# Copyright (c) 2016 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
Hodograph Inset
===============
Layout a Skew-T plot with a hodograph inset into the plot.
"""
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import numpy as np
import pandas as pd
import metpy.calc as mpcalc
from metpy.cbook import get_test_data
from metpy.plots import add_metpy_logo, Hodograph, SkewT
from metpy.units import units
###########################################
# Upper air data can be obtained using the siphon package, but for this example we will use
# some of MetPy's sample data.
col_names = ['pressure', 'height', 'temperature', 'dewpoint', 'direction', 'speed']
df = pd.read_fwf(get_test_data('may4_sounding.txt', as_file_obj=False),
skiprows=5, usecols=[0, 1, 2, 3, 6, 7], names=col_names)
df['u_wind'], df['v_wind'] = mpcalc.wind_components(df['speed'],
np.deg2rad(df['direction']))
# Drop any rows with all NaN values for T, Td, winds
df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed',
'u_wind', 'v_wind'), how='all').reset_index(drop=True)
###########################################
# We will pull the data out of the example dataset into individual variables and
# assign units.
p = df['pressure'].values * units.hPa
T = df['temperature'].values * units.degC
Td = df['dewpoint'].values * units.degC
wind_speed = df['speed'].values * units.knots
wind_dir = df['direction'].values * units.degrees
u, v = mpcalc.wind_components(wind_speed, wind_dir)
###########################################
# Create a new figure. The dimensions here give a good aspect ratio
fig = plt.figure(figsize=(9, 9))
add_metpy_logo(fig, 115, 100)
# Grid for plots
skew = SkewT(fig, rotation=45)
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
skew.plot(p, T, 'r')
skew.plot(p, Td, 'g')
skew.plot_barbs(p, u, v)
skew.ax.set_ylim(1000, 100)
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
# Good bounds for aspect ratio
skew.ax.set_xlim(-50, 60)
# Create a hodograph
ax_hod = inset_axes(skew.ax, '40%', '40%', loc=1)
h = Hodograph(ax_hod, component_range=80.)
h.add_grid(increment=20)
h.plot_colormapped(u, v, np.hypot(u, v))
# Show the plot
plt.show()
|
Python
| 0
|
@@ -1412,16 +1412,55 @@
units.%0A%0A
+hght = df%5B'height'%5D.values * units.hPa%0A
p = df%5B'
@@ -2515,22 +2515,12 @@
v,
-np.hypot(u, v)
+hght
)%0A%0A#
|
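The argument being replaced, np.hypot(u, v), is simply the wind speed magnitude that coloured the hodograph; the patch colours by height instead. A quick standalone check of what the old colour variable computed:

import numpy as np

u = np.array([3.0, 0.0])
v = np.array([4.0, 5.0])
print(np.hypot(u, v))  # [5. 5.] -- elementwise sqrt(u**2 + v**2)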
c289c8a165850af76d45aa2d6773418a2c5c1ad3
|
Add Pylint exemption.
|
Python_Toolbox_Template.pyt
|
Python_Toolbox_Template.pyt
|
# -*- coding=utf-8 -*-
"""##TODO: Docstring."""
##TODO: Standard lib imports.
import logging
##TODO: Third-party imports.
##TODO: Local imports.
import arcpy
LOG = logging.getLogger(__name__)
PARAMETER_ATTRIBUTES = {
'example_parameter': {
# Match parameter name to dictionary key.
'name': 'example_parameter',
'displayName': "Example Parameter",
# Direction: 'Input' or 'Output'.
'direction': 'Input',
# datatype: http://desktop.arcgis.com/en/arcmap/latest/analyze/creating-tools/defining-parameter-data-types-in-a-python-toolbox.htm
'datatype': 'GPBoolean',
# parameterType: 'Required', 'Optional', or 'Derived'.
'parameterType': 'Required',
# enabled: True or False.
'enabled': True,
# category (optional). Note: having one collapses the category on open.
'category': None,
'multiValue': False,
# Value type must be Python type match for datatype.
'value': True,
# symbology (optional): Path to layer file for drawing output.
'symbology': None,
},
}
class Toolbox(object): # pylint: disable=too-few-public-methods
"""Define the toolbox.
Toolbox class is required for constructing and ArcGIS Python toolbox.
The name of toolbox is the basename of this file.
"""
def __init__(self):
self.label = "##TODO: Toolbox label."
# Alias is toolbox namespace when attached to ArcPy (arcpy.{alias}).
# Attach using arcpy.AddToolbox().
self.alias = '##TODO: Toolbox alias.'
# List of tool classes associated with this toolbox.
# self.tools must be list (not other iterable).
self.tools = [
# Add tools here by their class name to make visible in toolbox.
ToolExample,
]
class ToolExample(object):
"""Example of an individual tool in an ArcGIS Python toolbox."""
def __init__(self):
# Label is how tool is named within toolbox.
self.label = "##TODO: Label."
# Category is name of sub-toolset tool will be in (optional).
self.category = None
# Description is longer text for tool, shown in side panel.
self.description = """
##TODO: Description.
"""
# Sets whether the tool controls ArcGIS focus while running.
self.canRunInBackground = False
# Recommended: collect parameter attributes here, to have a default
# reference in instance.
self.parameter_attributes = (
PARAMETER_ATTRIBUTES['example_parameter'],
)
def getParameterInfo(self): # pylint: disable=no-self-use
"""Load parameters into toolbox."""
# Create the parameters in a separate place (allows reusability),
# then add them here. Recommended: use parameter_from_attributes
# to allow initial definition to be a dictionary/attribute map.
# Return value must be list (not other iterable).
parameters = [parameter_from_attributes(attributes)
for attributes in self.parameter_attributes]
return parameters
def isLicensed(self): # pylint: disable=no-self-use
"""Set whether tool is licensed to execute."""
# If tool needs extra licensing, checking here will prevent execution.
return True
def updateMessages(self, parameters): # pylint: disable=no-self-use
"""Modify messages created by internal validation for each parameter.
This method is called after internal validation.
"""
# No update requirements at this time.
return
def updateParameters(self, parameters): # pylint: disable=no-self-use
"""Modify parameters before internal validation is performed.
This method is called whenever a parameter has been changed.
"""
# Follow the below format for checking for changes.
# Same code can be used for updateMessages.
# Remove code if not needed.
parameter_map = {parameter.name: parameter for parameter in parameters}
if parameter_changed(parameter_map['a_parameter']):
# Do something.
pass
return
def execute(self, parameters, messages): # pylint: disable=no-self-use
"""Procedural code of the tool."""
# Set up logger-like object, logs to both ArPy and file's logger.
log = ArcLogger(loggers=[LOG])
# value_map contains dictionary with parameter name/value key/values.
value_map = parameter_value_map(parameters)
log.info("TODO: Steps of the tool here.")
return
# Tool-specific helpers.
##TODO: Put objects specific to tool(s) only in this toolbox here.
# Helpers.
##TODO: Put more generic objects here.
class ArcLogger(object):
"""Faux-logger for logging to ArcPy/ArcGIS messaging system."""
arc_function = {
logging.NOTSET: (lambda msg: None),
# No debug level in Arc messaging system 👎.
logging.DEBUG: (lambda msg: None),
logging.INFO: arcpy.AddMessage,
logging.WARNING: arcpy.AddWarning,
logging.ERROR: arcpy.AddError,
# No critical level in Arc messaging system 👎. Map to error level.
logging.CRITICAL: arcpy.AddError,
}
def __init__(self, loggers=None):
"""Instance initialization."""
self.loggers = loggers if loggers else []
def debug(self, msg):
"""Log message with level DEBUG."""
self.log(logging.DEBUG, msg)
def info(self, msg):
"""Log message with level INFO."""
self.log(logging.INFO, msg)
def warning(self, msg):
"""Log message with level WARNING."""
self.log(logging.WARNING, msg)
def error(self, msg):
"""Log message with level ERROR."""
self.log(logging.ERROR, msg)
def critical(self, msg):
"""Log message with level CRITICAL."""
self.log(logging.CRITICAL, msg)
def log(self, lvl, msg):
"""Log message with level lvl."""
self.arc_function[lvl](msg)
for logger in self.loggers:
logger.log(lvl, msg)
def parameter_changed(parameter):
"""Return True if parameter is in a pre-validation changed state."""
return all([parameter.altered, not parameter.hasBeenValidated])
def parameter_from_attributes(attribute_map):
"""Create ArcPy parameter object using an attribute mapping.
Note that this doesn't check if the attribute exists in the default
parameter instance. This means that you can attempt to set a new
attribute, but the result will depend on how the class implements setattr
(usually this will just attach the new attribute).
"""
parameter = arcpy.Parameter()
for attribute_name, attribute_value in attribute_map.items():
# Apply filter later.
if attribute_name.startswith('filter.'):
continue
else:
setattr(parameter, attribute_name, attribute_value)
# Filter attributes don't stick using setattr.
if 'filter.type' in attribute_map:
parameter.filter.type = attribute_map['filter.type']
if 'filter.list' in attribute_map:
parameter.filter.list = attribute_map['filter.list']
return parameter
def parameter_value(parameter):
"""Return value of parameter."""
def handle_value_object(value_object):
"""Return actual value from value object.
Some values embedded in 'value object' (.value.value), others aren't.
"""
return getattr(value_object, 'value', value_object)
if not parameter.multiValue:
result = handle_value_object(parameter.value)
# Multivalue parameters place their values in .values (.value. holds a
# ValueTable object).
else:
result = [handle_value_object(value) for value in parameter.values]
return result
def parameter_value_map(parameters):
"""Create value map from ArcPy parameter objects."""
return {parameter.name: parameter_value(parameter)
for parameter in parameters}
|
Python
| 0
|
@@ -2474,16 +2474,48 @@
= False
+ # pylint: disable=invalid-name
%0D%0A
|
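The added pragma is an inline Pylint control comment: it silences exactly one check (invalid-name, triggered by the camelCase attribute name ArcGIS requires) on exactly one line. In isolation:

# The trailing comment disables the naming check for this line only.
canRunInBackground = False  # pylint: disable=invalid-name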
130234f0f62c04b3cc0a4b20f0de789959abf4c9
|
Change default zoom to 16 and make it overridable
|
molly/maps/__init__.py
|
molly/maps/__init__.py
|
from molly.maps.osm.utils import fit_to_map
class Map:
"""
An object which represents a Map. This should be added to a context and then
passed to @C{render_map} in your template to get the appropriate HTML
"""
def __init__(self, centre_point, points, min_points, zoom, width, height):
"""
@param centre_point: A tuple of longitude, latitude and colour
corresponding to the "centre" of the map. This is
NOT necessarily the central latitude/longitude of
the generated image, but simply a special marker
which is indicated with a star.
@type centre_point: (float, float, str) or None
@param points: An (ordered) list of points to be plotted on the map.
These are indicated on the map with numbered markers.
This list consists of tuples of longitude, latitude and a
string indicating the colours of the markers to be
rendered.
@type points: [(float, float, str)]
@param min_points: The minimum number of points to be displayed on the
resulting map
@type min_points: int
@param zoom: A bound on the maximum zoom level to be rendered. If this
zoom level is too small to fit @C{min_points} points on it,
then the map will be zoomed out further to fit in. If this
is None, then this is equivalent to the smallest zoom
level.
@type zoom: int
@param width: The width of the generated map image, in pixels
@type width: int
@param height: The height of the generated map image, in pixels
@type height: int
"""
self.centre_point = centre_point
self.min_points = min_points
self.width = width
self.height = height
self.static_map_hash, (self.points, self.zoom) = fit_to_map(
centre_point = centre_point,
points = points,
min_points = min_points,
zoom = zoom,
width = width,
height = height,
)
def map_from_point(point, width, height, colour='green'):
"""
A shortcut which renders a simple map containing only one point rendered as
a star
"""
return Map((point[0], point[1], colour), [], 1, 18, width, height)
|
Python
| 0
|
@@ -2323,16 +2323,25 @@
='green'
+, zoom=16
):%0A %22
@@ -2498,10 +2498,12 @@
1,
-18
+zoom
, wi
|
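The change turns the hard-coded zoom of 18 into a keyword argument defaulting to 16, so existing callers keep working and new callers can override it. A simplified stand-in (returning a tuple rather than the real Map object):

def map_from_point(point, width, height, colour='green', zoom=16):
    return (point[0], point[1], colour, zoom, width, height)

map_from_point((-1.25, 51.75), 300, 200)           # new default, zoom 16
map_from_point((-1.25, 51.75), 300, 200, zoom=18)  # restores the old value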
3b0865bbfcee18afb842cc9f50f8c83c0d70f221
|
Add the other v ;-).
|
sphinx/fabfile.py
|
sphinx/fabfile.py
|
from fabric.api import run, env, roles
from fabric.contrib.files import exists
from fabric.contrib.project import rsync_project
import sys
sys.path.append("source")
import conf
env.roledefs = {
'web': ['bokeh.pydata.org']
}
env.user = "bokeh"
@roles('web')
def deploy(v=None):
if v is None:
v = conf.version
elif v == "latest":
raise RuntimeError("You cannot pass 'latest' as a fab argument. Use "
"fab latest:x.x.x instead.")
# make a backup of the old directory
run("rm -rf /www/bokeh/en/%s.bak" % v)
run("mkdir -p /www/bokeh/en/%s" % v)
run("cp -ar /www/bokeh/en/%s /www/bokeh/en/%s.bak" % (v, v))
rsync_project(
local_dir="_build/html/",
remote_dir="/www/bokeh/en/%s" % v,
delete=True
)
# set permissions
run("chmod -R g+w /www/bokeh/en/%s" % v)
@roles('web')
def latest(v=None):
if v is None:
raise RuntimeError("You need to specify a version number: fab latest:x.x.x")
if exists("/www/bokeh/en/%s" % v):
# switch the current symlink to new docs
run("rm /www/bokeh/en/latest")
run("ln -s /www/bokeh/en/%s /www/bokeh/en/latest" % v)
else:
raise RuntimeError("We did not detect a %s docs version, please use "
"fab deploy:%s first." % v)
|
Python
| 0.000005
|
@@ -1326,15 +1326,20 @@
s first.%22 %25
-v
+(v, v)
)%0A
|
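The fix addresses a classic %-formatting pitfall: a format string with two placeholders needs a tuple on the right-hand side, so "% v" raises where "% (v, v)" succeeds:

v = '0.4.4'
template = 'We did not detect a %s docs version, please use fab deploy:%s first.'
try:
    template % v
except TypeError as err:
    print(err)  # not enough arguments for format string
print(template % (v, v))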
b3ddba27c92f36ee9534903b43ff632daa148585
|
Fix public body search index by indexing jurisdiction name
|
froide/publicbody/search_indexes.py
|
froide/publicbody/search_indexes.py
|
from django.conf import settings
from haystack import indexes
from haystack import site
from publicbody.models import PublicBody
from helper.searchindex import QueuedRealTimeSearchIndex
PUBLIC_BODY_BOOSTS = getattr(settings, "FROIDE_PUBLIC_BODY_BOOSTS", {})
class PublicBodyIndex(QueuedRealTimeSearchIndex):
text = indexes.EdgeNgramField(document=True, use_template=True)
name = indexes.CharField(model_attr='name', boost=1.5)
jurisdiction = indexes.CharField(model_attr='jurisdiction', default='')
topic_auto = indexes.EdgeNgramField(model_attr='topic_name')
topic_slug = indexes.CharField(model_attr='topic__slug')
name_auto = indexes.EdgeNgramField(model_attr='name')
url = indexes.CharField(model_attr='get_absolute_url')
def index_queryset(self):
"""Used when the entire index for model is updated."""
return PublicBody.objects.get_for_search_index()
def prepare(self, obj):
data = super(PublicBodyIndex, self).prepare(obj)
if obj.classification in PUBLIC_BODY_BOOSTS:
data['boost'] = PUBLIC_BODY_BOOSTS[obj.classification]
print "Boosting %s at %f" % (obj, data['boost'])
return data
site.register(PublicBody, PublicBodyIndex)
|
Python
| 0.000723
|
@@ -495,16 +495,22 @@
sdiction
+__name
', defau
|
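The double underscore in model_attr='jurisdiction__name' makes Haystack follow the foreign key and index the related object's name rather than its string representation. A plain-Python analogy of that traversal (hypothetical classes, not Haystack internals):

class Jurisdiction(object):
    name = 'Bund'

class PublicBody(object):
    jurisdiction = Jurisdiction()

def resolve_attr(obj, model_attr):
    # Walk each '__'-separated attribute in turn.
    for part in model_attr.split('__'):
        obj = getattr(obj, part)
    return obj

print(resolve_attr(PublicBody(), 'jurisdiction__name'))  # Bund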
8191d25e732b16a0121bd64320348108b9259892
|
Add SecurityQuestionModelAdmin
|
molo/profiles/admin.py
|
molo/profiles/admin.py
|
import csv
from daterange_filter.filter import DateRangeFilter
from django.contrib import admin
from django.http import HttpResponse
from django.contrib.auth.models import User
from django.contrib.auth.admin import UserAdmin
from django.contrib.admin.sites import NotRegistered
from molo.profiles.admin_views import FrontendUsersAdminView
from wagtailmodeladmin.options import ModelAdmin as WagtailModelAdmin
try:
admin.site.unregister(User)
except NotRegistered:
pass
def download_as_csv(ProfileUserAdmin, request, queryset):
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment;filename=export.csv'
writer = csv.writer(response)
user_model_fields = UserAdmin.list_display + ('date_joined', )
profile_fields = ('alias', 'mobile_number')
field_names = user_model_fields + profile_fields
writer.writerow(field_names)
for obj in queryset:
if obj.profile.alias:
obj.profile.alias = obj.profile.alias.encode('utf-8')
obj.username = obj.username.encode('utf-8')
obj.date_joined = obj.date_joined.strftime("%Y-%m-%d %H:%M")
writer.writerow(
[getattr(obj, field) for field in user_model_fields] +
[getattr(obj.profile, field) for field in profile_fields])
return response
download_as_csv.short_description = "Download selected as csv"
@admin.register(User)
class ProfileUserAdmin(UserAdmin):
list_display = UserAdmin.list_display + (
'date_joined', '_alias', '_mobile_number', '_date_of_birth')
list_filter = UserAdmin.list_filter + ('date_joined', )
actions = [download_as_csv]
def _alias(self, obj, *args, **kwargs):
if hasattr(obj, 'profile') and obj.profile.alias:
return obj.profile.alias
return ''
def _mobile_number(self, obj, *args, **kwargs):
if hasattr(obj, 'profile') and obj.profile.mobile_number:
return obj.profile.mobile_number
return ''
def _date_of_birth(self, obj, *args, **kwargs):
if hasattr(obj, 'profile') and obj.profile.date_of_birth:
return obj.profile.date_of_birth
return ''
# Below here is for Wagtail Admin
class FrontendUsersDateRangeFilter(DateRangeFilter):
template = 'admin/frontend_users_date_range_filter.html'
class FrontendUsersModelAdmin(WagtailModelAdmin, ProfileUserAdmin):
model = User
menu_label = 'End Users'
menu_icon = 'user'
menu_order = 600
index_view_class = FrontendUsersAdminView
add_to_settings_menu = True
list_display = ('username', '_alias', '_mobile_number', '_date_of_birth',
'email', 'date_joined', 'is_active')
list_filter = (('date_joined', FrontendUsersDateRangeFilter), 'is_active')
search_fields = ('username',)
def get_queryset(self, request):
queryset = User.objects.filter(is_staff=False)
return queryset
|
Python
| 0
|
@@ -333,16 +333,66 @@
minView%0A
+from molo.profiles.models import SecurityQuestion%0A
from wag
@@ -2974,8 +2974,197 @@
ueryset%0A
+%0A%0Aclass SecurityQuestionModelAdmin(WagtailModelAdmin):%0A model = SecurityQuestion%0A menu_label = %22Security Questions%22%0A add_to_settings_menu = True%0A search_fields = (%22questions%22,)%0A
|
6499aecb18104114d47707ba4c1080bb817f7ccc
|
Update loadlogs.py
|
logger/loadlogs.py
|
logger/loadlogs.py
|
#!/usr/bin/env python
from tools import *
from ratchet import *
from logaccess_config import *
# Retrieving from CouchDB a Title dictionary as: dict['bjmbr']=XXXX-XXXX
acrondict = getTitles()
proc_coll = get_proc_collection()
allowed_issns = []
for key, issn in acrondict.items():
allowed_issns.append(issn)
if acrondict:
for logdir in get_logdirs():
print "listing log files at: " + logdir
for logfile in get_files_in_logdir(logdir):
if log_was_processed(proc_coll, logfile):
continue
else:
print "processing: {0}".format(logfile)
reg_logfile(proc_coll, logfile)
rq = RatchetQueue(limit=100)
for line in get_file_lines(logfile):
parsed_line = parse_apache_line(line, acrondict)
if parsed_line:
if parsed_line['access_type'] == "PDF":
pdfid = parsed_line['pdf_path']
issn = parsed_line['pdf_issn']
rq.register_download_access(pdfid, issn, parsed_line['iso_date'])
if parsed_line['access_type'] == "HTML":
if is_allowed_query(parsed_line['query_string'], allowed_issns):
script = parsed_line['query_string']['script'][0]
pid = parsed_line['query_string']['pid'][0].upper().replace('S', '')
if script == "sci_serial":
rq.register_journal_access(pid, parsed_line['iso_date'])
elif script == "sci_abstract":
rq.register_abstract_access(pid, parsed_line['iso_date'])
elif script == "sci_issuetoc":
rq.register_toc_access(pid, parsed_line['iso_date'])
elif script == "sci_arttext":
rq.register_article_access(pid, parsed_line['iso_date'])
elif script == "sci_pdf":
rq.register_pdf_access(pid, parsed_line['iso_date'])
elif script == "sci_home":
rq.register_home_access(pid, parsed_line['iso_date'])
elif script == "sci_issues":
rq.register_issues_access(pid, parsed_line['iso_date'])
elif script == "sci_alphabetic":
rq.register_alpha_access(pid, parsed_line['iso_date'])
rq.send()
else:
print "Connection to CouchDB Fail"
|
Python
| 0.000001
|
@@ -697,11 +697,9 @@
mit=
-100
+5
)%0A
|
3da17a2f61daecc34772ead7e6caffa9da49bf48
|
Add default values and shebang
|
06-setPositionFromArgs.py
|
06-setPositionFromArgs.py
|
# We have to import the minecraft api module to do anything in the minecraft world
from mcpi.minecraft import *
import sys
# this means that the file can be imported without executing anything in this code block
if __name__ == "__main__":
"""
First thing you do is create a connection to minecraft
This is like dialling a phone.
It sets up a communication line between your script and the minecraft world
"""
# Create a connection to Minecraft
# Any communication with the world must use this object
mc = Minecraft.create()
# Get the current tile/block that the player is located at in the world
playerPosition = mc.player.getTilePos()
# create the output message as a string
message = " you are at (" +str(playerPosition.x)+","+str(playerPosition.y)+","+str(playerPosition.z)+")"
# print to the python interpreter standard output (terminal or IDLE probably)
print(message)
# send message to the minecraft chat
mc.postToChat(message)
numOfArgs = len(sys.argv)
if numOfArgs == 3:
newXposn = int(sys.argv[1])
newZposn = int(sys.argv[2])
else:
print("incorrect number of arguments")
sys.exit()
newYposn = mc.getHeight(newXposn, newZposn)
mc.player.setTilePos(newXposn, newYposn, newZposn)
# Get the current tile/block that the player is located at in the world
playerPosition = mc.player.getTilePos()
message = " you are now at (" +str(playerPosition.x)+","+str(playerPosition.y)+","+str(playerPosition.z)+")"
print(message)
mc.postToChat(message)
|
Python
| 0
|
@@ -1,8 +1,31 @@
+#!/usr/bin/env python%0A%0A
# We hav
@@ -1018,24 +1018,81 @@
at(message)%0A
+ #Set Default values%0A newXposn = 0%0A newZposn = 0
%0A numOfAr
|
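Pre-seeding newXposn and newZposn matters because the variables are otherwise bound only inside the argument-count branch; the defaults keep the script safe even if the control flow around sys.exit() ever changes. A minimal sketch of the pattern:

import sys

new_x, new_z = 0, 0  # defaults, as added by the patch
if len(sys.argv) == 3:
    new_x, new_z = int(sys.argv[1]), int(sys.argv[2])
print(new_x, new_z)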
d41a6769fcb6d9d3788ad23ac031ecebd3775bc1
|
fix "clang-format/tidy" mixup in doc comment [NFC]
|
scripts/clang_tidy_report.py
|
scripts/clang_tidy_report.py
|
#!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the the Apache License v2.0 with LLVM Exceptions (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://llvm.org/LICENSE.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
import re
import subprocess
from typing import Optional
import pathspec
import ignore_diff
from buildkite_utils import annotate
from phabtalk.phabtalk import Report, Step
def run(base_commit, ignore_config, step: Optional[Step], report: Optional[Report]):
"""Apply clang-format and return if no issues were found."""
if report is None:
report = Report() # For debugging.
if step is None:
step = Step() # For debugging.
r = subprocess.run(f'git diff -U0 --no-prefix {base_commit}', shell=True, capture_output=True)
logging.debug(f'git diff {r}')
diff = r.stdout.decode()
if ignore_config is not None and os.path.exists(ignore_config):
ignore = pathspec.PathSpec.from_lines(pathspec.patterns.GitWildMatchPattern,
open(ignore_config, 'r').readlines())
diff = ignore_diff.remove_ignored(diff.splitlines(keepends=True), open(ignore_config, 'r'))
logging.debug(f'filtered diff: {diff}')
else:
ignore = pathspec.PathSpec.from_lines(pathspec.patterns.GitWildMatchPattern, [])
p = subprocess.Popen(['clang-tidy-diff', '-p0', '-quiet'], stdout=subprocess.PIPE, stdin=subprocess.PIPE,
stderr=subprocess.PIPE)
step.reproduce_commands.append(f'git diff -U0 --no-prefix {base_commit} | clang-tidy-diff -p0')
a = ''.join(diff)
logging.info(f'clang-tidy input: {a}')
out = p.communicate(input=a.encode())[0].decode()
logging.debug(f'clang-tidy-diff {p}: {out}')
# Typical finding looks like:
# [cwd/]clang/include/clang/AST/DeclCXX.h:3058:20: error: ... [clang-diagnostic-error]
pattern = '^([^:]*):(\\d+):(\\d+): (.*): (.*)'
add_artifact = False
logging.debug("cwd %s", os.getcwd())
errors_count = 0
warn_count = 0
inline_comments = 0
for line in out.splitlines(keepends=False):
line = line.strip()
line = line.replace(os.getcwd() + os.sep, '')
logging.debug(line)
if len(line) == 0 or line == 'No relevant changes found.':
continue
add_artifact = True
match = re.search(pattern, line)
if match:
file_name = match.group(1)
line_pos = match.group(2)
char_pos = match.group(3)
severity = match.group(4)
text = match.group(5)
text += '\n[[{} | not useful]] '.format(
'https://github.com/google/llvm-premerge-checks/blob/main/docs/clang_tidy.md#warning-is-not-useful')
if severity in ['warning', 'error']:
if severity == 'warning':
warn_count += 1
if severity == 'error':
errors_count += 1
if ignore.match_file(file_name):
print('{} is ignored by pattern and no comment will be added'.format(file_name))
else:
inline_comments += 1
report.add_lint({
'name': 'clang-tidy',
'severity': 'warning',
'code': 'clang-tidy',
'path': file_name,
'line': int(line_pos),
'char': int(char_pos),
'description': '{}: {}'.format(severity, text),
})
else:
logging.debug('does not match pattern')
if add_artifact:
p = 'clang-tidy.txt'
with open(p, 'w') as f:
f.write(out)
report.add_artifact(os.getcwd(), p, 'clang-tidy')
if errors_count + warn_count != 0:
step.success = False
url = "https://github.com/google/llvm-premerge-checks/blob/main/docs/clang_tidy.md#review-comments."
annotate(f'clang-tidy found {errors_count} errors and {warn_count} warnings. {inline_comments} of them were '
f'added as review comments [why?]({url})', style='error')
logging.debug(f'report: {report}')
logging.debug(f'step: {step}')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Runs clang-format against given diff with given commit. '
'Produces patch and attaches linter comments to a review.')
parser.add_argument('--base', default='HEAD~1')
parser.add_argument('--ignore-config', default=None, help='path to file with patterns of files to ignore')
parser.add_argument('--log-level', type=str, default='INFO')
args = parser.parse_args()
logging.basicConfig(level=args.log_level, format='%(levelname)-7s %(message)s')
run(args.base, args.ignore_config, None, None)
|
Python
| 0
|
@@ -913,22 +913,20 @@
y clang-
-format
+tidy
and ret
|
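The finding-parsing regex in the script above is easy to sanity-check in isolation; the sample line below is illustrative, not from a real run:

import re

pattern = '^([^:]*):(\\d+):(\\d+): (.*): (.*)'
line = 'clang/include/clang/AST/DeclCXX.h:3058:20: error: unknown type [clang-diagnostic-error]'
match = re.search(pattern, line)
# -> ('clang/include/clang/AST/DeclCXX.h', '3058', '20', 'error',
#     'unknown type [clang-diagnostic-error]')
print(match.groups())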
8a8ea6a554e0142823be8b13b9cfb1de43acafec
|
fix bug when getting data by key
|
api/api/views.py
|
api/api/views.py
|
#coding:utf-8
from cgi import FieldStorage
import json
from uuid import uuid4
from schema.product import ProductSchema
import colander
from mapping.products import Product
from cornice import Service
from webob import exc
from webob.response import Response
from couchdb.json import encode as couchdb_json_encode
products = Service(name='products', path='/products', description="products", cors_origins=('*',))
product = Service(name='product', path='/products/{product_id}', description="product detail", cors_origins=('*',))
image = Service(name='image', path='/image', description="upload image", cors_origins=('*',))
class UnauthorizedView(exc.HTTPError):
def __init__(self, msg=u'Unauthorized'):
body = {'status': 401, 'message': msg}
Response.__init__(self, json.dumps(body))
self.status = 401
self.content_type = 'application/json'
class BadRequestView(exc.HTTPError):
def __init__(self, msg=u'Bad request, missing data.'):
body = {'status': 400, 'message': msg}
Response.__init__(self, json.dumps(body))
self.status = 400
self.content_type = 'application/json'
def convert_to_ember_data_array(couch_data, name):
payload = {name: []}
for item in couch_data['rows']:
item[u'value'][u'id'] = item[u'value'][u'_id']
del item[u'value'][u'_id']
payload[name].append(item[u'value'])
return payload
def convert_to_ember_data_single(couch_data, name):
payload = {name: {}}
if couch_data[u'rows']:
couch_data[u'rows'][u'value'][u'id'] = couch_data[u'rows'][u'value'][u'_id']
del couch_data[u'rows'][u'value'][u'_id']
payload[name] = couch_data[u'rows'][u'value']
return payload
product_add_error = {
    'brandNone': u'You forgot to fill in the brand name',
    'categoryNone': u'The category matters, please add it',
    'specNone': u'Every product has its own model number, do not leave it out',
    'priceNone': u'The price is required, it is how we earn money',
    'priceNotNumber': u'The price must be a number, in units of fen (cents)',
    'descNone': u'Just write something for the description',
    'coverNone': u'The cover image is the face of the product'
}
def validate_product(request):
try:
schema = ProductSchema()
new_product = schema.deserialize(request.json_body['product'])
request.validated['product'] = new_product
except colander.Invalid, e:
errors = e.asdict()
for error_name, error_value in errors.items():
if error_value == 'Required':
request.errors.add('body', error_name, product_add_error[error_name + 'None'])
elif 'not a number' in error_value:
request.errors.add('body', error_name, product_add_error[error_name + 'NotNumber'])
@products.post(content_type="application/json", validators=validate_product)
def add_product(request):
product = request.validated['product']
new_product = Product(product)
db = request.db
result = new_product.store(db)
product['id'] = result.id
return {'product': product}
@products.get()
def get_products(request):
db = request.db
result = db.resource('_design', 'products', '_view', 'product_list').get_json()[2]
return convert_to_ember_data_array(result, 'products')
@image.post()
def upload_image(request):
up = request.up
for file_type, file_wrapper in request.params.items():
if isinstance(file_wrapper, FieldStorage):
file_ext = '.' + file_wrapper.type.split('/')[-1]
image_url = '/products/' + uuid4().hex + file_ext
up.put(image_url, file_wrapper.file, checksum=True)
return {'image': image_url}
return {'error': True}
@product.get()
def get_product(request):
product_id = request.matchdict['product_id']
db = request.db
result = db.resource('_design', 'products', '_view', 'product_list').get_json(key=couchdb_json_encode(product_id))[2]
return convert_to_ember_data_single(result, 'product')
|
Python
| 0
|
@@ -1536,32 +1536,35 @@
h_data%5Bu'rows'%5D%5B
+0%5D%5B
u'value'%5D%5Bu'id'%5D
@@ -1578,32 +1578,35 @@
h_data%5Bu'rows'%5D%5B
+0%5D%5B
u'value'%5D%5Bu'_id'
@@ -1631,32 +1631,35 @@
h_data%5Bu'rows'%5D%5B
+0%5D%5B
u'value'%5D%5Bu'_id'
@@ -1704,16 +1704,19 @@
'rows'%5D%5B
+0%5D%5B
u'value'
|
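The bug fixed here is treating a CouchDB view result as a mapping: rows is a list of row objects, so it must be indexed before reaching into 'value'. A standalone illustration with fabricated data:

couch_data = {u'rows': [{u'value': {u'_id': 'abc', u'name': 'widget'}}]}
row = couch_data[u'rows'][0]  # the [0] the patch adds
row[u'value'][u'id'] = row[u'value'].pop(u'_id')
print(row[u'value'])  # {u'name': 'widget', u'id': 'abc'}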
de381a56e87a21da1e82146da01bb546c5094ec4
|
Print the traceback as well for debugging purposes.
|
scripts/asgard-deploy.py
|
scripts/asgard-deploy.py
|
#!/usr/bin/env python
import sys
import logging
import click
from os import path
# Add top-level module path to sys.path before importing tubular code.
sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )
from tubular import asgard
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
@click.command()
@click.option('--ami_id', envvar='AMI_ID', help='The ami-id to deploy', required=True)
def deploy(ami_id):
try:
asgard.deploy(ami_id)
except Exception, e:
click.secho("Error Deploying AMI: {0}.\nMessage: {1}".format(ami_id, e.message), fg='red')
sys.exit(1)
sys.exit(0)
if __name__ == "__main__":
deploy()
|
Python
| 0
|
@@ -41,16 +41,33 @@
logging%0A
+import traceback%0A
import c
@@ -515,16 +515,46 @@
ion, e:%0A
+ traceback.print_exc()%0A
|
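traceback.print_exc() writes the full stack of the exception currently being handled to stderr, so the generic error message no longer swallows the root cause. A minimal sketch of the added pattern:

import sys
import traceback

try:
    raise ValueError('boom')
except Exception:
    traceback.print_exc()  # full stack trace for debugging
    sys.stderr.write('Error Deploying AMI.\n')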
d24ea7a2e3b87861480b2ec9792811f177d974b4
|
Fix over-correction of pixel coordinates in regions
|
aplpy/regions.py
|
aplpy/regions.py
|
try:
import pyregion
pyregion_installed = True
except:
pyregion_installed = False
def _check_pyregion_installed():
if not pyregion_installed:
raise Exception("The pyregion package is required to load region files")
from .decorators import auto_refresh
class Regions:
"""
Regions sub-class of APLpy
Used for overplotting various shapes and annotations on APLpy
fitsfigures
Example:
# DS9 region file called "test.reg"
# (the coordinates are around l=28 in the Galactic Plane)
# Filename: test.fits
fk5
box(18:42:48.262,-04:01:17.91,505.668",459.714",0) # color=red dash=1
point(18:42:51.797,-03:59:44.82) # point=x color=red dash=1
point(18:42:50.491,-04:03:09.39) # point=box color=red dash=1
# vector(18:42:37.433,-04:02:10.77,107.966",115.201) vector=1 color=red dash=1
ellipse(18:42:37.279,-04:02:11.92,26.4336",40.225",0) # color=red dash=1
polygon(18:42:59.016,-03:58:22.06,18:42:58.219,-03:58:11.30,18:42:57.403,-03:58:35.86,18:42:58.094,-03:58:57.69,18:42:59.861,-03:58:41.60,18:42:59.707,-03:58:23.21) # color=red dash=1
point(18:42:52.284,-04:00:02.80) # point=diamond color=red dash=1
point(18:42:46.561,-03:58:01.57) # point=circle color=red dash=1
point(18:42:42.615,-03:58:25.84) # point=cross color=red dash=1
point(18:42:42.946,-04:01:44.74) # point=arrow color=red dash=1
point(18:42:41.961,-03:57:26.16) # point=boxcircle color=red dash=1
# text(18:42:41.961,-03:57:26.16) text={This is text} color=red
Code:
import aplpy
import regions
ff = aplpy.FITSFigure("test.fits")
ff.show_grayscale()
ff.show_regions('test.reg')
"""
@auto_refresh
def show_regions(self, region_file, layer=False, **kwargs):
"""
Overplot regions as specified in the regionsfile
Required Arguments:
*region_file*: [ string | pyregion.ShapeList ]
Path to a ds9 regions file or a ShapeList already read
in by pyregion.
Optional Keyword Arguments:
*layer*: [ string ]
The name of the layer
Additional keyword arguments, e.g. zorder, will be passed to the ds9
call and onto the patchcollections.
"""
_check_pyregion_installed()
PC, TC = ds9(region_file, self._header, **kwargs)
#ffpc = self._ax1.add_collection(PC)
PC.add_to_axes(self._ax1)
TC.add_to_axes(self._ax1)
if layer:
region_set_name = layer
else:
self._region_counter += 1
region_set_name = 'region_set_' + str(self._region_counter)
self._layers[region_set_name] = PC
self._layers[region_set_name + "_txt"] = TC
def ds9(region_file, header, zorder=3, **kwargs):
"""
Wrapper to return a PatchCollection given a ds9 region file
and a fits header.
zorder - defaults to 3 so that regions are on top of contours
"""
# read region file
if isinstance(region_file, basestring):
rr = pyregion.open(region_file)
elif isinstance(region_file, pyregion.ShapeList):
rr = region_file
else:
raise Exception("Invalid type for region_file: %s - should be string or pyregion.ShapeList" % type(region_file))
# convert coordinates to image coordinates
rrim = rr.as_imagecoord(header)
# pyregion and aplpy both correct for the FITS standard origin=1,1
# need to avoid double-correcting
for r in rrim:
r.coord_list[0] += 1
r.coord_list[1] += 1
if 'text_offset' in kwargs:
text_offset = kwargs['text_offset']
del kwargs['text_offset']
else:
text_offset = 5.0
# grab the shapes to overplot
pp, aa = rrim.get_mpl_patches_texts(text_offset=text_offset)
PC = ArtistCollection(pp, **kwargs) # preserves line style (dashed)
TC = ArtistCollection(aa, **kwargs)
PC.set_zorder(zorder)
TC.set_zorder(zorder)
return PC, TC
class ArtistCollection():
"""
Matplotlib collections can't handle Text.
This is a barebones collection for text objects
that supports removing and making (in)visible
"""
def __init__(self, artistlist):
"""
Pass in a list of matplotlib.text.Text objects
(or possibly any matplotlib Artist will work)
"""
self.artistlist = artistlist
def remove(self):
for T in self.artistlist:
T.remove()
def add_to_axes(self, ax):
for T in self.artistlist:
ax.add_artist(T)
def get_visible(self):
visible = True
for T in self.artistlist:
if not T.get_visible():
visible = False
return visible
def set_visible(self, visible=True):
for T in self.artistlist:
T.set_visible(visible)
def set_zorder(self, zorder):
for T in self.artistlist:
T.set_zorder(zorder)
|
Python
| 0.001369
|
@@ -3503,32 +3503,51 @@
n rrim:%0A
+for i in range(len(
r.coord_list%5B0%5D
@@ -3546,17 +3546,16 @@
list
-%5B0%5D += 1%0A
+)):%0A
@@ -3571,17 +3571,17 @@
rd_list%5B
-1
+i
%5D += 1%0A%0A
|
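The patch replaces the fixed [0]/[1] increments with a loop over the whole coordinate list, so every value is shifted, not just the first pair. Its effect in isolation:

coord_list = [10.0, 20.0, 30.0, 40.0]  # e.g. a polygon's flattened coords
for i in range(len(coord_list)):
    coord_list[i] += 1
print(coord_list)  # [11.0, 21.0, 31.0, 41.0]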
f06e5adfa9f01a9304e08288c8baf0ef652851f0
|
fix timeout value to seconds
|
mopidy/stream/actor.py
|
mopidy/stream/actor.py
|
from __future__ import absolute_import, unicode_literals
import fnmatch
import logging
import re
import time
import pykka
from mopidy import audio as audio_lib, backend, exceptions, stream
from mopidy.audio import scan, tags
from mopidy.compat import urllib
from mopidy.internal import http, playlists
from mopidy.models import Track
logger = logging.getLogger(__name__)
class StreamBackend(pykka.ThreadingActor, backend.Backend):
def __init__(self, config, audio):
super(StreamBackend, self).__init__()
self._scanner = scan.Scanner(
timeout=config['stream']['timeout'],
proxy_config=config['proxy'])
self._session = http.get_requests_session(
proxy_config=config['proxy'],
user_agent='%s/%s' % (
stream.Extension.dist_name, stream.Extension.version))
blacklist = config['stream']['metadata_blacklist']
self._blacklist_re = re.compile(
r'^(%s)$' % '|'.join(fnmatch.translate(u) for u in blacklist))
self._timeout = config['stream']['timeout']
self.library = StreamLibraryProvider(backend=self)
self.playback = StreamPlaybackProvider(audio=audio, backend=self)
self.playlists = None
self.uri_schemes = audio_lib.supported_uri_schemes(
config['stream']['protocols'])
if 'file' in self.uri_schemes and config['file']['enabled']:
logger.warning(
'The stream/protocols config value includes the "file" '
'protocol. "file" playback is now handled by Mopidy-File. '
'Please remove it from the stream/protocols config.')
self.uri_schemes -= {'file'}
class StreamLibraryProvider(backend.LibraryProvider):
def lookup(self, uri):
if urllib.parse.urlsplit(uri).scheme not in self.backend.uri_schemes:
return []
if self.backend._blacklist_re.match(uri):
logger.debug('URI matched metadata lookup blacklist: %s', uri)
return [Track(uri=uri)]
_, scan_result = _unwrap_stream(
uri, timeout=self.backend._timeout, scanner=self.backend._scanner,
requests_session=self.backend._session)
if scan_result:
track = tags.convert_tags_to_track(scan_result.tags).replace(
uri=uri, length=scan_result.duration)
else:
logger.warning('Problem looking up %s: %s', uri)
track = Track(uri=uri)
return [track]
class StreamPlaybackProvider(backend.PlaybackProvider):
def translate_uri(self, uri):
if urllib.parse.urlsplit(uri).scheme not in self.backend.uri_schemes:
return None
if self.backend._blacklist_re.match(uri):
logger.debug('URI matched metadata lookup blacklist: %s', uri)
return uri
unwrapped_uri, _ = _unwrap_stream(
uri, timeout=self.backend._timeout, scanner=self.backend._scanner,
requests_session=self.backend._session)
return unwrapped_uri
# TODO: cleanup the return value of this.
def _unwrap_stream(uri, timeout, scanner, requests_session):
"""
Get a stream URI from a playlist URI, ``uri``.
Unwraps nested playlists until something that's not a playlist is found or
the ``timeout`` is reached.
"""
original_uri = uri
seen_uris = set()
deadline = time.time() + timeout
while time.time() < deadline:
if uri in seen_uris:
logger.info(
'Unwrapping stream from URI (%s) failed: '
'playlist referenced itself', uri)
return None, None
else:
seen_uris.add(uri)
logger.debug('Unwrapping stream from URI: %s', uri)
try:
scan_timeout = deadline - time.time()
if scan_timeout < 0:
logger.info(
'Unwrapping stream from URI (%s) failed: '
'timed out in %sms', uri, timeout)
return None, None
scan_result = scanner.scan(uri, timeout=scan_timeout)
except exceptions.ScannerError as exc:
logger.debug('GStreamer failed scanning URI (%s): %s', uri, exc)
scan_result = None
if scan_result is not None:
if scan_result.playable or (
not scan_result.mime.startswith('text/') and
not scan_result.mime.startswith('application/')
):
logger.debug(
'Unwrapped potential %s stream: %s', scan_result.mime, uri)
return uri, scan_result
download_timeout = deadline - time.time()
if download_timeout < 0:
logger.info(
'Unwrapping stream from URI (%s) failed: timed out in %sms',
uri, timeout)
return None, None
content = http.download(
requests_session, uri, timeout=download_timeout)
if content is None:
logger.info(
'Unwrapping stream from URI (%s) failed: '
'error downloading URI %s', original_uri, uri)
return None, None
uris = playlists.parse(content)
if not uris:
logger.debug(
'Failed parsing URI (%s) as playlist; found potential stream.',
uri)
return uri, None
# TODO Test streams and return first that seems to be playable
logger.debug(
'Parsed playlist (%s) and found new URI: %s', uri, uris[0])
uri = uris[0]
|
Python
| 0.000001
|
@@ -2436,12 +2436,8 @@
g up
- %25s:
%25s'
@@ -4957,16 +4957,21 @@
_timeout
+/1000
)%0A%0A
|
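The one-character fix reflects a unit mismatch: the configured timeout is in milliseconds while time.time() counts seconds, so the deadline must divide by 1000. In isolation:

import time

timeout_ms = 5000
deadline = time.time() + timeout_ms / 1000  # 5 seconds from now, not 5000
print(deadline - time.time() <= 5.0)  # True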
7b76b48bac70f1c19f4311c51d84eac8c7cf968a
|
Debug log when we get no browse results
|
mopidy_dirble/actor.py
|
mopidy_dirble/actor.py
|
from __future__ import unicode_literals
import logging
from mopidy import backend
from mopidy.models import Image, Ref, SearchResult
import pykka
from . import client, translator
logger = logging.getLogger(__name__)
class DirbleBackend(pykka.ThreadingActor, backend.Backend):
uri_schemes = ['dirble']
def __init__(self, config, audio):
super(DirbleBackend, self).__init__()
self.dirble = client.Dirble(config['dirble']['api_key'],
config['dirble']['timeout'])
self.countries = config['dirble']['countries']
self.library = DirbleLibrary(backend=self)
self.playback = DirblePlayback(audio=audio, backend=self)
class DirbleLibrary(backend.LibraryProvider):
root_directory = Ref.directory(uri='dirble:root', name='Dirble')
# TODO: add countries when there is a lookup for countries with stations
def browse(self, uri):
result = []
variant, identifier = translator.parse_uri(uri)
if variant == 'root':
for category in self.backend.dirble.categories():
result.append(translator.category_to_ref(category))
for country in self.backend.countries:
result.append(translator.country_to_ref(country))
for continent in self.backend.dirble.continents():
result.append(translator.continent_to_ref(continent))
elif variant == 'category' and identifier:
for category in self.backend.dirble.subcategories(identifier):
result.append(translator.category_to_ref(category))
for station in self.backend.dirble.stations(category=identifier):
result.append(translator.station_to_ref(station))
elif variant == 'continent' and identifier:
for country in self.backend.dirble.countries(identifier):
result.append(translator.country_to_ref(country))
elif variant == 'country' and identifier:
for station in self.backend.dirble.stations(country=identifier):
result.append(
translator.station_to_ref(station, show_country=False))
else:
logger.debug('Unknown URI: %s', uri)
result.sort(key=lambda ref: ref.name)
return result
def refresh(self, uri=None):
self.backend.dirble.flush()
def lookup(self, uri):
variant, identifier = translator.parse_uri(uri)
if variant != 'station':
return []
station = self.backend.dirble.station(identifier)
if not station:
return []
return [translator.station_to_track(station)]
def search(self, query=None, uris=None, exact=False):
if not query.get('any'):
return None
categories = set()
countries = []
for uri in uris or []:
variant, identifier = translator.parse_uri(uri)
if variant == 'country':
countries.append(identifier.lower())
elif variant == 'continent':
countries.extend(self.backend.dirble.countries(identifier))
elif variant == 'category':
pending = [self.backend.dirble.category(identifier)]
while pending:
c = pending.pop(0)
categories.add(c['id'])
pending.extend(c['children'])
tracks = []
for station in self.backend.dirble.search(' '.join(query['any'])):
if countries and station['country'].lower() not in countries:
continue
station_categories = {c['id'] for c in station['categories']}
if categories and not station_categories.intersection(categories):
continue
tracks.append(translator.station_to_track(station))
return SearchResult(tracks=tracks)
def get_images(self, uris):
result = {}
for uri in uris:
result[uri] = []
variant, identifier = translator.parse_uri(uri)
if variant != 'station' or not identifier:
continue
station = self.backend.dirble.station(identifier)
if not station or 'image' not in station:
continue
elif station['image'].get('url'):
result[uri].append(Image(uri=station['image']['url']))
elif station['image'].get('thumb', {}).get('url'):
result[uri].append(Image(uri=station['image']['thumb']['url']))
return result
class DirblePlayback(backend.PlaybackProvider):
def translate_uri(self, uri):
variant, identifier = translator.parse_uri(uri)
if variant != 'station':
return None
station = self.backend.dirble.station(identifier)
if not station['streams']:
return None
# TODO: order by bitrate and preferred mime types?
for stream in station['streams']:
if stream['status']:
return stream['stream']
return station['streams'][0]['stream']
|
Python
| 0
|
@@ -2220,24 +2220,143 @@
I: %25s', uri)
+%0A return %5B%5D%0A%0A if not result:%0A logger.debug('Did not find any browse results for: %25s', uri)
%0A%0A re
|
0a81356e0f8011f0764a8c28719d1371e5860656
|
Make sure create_privatekml mgmt command produces unique names less than 100 chars; fail gracefully if not
|
lingcod/layers/management/commands/create_privatekml.py
|
lingcod/layers/management/commands/create_privatekml.py
|
from django.core.management.base import BaseCommand, AppCommand
from django.conf import settings
from optparse import make_option
import os
import glob
from lingcod.layers.models import PrivateKml
from django.contrib.auth.models import User, Group
class Command(BaseCommand):
help = "Populates the PrivateKml table from the PRIVATE_KML_ROOT contents .. a good starting point"
args = '[optional group name to share all KMLs with]'
# Validation is called explicitly each time the server is reloaded.
requires_model_validation = False
def handle(self, groupname=None, *args, **options):
for pkml in PrivateKml.objects.all():
pkml.delete()
if groupname:
g = Group.objects.get(name=groupname)
if not os.path.exists(settings.PRIVATE_KML_ROOT):
raise Exception("Please create or set up a PRIVATE_KML_ROOT directory (currently set to %s" %
settings.PRIVATE_KML_ROOT)
for d in os.listdir(settings.PRIVATE_KML_ROOT):
path = os.path.join(settings.PRIVATE_KML_ROOT,d)
kmls = glob.glob(os.path.join(path,'*.km*'))
if len(kmls) == 0:
print "No KML/KMZ found in %s" % path
continue
for kml in kmls:
basename = os.path.basename(kml).split('.')[0]
pkml = PrivateKml.objects.create(name=d+"_"+basename,base_kml=kml)
if groupname:
pkml.sharing_groups.add(g)
print "Created %s from %s" % (pkml,kml)
|
Python
| 0
|
@@ -1336,16 +1336,90 @@
'.')%5B0%5D%0A
+ privatekml_name = d+'_'+basename%0A try:%0A
@@ -1468,22 +1468,28 @@
ame=
-d+%22_%22+basename
+privatekml_name%5B:99%5D
,bas
@@ -1499,16 +1499,20 @@
ml=kml)%0A
+
@@ -1549,24 +1549,28 @@
+
pkml.sharing
@@ -1584,16 +1584,20 @@
.add(g)%0A
+
@@ -1640,12 +1640,103 @@
(pkml,kml)%0A
+ except:%0A print %22couldn't create privatekml from %25s%22 %25 s%0A
|
3116617b4ab9d0b8168c8c8d7fb511a5793280bc
|
correct syntax of join
|
scripts/jython/import_osc.py
|
scripts/jython/import_osc.py
|
from xml.sax import make_parser, handler
from xml.sax.handler import ContentHandler
import sys
import java.lang
from org.apache.hadoop.hbase import HBaseConfiguration, HTableDescriptor, HColumnDescriptor, HConstants
from org.apache.hadoop.hbase.client import HBaseAdmin, HTable
from org.apache.hadoop.hbase.client import Get,Put,Delete
class CountingHandler(ContentHandler):
def __init__(self):
self.isModify = False
self.isCreate = False
self.isDelete = False
self.isWay = False
self.nodeId = 0
self.wayId = 0
self.nodeUser = ''
self.wayUser = ''
self.nodeTags = []
self.wayTags = []
self.wayNodes = []
self.nodeLat = 0.0
self.nodeLon = 0.0
def processElement(self,name,attrs):
if name == 'node':
self.isNode = True
self.nodeId = attrs.getValue('id')
self.nodeLat = attrs.getValue('lat')
self.nodeLon = attrs.getValue('lon')
self.nodeUser = attrs.getValue('user')
if name == 'way':
self.isWay = True
self.wayId = attrs.getValue('id')
self.wayUser = attrs.getValue('user')
if name == 'nd':
self.wayNodes.append(attrs.getValue('ref'))
if name == 'tag':
tag = (attrs.getValue('k'),attrs.getValue('v'))
if self.isWay and not self.isNode:
self.wayTags.append(tag)
else:
self.nodeTags.append(tag)
def startElement(self, name, attrs):
if name == 'modify':
self.isModify = True
if name =='create':
self.isCreate = True
if name == 'delete':
self.isDelete = True
self.processElement(name,attrs)
def endElement(self, name):
if name == 'modify':
self.isModify = False
if name =='create':
self.isCreate = False
if name == 'delete':
self.isDelete = False
if name == 'node':
row = Put(self.nodeId)
row.add('nodeData','user',self.nodeUser)
row.add('nodeData','lat',self.nodeLat)
row.add('nodeData','lon',self.nodeLon)
if len(self.nodeTags) > 0:
row.add('nodeData','tags',"#".join("(%s,%s)" % tup for tup in self.nodeTags))
self.nodeTags = []
nodesTable.put(row)
self.nodeId = 0
self.nodeLat = 0.0
self.nodeLon = 0.0
self.isNode = False
if name == 'way':
row = Put(self.wayId)
row.add('wayData','user',self.wayUser)
if len(self.wayNodes) > 0:
row.add('wayData','nodes',"#".join("%s" for tag in self.wayNodes))
self.wayNodes = []
if len(self.wayTags) > 0:
row.add('wayData','wayTags', "#".join("(%s,%s)" % tup for tup in self.wayTags))
self.wayTags = []
waysTable.put(row)
self.isWay = False
def setupHbase():
admin = HBaseAdmin(conf)
nodesDesc = HTableDescriptor(nodesTablename)
nodesDesc.addFamily(HColumnDescriptor("nodeData"))
waysDesc = HTableDescriptor(waysTablename)
waysDesc.addFamily(HColumnDescriptor("wayData"))
if admin.tableExists(nodesTablename):
admin.disableTable(nodesTablename)
admin.deleteTable(nodesTablename)
admin.createTable(nodesDesc)
if admin.tableExists(waysTablename):
admin.disableTable(waysTablename)
admin.deleteTable(waysTablename)
admin.createTable(waysDesc)
global nodesTable,waysTable
nodesTable = HTable(conf, nodesTablename)
waysTable = HTable(conf, waysTablename)
def main(argv=sys.argv):
parser = make_parser()
h = CountingHandler()
parser.setContentHandler(h)
with open(argv[1], "r") as input:
parser.parse(input)
if __name__ =='__main__':
conf = HBaseConfiguration()
nodesTablename = "nodesTest"
waysTablename = "waysTest"
setupHbase()
main()
|
Python
| 0.007571
|
@@ -2352,16 +2352,22 @@
oin(%22%25s%22
+ %25 tag
for tag
|
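A minimal sketch of the bug this one-line diff repairs: the original generator expression yields the literal string "%s" once per element instead of formatting each element (the sample values below are illustrative, not from the record):

nodes = ["12", "34", "56"]
broken = "#".join("%s" for n in nodes)      # '%s#%s#%s' -- n is never used
fixed = "#".join("%s" % n for n in nodes)   # '12#34#56'
print(broken)
print(fixed)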
9a1921fb27b7073d9c79f6727766eb516478f403
|
Bump version 0.6.0 (git sync solution)
|
cmscloud_client/__init__.py
|
cmscloud_client/__init__.py
|
# -*- coding: utf-8 -*-
__version__ = '0.5.4'
|
Python
| 0
|
@@ -38,9 +38,9 @@
'0.
-5.4
+6.0
'%0A
|
2301908ef1a0da7ede392c424c1c813fca517f7a
|
version bump
|
academictorrents/version.py
|
academictorrents/version.py
|
__version__ = "2.0.16"
|
Python
| 0.000001
|
@@ -17,8 +17,8 @@
.0.1
-6
+7
%22%0A%0A
|
9926cbb1919b96999d479f5a8d67e17ce71a1091
|
Improve the get_nick a tiny amount
|
motobot/irc_message.py
|
motobot/irc_message.py
|
class IRCMessage:
""" Class to store and parse an IRC Message. """
def __init__(self, msg):
""" Parse a raw IRC message to IRCMessage. """
self.sender = None
self.nick = None
self.command = None
self.params = []
self.__parse_msg(msg)
def __parse_msg(self, msg):
if msg[0] == ':':
self.sender, msg = msg[1:].split(' ', 1)
self.nick = get_nick(self.sender)
if ' :' in msg:
msg, trailing = msg.split(' :', 1)
self.params = msg.split(' ')
self.params.append(trailing)
else:
self.params = msg.split(' ')
self.command = self.params.pop(0)
def __repr__(self):
""" Print the IRCMessage all nice 'n' pretty. """
return "Sender: {};\nCommand: {};\nParams: {};\n".format(
self.sender, self.command, self.params)
def action(message):
""" Make the message an action. """
return '\u0001ACTION {}\u0001'.format(message)
def get_nick(host):
""" Get the user's nick from a host. """
return host.split('!')[0]
|
Python
| 0.000021
|
@@ -1102,13 +1102,16 @@
plit('!'
+, 1
)%5B0%5D%0A
|
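The change above only adds maxsplit=1; a tiny sketch of why it is safe and slightly cheaper (the host string is illustrative):

host = "alice!ident@irc.example.net"   # illustrative IRC prefix, not from the record
# Splitting at the first '!' is enough to recover the nick; maxsplit=1
# stops scanning early and allocates at most a two-element list.
assert host.split('!')[0] == host.split('!', 1)[0] == "alice"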
119ce47d9e876c345c2bc44751ccf04f0b226259
|
Remove lie_system package dependency
|
components/lie_structures/setup.py
|
components/lie_structures/setup.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# package: lie_docking
# file: setup.py
#
# Part of ‘lie_docking’, a package providing molecular docking functionality
# for the LIEStudio package.
#
# Copyright © 2016 Marc van Dijk, VU University Amsterdam, the Netherlands
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
distribution_name = 'lie_structures'
setup(
name=distribution_name,
version=0.1,
description='LIEStudio structure database module',
author='Marc van Dijk, VU University, Amsterdam, The Netherlands',
author_email='m4.van.dijk@vu.nl',
url='https://github.com/NLeSC/LIEStudio',
license='Apache Software License 2.0',
keywords='LIEStudio structures database',
platforms=['Any'],
packages=find_packages(),
package_data={'': ['*.json']},
py_modules=[distribution_name],
install_requires=['lie_system', 'openbabel'],
include_package_data=True,
zip_safe=True,
entry_points={
'autobahn.twisted.wamplet': [
'wamp_services = lie_structures.wamp_services:make'
],
},
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Chemistry',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
],
)
|
Python
| 0
|
@@ -1397,22 +1397,8 @@
es=%5B
-'lie_system',
'ope
|
ad02f9e35409ae0d8301414d7d297257e9cec982
|
change M-layer init to 'uniform'
|
m_layer/m_layer.py
|
m_layer/m_layer.py
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# coding=utf-8
"""Code for creating the M-layer as a keras layer."""
import tensorflow as tf
class MLayer(tf.keras.layers.Layer):
"""The M-layer: Lie Algebra generator-embedding and matrix exponentiation.
This is a Keras implementation of the M-layer described in (2020)[1].
#### References
[1]: Thomas Fischbacher, Iulia M. Comsa, Krzysztof Potempa, Moritz Firsching,
Luca Versari, Jyrki Alakuijala "Intelligent Matrix Exponentiation", ICML 2020.
TODO(firsching): add link to paper.
"""
def __init__(self,
dim_m,
matrix_init=None,
with_bias=False,
matrix_squarings_exp=None,
**kwargs):
"""Initializes the instance.
Args:
dim_m: The matrix to be exponentiated in the M-layer has the shape (dim_m,
dim_m).
matrix_init: What initializer to use for the matrix. `None` defaults to
      `normal` initialization.
with_bias: Whether a bias should be included in layer after
exponentiation.
matrix_squarings_exp: None to compute tf.linalg.expm(M), an integer `k` to
instead approximate it with (I+M/2**k)**(2**k).
**kwargs: keyword arguments passed to the Keras layer base class.
"""
self._dim_m = dim_m
self._rep_to_exp_tensor = None
self._matrix_init = matrix_init or 'normal'
self._with_bias = with_bias
self._matrix_bias = None
self._matrix_squarings_exp = matrix_squarings_exp
super(MLayer, self).__init__(**kwargs)
def build(self, input_shape):
dim_rep = input_shape[-1]
self._rep_to_exp_tensor = self.add_weight(
name='rep_to_exp_tensor',
shape=(dim_rep, self._dim_m, self._dim_m),
initializer=self._matrix_init,
trainable=True)
if self._with_bias:
self._matrix_bias = self.add_weight(
name='matrix_bias',
shape=(1, self._dim_m, self._dim_m),
initializer='uniform',
trainable=True)
super(MLayer, self).build(input_shape)
def call(self, x):
if not self._with_bias:
mat = tf.einsum('amn,...a->...mn', self._rep_to_exp_tensor, x)
else:
mat = tf.einsum('amn,...a->...mn', self._rep_to_exp_tensor,
x) + self._matrix_bias
if self._matrix_squarings_exp is None:
return tf.linalg.expm(mat)
# Approximation of exp(mat) as (1+mat/k)**k with k = 2**MATRIX_SQUARINGS_EXP
mat = mat * 0.5**self._matrix_squarings_exp + tf.eye(self._dim_m)
for _ in range(self._matrix_squarings_exp):
mat = tf.einsum('...ij,...jk->...ik', mat, mat)
return mat
def compute_output_shape(self, input_shape):
return input_shape[0], self._dim_m, self._dim_m
def get_config(self):
config = dict(super().get_config())
config['dim_m'] = self._dim_m
config['matrix_init'] = self._matrix_init
config['with_bias'] = self._with_bias
config['matrix_squarings_exp'] = self._matrix_squarings_exp
return config
|
Python
| 0.000182
|
@@ -1962,22 +1962,23 @@
nit or '
-n
+unif
orm
-al
'%0A se
|
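A minimal NumPy sketch of the squarings approximation that MLayer.call uses when matrix_squarings_exp is set -- exp(M) is approximated by (I + M/2**k)**(2**k) -- checked here against SciPy's reference expm (SciPy is an assumption of this sketch, not a dependency of the record):

import numpy as np
from scipy.linalg import expm  # reference implementation

def expm_squaring(m, k=8):
    # Mirrors the matrix_squarings_exp branch: scale down, add the
    # identity, then square k times.
    out = np.eye(m.shape[0]) + m / 2.0 ** k
    for _ in range(k):
        out = out @ out
    return out

m = 0.1 * np.random.default_rng(0).normal(size=(4, 4))
print(np.max(np.abs(expm_squaring(m) - expm(m))))  # small, on the order of 1e-6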
eb48fba5b3334437a752681df200c2bbefb0bc18
|
change font to be purple
|
NEMbox/osdlyrics.py
|
NEMbox/osdlyrics.py
|
from PyQt4 import QtGui, QtCore, QtDBus
import sys
import os
from multiprocessing import Process
class Lyrics(QtGui.QWidget):
def __init__(self):
super(Lyrics, self).__init__()
self.initUI()
def initUI(self):
self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
self.resize(900, 150)
self.text = u"OSD Lyrics for Musicbox"
self.setWindowTitle("Lyrics")
self.show()
@QtCore.pyqtSlot(str)
def refresh_lyrics(self, text):
self.text = text
self.repaint()
def paintEvent(self, event):
qp = QtGui.QPainter()
qp.begin(self)
self.drawText(event, qp)
qp.end()
def drawText(self, event, qp):
qp.setPen(QtGui.QColor(0, 0, 0))
qp.setFont(QtGui.QFont('Decorative', 16))
qp.drawText(event.rect(), QtCore.Qt.AlignCenter, self.text)
def show_lyrics():
app = QtGui.QApplication(sys.argv)
# lyrics_receiver = LyricsReceiver()
lyrics = Lyrics()
QtDBus.QDBusConnection.sessionBus().registerService('org.musicbox.Bus')
QtDBus.QDBusConnection.sessionBus().registerObject('/', lyrics, QtDBus.QDBusConnection.ExportAllSlots)
sys.exit(app.exec_())
def show_lyrics_new_process():
p = Process(target=show_lyrics)
p.start()
# p.join()
|
Python
| 0.999989
|
@@ -744,15 +744,19 @@
lor(
-0
+128
, 0,
-0
+128
))%0A
|
c61d4c6df77fe505074c81eebaec938c6716d9ab
|
Create columns before querying them.
|
sqlaload/query.py
|
sqlaload/query.py
|
import logging
from itertools import count
from sqlalchemy.sql import expression, and_
log = logging.getLogger(__name__)
def resultiter(rp):
""" SQLAlchemy ResultProxies are not iterable to get a
list of dictionaries. This is to wrap them. """
keys = rp.keys()
while True:
row = rp.fetchone()
if row is None:
break
yield dict(zip(keys, row))
def find_one(engine, table, **kw):
res = list(find(engine, table, _limit=1, **kw))
if not len(res):
return None
return res[0]
def find(engine, table, _limit=None, _step=5000, _offset=0,
order_by=None, **kw):
if order_by is None:
order_by = [table.c.id.asc()]
qargs = []
try:
for col, val in kw.items():
qargs.append(table.c[col]==val)
except KeyError:
return
for i in count():
qoffset = _offset + (_step * i)
qlimit = _step
if _limit is not None:
qlimit = min(_limit-(_step*i), _step)
if qlimit <= 0:
break
q = table.select(whereclause=and_(*qargs), limit=qlimit,
offset=qoffset, order_by=order_by)
rows = list(resultiter(engine.execute(q)))
if not len(rows):
return
for row in rows:
yield row
def distinct(engine, table, *columns):
columns = [table.c[c] for c in columns]
q = expression.select(columns, distinct=True)
return list(resultiter(engine.execute(q)))
def all(engine, table):
return find(engine, table)
|
Python
| 0
|
@@ -80,16 +80,60 @@
on, and_
+%0Afrom sqlaload.schema import _ensure_columns
%0A%0Alog =
@@ -674,16 +674,55 @@
, **kw):
+%0A _ensure_columns(engine, table, kw)
%0A%0A if
|
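The find() helper above pages through a table in _step-sized LIMIT/OFFSET windows; the same control flow in plain Python, with fetch_page standing in for the SQLAlchemy query (the stand-in name is an assumption of this sketch):

from itertools import count

def paged(fetch_page, limit=None, step=5000, offset=0):
    # Mirror of find(): request at most `step` rows per query; stop when a
    # window comes back empty or the caller's overall `limit` is exhausted.
    for i in count():
        qlimit = step if limit is None else min(limit - step * i, step)
        if qlimit <= 0:
            return
        rows = fetch_page(qlimit, offset + step * i)
        if not rows:
            return
        yield from rows

data = list(range(12))  # fake 12-row table
print(list(paged(lambda lim, off: data[off:off + lim], step=5)))  # [0, 1, ..., 11]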
0c4e6ff26d716bf20a1a7c36a4e3e363a1101c2a
|
add forced/default to plexpy.library.stream
|
Contents/Libraries/Shared/plex/objects/library/stream.py
|
Contents/Libraries/Shared/plex/objects/library/stream.py
|
from plex.objects.core.base import Descriptor, Property
class Stream(Descriptor):
id = Property(type=int)
index = Property(type=int)
stream_type = Property('streamType', type=int)
selected = Property(type=bool)
title = Property
duration = Property(type=int)
codec = Property
codec_id = Property('codecID')
bit_depth = Property('bitDepth', type=int)
chroma_subsampling = Property('chromaSubsampling')
color_space = Property('colorSpace')
width = Property(type=int)
height = Property(type=int)
bitrate = Property(type=int)
bitrate_mode = Property('bitrateMode')
channels = Property(type=int)
sampling_rate = Property('samplingRate', type=int)
frame_rate = Property('frameRate')
profile = Property
scan_type = Property('scanType')
language = Property('language')
language_code = Property('languageCode')
bvop = Property(type=int)
gmc = Property(type=int)
level = Property(type=int)
qpel = Property(type=int)
@classmethod
def from_node(cls, client, node):
items = []
for genre in cls.helpers.findall(node, 'Stream'):
_, obj = Stream.construct(client, genre, child=True)
items.append(obj)
return [], items
|
Python
| 0
|
@@ -224,16 +224,84 @@
=bool)%0A%0A
+ forced = Property(type=bool)%0A default = Property(type=bool)%0A%0A
titl
|
d03bd67e12201fa24f1f8c288246581224494357
|
optimize analyze_all a bit
|
mptracker/proposals.py
|
mptracker/proposals.py
|
from time import sleep
import logging
import flask
from flask.ext.script import Manager
from flask.ext.rq import job
from mptracker import models
from mptracker.common import ocr_url
from mptracker.nlp import match_text_for_mandate
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
proposals = flask.Blueprint('proposals', __name__)
proposals_manager = Manager()
@proposals.route('/mandate/<uuid:mandate_id>/proposals')
def mandate_proposals(mandate_id):
mandate = models.Mandate.query.get_or_404(mandate_id)
return flask.render_template('proposals/mandate.html', **{
'mandate': mandate,
'sponsorships': list(mandate.sponsorships),
})
@proposals.route('/proposals/<uuid:proposal_id>')
def proposal(proposal_id):
proposal = (models.Proposal.query
.filter_by(id=proposal_id)
.first_or_404())
return flask.render_template('proposals/detail.html', **{
'proposal': proposal,
'sponsorships': [{
'id': sp.id,
'mandate': sp.mandate,
'match_data': flask.json.loads(sp.match.data or '{}'),
} for sp in proposal.sponsorships],
})
@proposals.route('/proposals/relevant')
def relevant():
sponsorships = [s for s in models.Sponsorship.query if s.match.score]
sponsorships.sort(key=lambda s: s.match.score or 0, reverse=True)
return flask.render_template('proposals/relevant.html', **{
'sponsorships': sponsorships,
})
@proposals_manager.command
def ocr_all(number=None, force=False):
job_map = {}
n_jobs = n_skip = n_ok = 0
for proposal in models.Proposal.query:
if not proposal.pdf_url:
n_skip += 1
continue
if proposal.text is not None and not force:
n_ok += 1
continue
job = ocr_url.delay(proposal.pdf_url)
job_map[proposal.id] = job
n_jobs += 1
if number and n_jobs >= int(number):
break
logger.info("enqueued %d jobs, skipped %d, ok %d", n_jobs, n_skip, n_ok)
session = models.db.session
while job_map:
sleep(1)
done = set()
failed = set()
session.rollback()
for proposal_id, job in job_map.items():
if job.is_finished:
done.add(proposal_id)
proposal = models.Proposal.query.get(proposal_id)
pages = job.result
proposal.text = '\n\n'.join(pages)
elif job.is_failed:
failed.add(proposal_id)
session.commit()
if done or failed:
for proposal_id in done | failed:
del job_map[proposal_id]
logger.info("saved %d, failed %d, remaining %d",
len(done), len(failed), len(job_map))
@job
@proposals_manager.command
def analyze_sponsorship(sponsorship_id):
sponsorship = models.Sponsorship.query.get(sponsorship_id)
proposal = sponsorship.proposal
text = proposal.title + ' ' + proposal.text
result = match_text_for_mandate(sponsorship.mandate, text)
sponsorship.match.data = flask.json.dumps(result)
sponsorship.match.score = len(result['top_matches'])
models.db.session.commit()
@proposals_manager.command
def analyze_all(number=None, force=False, minority_only=False):
n_jobs = n_skip = n_ok = 0
for sponsorship in models.Sponsorship.query:
if not force:
if sponsorship.match.data is not None:
n_ok += 1
continue
if sponsorship.proposal.text is None:
n_skip += 1
continue
if not sponsorship.mandate.minority:
county = sponsorship.mandate.county
if (minority_only or
county is None or
county.geonames_code is None):
n_skip += 1
continue
analyze_sponsorship.delay(sponsorship.id)
n_jobs += 1
if number and n_jobs >= int(number):
break
logger.info("enqueued %d jobs, skipped %d, ok %d", n_jobs, n_skip, n_ok)
|
Python
| 0.000001
|
@@ -3372,32 +3372,90 @@
skip = n_ok = 0%0A
+ text_row_ids = models.OcrText.all_ids_for('proposal')%0A
for sponsors
@@ -3646,21 +3646,31 @@
osal
-.text is None
+_id not in text_row_ids
:%0A
|
e92a612ba231eebb8dbe7ac42d24ac002a89fbe1
|
add docstring
|
frappe/utils/logger.py
|
frappe/utils/logger.py
|
# imports - compatibility imports
from __future__ import unicode_literals
# imports - standard imports
import logging
import os
from logging.handlers import RotatingFileHandler
# imports - third party imports
from six import text_type
# imports - module imports
import frappe
default_log_level = logging.DEBUG
site = getattr(frappe.local, 'site', None)
def get_logger(module, with_more_info=False, _site=None):
global site
if module in frappe.loggers:
return frappe.loggers[module]
if not module:
module = "frappe"
with_more_info = True
logfile = module + '.log'
site = getattr(frappe.local, 'site', None)
LOG_FILENAME = os.path.join('..', 'logs', logfile)
logger = logging.getLogger(module)
logger.setLevel(frappe.log_level or default_log_level)
logger.propagate = False
formatter = logging.Formatter('%(asctime)s %(levelname)s %(name)s %(message)s')
handler = RotatingFileHandler(LOG_FILENAME, maxBytes=100_000, backupCount=20)
logger.addHandler(handler)
#
if site == _site:
SITELOG_FILENAME = os.path.join(site, 'logs', logfile)
site_handler = RotatingFileHandler(SITELOG_FILENAME, maxBytes=100_000, backupCount=20)
site_handler.setFormatter(formatter)
logger.addHandler(site_handler)
if with_more_info:
handler.addFilter(SiteContextFilter())
handler.setFormatter(formatter)
frappe.loggers[module] = logger
return logger
class SiteContextFilter(logging.Filter):
"""This is a filter which injects request information (if available) into the log."""
def filter(self, record):
if "Form Dict" not in text_type(record.msg):
record.msg = text_type(record.msg) + "\nSite: {0}\nForm Dict: {1}".format(site, getattr(frappe.local, 'form_dict', None))
return True
def set_log_level(level):
'''Use this method to set log level to something other than the default DEBUG'''
frappe.log_level = getattr(logging, (level or '').upper(), None) or default_log_level
frappe.loggers = {}
|
Python
| 0.000005
|
@@ -416,19 +416,709 @@
):%0A%09
-global site
+%22%22%22Application Logger for your given module%0A%0A%09Args:%0A%09%09module (str): Name of your logger and consequently your log file.%0A%09%09with_more_info (bool, optional): Will log the form dict using the SiteContextFilter. Defaults to False.%0A%09%09_site (str, optional): If set, validates the current site context with the passed value. The %60frappe.web%60 logger uses this to determine that the application is logging information related to the logger called. Defaults to None.%0A%0A%09Returns:%0A%09%09%3Cclass 'logging.Logger'%3E: Returns a Python logger object with Site and Bench level logging capabilities.%0A%09%22%22%22%0A%09global site%0A%0A%09def allow_site():%0A%09%09allow = False%0A%09%09if site: allow = True%0A%09%09if _site: allow = site == _site%0A%09%09return allow%0A
%0A%09if
@@ -1673,27 +1673,25 @@
er)%0A
-#
%0A%09if
-site ==
+allow
_site
+()
:%0A%09%09
|
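A self-contained sketch of the RotatingFileHandler setup that get_logger wraps; the file name and size limits are illustrative:

import logging
from logging.handlers import RotatingFileHandler

logger = logging.getLogger("demo")
logger.setLevel(logging.DEBUG)
logger.propagate = False  # as in get_logger: keep records out of the root logger
handler = RotatingFileHandler("demo.log", maxBytes=100_000, backupCount=20)
handler.setFormatter(
    logging.Formatter("%(asctime)s %(levelname)s %(name)s %(message)s"))
logger.addHandler(handler)
logger.info("rolls over to demo.log.1 .. demo.log.20 once maxBytes is exceeded")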
951348a42e560ceb2ae0aef6b96f61c92493287d
|
fix pylint issues
|
sdcm/cluster_k8s/iptables.py
|
sdcm/cluster_k8s/iptables.py
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright (c) 2020 ScyllaDB
import atexit
import logging
from itertools import chain
from typing import Literal, List, Optional
from sdcm import cluster
from sdcm.remote import LOCALRUNNER, shell_script_cmd
IPTABLES_BIN = "iptables"
IPTABLES_LEGACY_BIN = "iptables-legacy"
LOGGER = logging.getLogger(__name__)
IptablesChainCommand = Literal["A", "C", "D"]
class IptablesPodPortsRedirectMixin:
def iptables_node_redirect_rules(self,
dest_ip: str,
iptables_bin: str = IPTABLES_BIN,
command: IptablesChainCommand = "A") -> List[str]:
to_ip = self._cluster_ip_service.spec.cluster_ip
return [iptables_port_redirect_rule(iptables_bin=iptables_bin,
command=command,
to_ip=to_ip,
to_port=p.target_port,
dest_ip=dest_ip,
dest_port=p.node_port) for p in self._loadbalancer_service.spec.ports]
class IptablesPodIpRedirectMixin:
def iptables_node_redirect_rules(self,
dest_ip: str,
iptables_bin: str = IPTABLES_BIN,
command: IptablesChainCommand = "A") -> List[str]:
to_ip = self._cluster_ip_service.spec.cluster_ip
return [iptables_ip_redirect_rule(iptables_bin=iptables_bin, command=command, to_ip=to_ip, dest_ip=dest_ip), ]
class IptablesClusterOpsMixin:
def hydra_iptables_redirect_rules(self,
command: IptablesChainCommand = "A",
nodes: Optional[list] = None) -> List[str]:
if nodes is None:
nodes = self.nodes
return list(chain.from_iterable(node.iptables_node_redirect_rules(dest_ip=node.hydra_dest_ip,
iptables_bin=IPTABLES_LEGACY_BIN,
command=command) for node in nodes))
def nodes_iptables_redirect_rules(self,
command: IptablesChainCommand = "A",
nodes: Optional[list] = None) -> List[str]:
if nodes is None:
nodes = self.nodes
return list(chain.from_iterable(node.iptables_node_redirect_rules(dest_ip=node.nodes_dest_ip,
command=command) for node in nodes))
def add_hydra_iptables_rules(self, nodes: Optional[list] = None) -> None:
add_rules_commands = self.hydra_iptables_redirect_rules(nodes=nodes)
del_rules_commands = self.hydra_iptables_redirect_rules(command="D", nodes=nodes)
LOCALRUNNER.sudo(shell_script_cmd("\n".join(add_rules_commands)))
atexit.register(LOCALRUNNER.sudo, shell_script_cmd("\n".join(del_rules_commands)))
def update_nodes_iptables_redirect_rules(self,
command: IptablesChainCommand = "A",
nodes: Optional[list] = None,
loaders: bool = True,
monitors: bool = True) -> None:
nodes_to_update = []
if tester := cluster.TestConfig.tester_obj():
if loaders and tester.loaders:
nodes_to_update.extend(tester.loaders.nodes)
if monitors and tester.monitors:
nodes_to_update.extend(tester.monitors.nodes)
if nodes_to_update:
LOGGER.debug("Found following nodes to apply new iptables rules: %s", nodes_to_update)
iptables_rules = "\n".join(self.nodes_iptables_redirect_rules(command=command, nodes=nodes))
for node in nodes_to_update:
node.remoter.sudo(shell_script_cmd(iptables_rules))
def iptables_port_redirect_rule(iptables_bin: str,
command: IptablesChainCommand,
to_ip: str,
to_port: int,
dest_ip: str,
dest_port: int) -> str:
return f"{iptables_bin} -t nat -{command} OUTPUT -d {to_ip} -p tcp --dport {to_port} " \
f"-j DNAT --to-destination {dest_ip}:{dest_port}"
def iptables_ip_redirect_rule(iptables_bin: str, command: IptablesChainCommand, to_ip: str, dest_ip: str) -> str:
return f"{iptables_bin} -t nat -{command} OUTPUT -d {to_ip} -j DNAT --to-destination {dest_ip}"
|
Python
| 0.000001
|
@@ -835,24 +835,65 @@
%22C%22, %22D%22%5D%0A%0A%0A
+# pylint: disable=too-few-public-methods%0A
class Iptabl
@@ -1657,24 +1657,65 @@
ec.ports%5D%0A%0A%0A
+# pylint: disable=too-few-public-methods%0A
class Iptabl
@@ -4652,16 +4652,53 @@
les))%0A%0A%0A
+# pylint: disable=too-many-arguments%0A
def ipta
|
24fbe55a3517e50f4d158bbb7b8857f8f10dc148
|
Use argparse to parse julia-py arguments
|
src/julia/julia_py.py
|
src/julia/julia_py.py
|
from __future__ import print_function, absolute_import
from argparse import Namespace
import os
import sys
from .api import LibJulia
from .tools import julia_py_executable
def parse_args(args):
ns = Namespace(julia="julia")
jl_args = list(args)
if len(jl_args) >= 2 and jl_args[0] == "--julia":
ns.julia = jl_args[1]
jl_args = jl_args[2:]
elif len(jl_args) >= 1 and jl_args[0].startswith("--julia="):
ns.julia = jl_args[0][len("--julia=") :]
jl_args = jl_args[1:]
return ns, jl_args
def main(args=None):
if args is None:
args = sys.argv[1:]
ns, jl_args = parse_args(args)
os.environ["_PYJULIA_JULIA_PY"] = julia_py_executable()
os.environ["_PYJULIA_PATCH_JL"] = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "patch.jl"
)
api = LibJulia.load(julia=ns.julia)
api.init_julia(jl_args)
code = 1
if api.jl_eval_string(b"""Base.include(Main, ENV["_PYJULIA_PATCH_JL"])"""):
if api.jl_eval_string(b"Base.invokelatest(Base._start)"):
code = 0
api.jl_atexit_hook(code)
sys.exit(code)
if __name__ == "__main__":
main()
|
Python
| 0.000004
|
@@ -1,12 +1,51 @@
+%22%22%22%0ALaunch Julia through PyJulia.%0A%22%22%22%0A%0A
from __futur
@@ -92,37 +92,22 @@
rt%0A%0A
-from argparse import Namespac
+import argpars
e%0Aim
@@ -152,16 +152,47 @@
ibJulia%0A
+from .core import enable_debug%0A
from .to
@@ -232,474 +232,92 @@
def
-parse_args(args):%0A ns = Namespace(julia=%22julia%22)%0A jl_args = list(args)%0A%0A if len(jl_args) %3E= 2 and jl_args%5B0%5D == %22--julia%22:%0A ns.julia = jl_args%5B1%5D%0A jl_args = jl_args%5B2:%5D%0A elif len(jl_args) %3E= 1 and jl_args%5B0%5D.startswith(%22--julia=%22):%0A ns.julia = jl_args%5B0%5D%5Blen(%22--julia=%22) :%5D%0A jl_args = jl_args%5B1:%5D%0A%0A return ns, jl_args%0A%0A%0Adef main(args=None):%0A if args is None:%0A args = sys.argv%5B1:%5D%0A ns, jl_args = parse_args(args
+julia_py(julia, pyjulia_debug, jl_args):%0A if pyjulia_debug:%0A enable_debug(
)%0A%0A
@@ -532,11 +532,8 @@
lia=
-ns.
juli
@@ -772,16 +772,16 @@
k(code)%0A
-
sys.
@@ -793,16 +793,944 @@
code)%0A%0A%0A
+class CustomFormatter(%0A argparse.RawDescriptionHelpFormatter, argparse.ArgumentDefaultsHelpFormatter%0A):%0A pass%0A%0A%0Adef parse_args(args, **kwargs):%0A options = dict(%0A prog=%22julia-py%22,%0A usage=%22%25(prog)s %5B--julia JULIA%5D %5B--pyjulia-debug%5D %5B%3Cjulia arguments%3E...%5D%22,%0A formatter_class=CustomFormatter,%0A description=__doc__,%0A )%0A options.update(kwargs)%0A parser = argparse.ArgumentParser(**options)%0A parser.add_argument(%0A %22--julia%22,%0A default=%22julia%22,%0A help=%22%22%22%0A Julia %60executable%60 used by PyJulia.%0A %22%22%22,%0A )%0A parser.add_argument(%0A %22--pyjulia-debug%22,%0A action=%22store_true%22,%0A help=%22%22%22%0A Print PyJulia's debugging messages to standard error.%0A %22%22%22,%0A )%0A ns, jl_args = parser.parse_known_args(args)%0A ns.jl_args = jl_args%0A return ns%0A%0A%0Adef main(args=None, **kwargs):%0A julia_py(**vars(parse_args(args, **kwargs)))%0A%0A%0A
if __nam
|
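The core of the diff above is argparse's parse_known_args, which separates julia-py's own flags from everything forwarded to Julia; a standalone sketch (flag names taken from the diff, argument values illustrative):

import argparse

parser = argparse.ArgumentParser(prog="julia-py")
parser.add_argument("--julia", default="julia")
parser.add_argument("--pyjulia-debug", action="store_true")

ns, jl_args = parser.parse_known_args(["--julia=/usr/bin/julia", "-e", "1+1"])
print(ns.julia)   # /usr/bin/julia
print(jl_args)    # ['-e', '1+1'] -- unrecognized arguments pass through to Julia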
59e7fc5c924ebf8af66e0aeef990da55e84d3f9e
|
update to 3.30.1
|
packages/dependencies/sqlite3.py
|
packages/dependencies/sqlite3.py
|
{
'repo_type' : 'archive',
'custom_cflag' : '-O2', # make sure we build it without -ffast-math
'download_locations' : [
{ 'url' : 'https://www.sqlite.org/2019/sqlite-autoconf-3300000.tar.gz', 'hashes' : [ { 'type' : 'sha256', 'sum' : 'e0a8cf4c7a87455e55e10413d16f358ca121ccec687fe1301eac95e2d340fc58' }, ], },
{ 'url' : 'https://fossies.org/linux/misc/sqlite-autoconf-3300000.tar.gz', 'hashes' : [ { 'type' : 'sha256', 'sum' : 'e0a8cf4c7a87455e55e10413d16f358ca121ccec687fe1301eac95e2d340fc58' }, ], },
],
'cflag_addition' : '-fexceptions -DSQLITE_ENABLE_COLUMN_METADATA=1 -DSQLITE_USE_MALLOC_H=1 -DSQLITE_USE_MSIZE=1 -DSQLITE_DISABLE_DIRSYNC=1 -DSQLITE_ENABLE_RTREE=1 -fno-strict-aliasing',
'configure_options': '--host={target_host} --prefix={target_prefix} --disable-shared --enable-static --enable-threadsafe --disable-editline --enable-readline --enable-json1 --enable-fts5 --enable-session',
'depends_on': (
'zlib',
),
'update_check' : { 'url' : 'https://www.sqlite.org/index.html', 'type' : 'httpregex', 'regex' : r'<a href="releaselog/.*\.html">Version (?P<version_num>[\d.]+)<\/a>' },
'_info' : { 'version' : '3.30.0', 'fancy_name' : 'libsqlite3' },
}
|
Python
| 0.000001
|
@@ -169,33 +169,33 @@
te-autoconf-3300
-0
+1
00.tar.gz', 'has
@@ -237,72 +237,72 @@
: '
-e0a8cf4c7a87455e55e10413d16f358ca121ccec687fe1301eac95e2d340fc58
+8c5a50db089bd2a1b08dbc5b00d2027602ca7ff238ba7658fabca454d4298e60
' %7D,
@@ -372,17 +372,17 @@
onf-3300
-0
+1
00.tar.g
@@ -432,72 +432,72 @@
: '
-e0a8cf4c7a87455e55e10413d16f358ca121ccec687fe1301eac95e2d340fc58
+8c5a50db089bd2a1b08dbc5b00d2027602ca7ff238ba7658fabca454d4298e60
' %7D,
@@ -1133,17 +1133,17 @@
: '3.30.
-0
+1
', 'fanc
|
6ad4796030aab2f6dbf8389b4030007d0fcf8761
|
Update to test for mount setup
|
panoptes/test/mount/test_ioptron.py
|
panoptes/test/mount/test_ioptron.py
|
from nose.tools import raises
import panoptes
from panoptes.mount.ioptron import Mount
class TestIOptron():
@raises(AssertionError)
def test_no_config_no_commands(self):
""" Mount needs a config """
mount = Mount()
@raises(AssertionError)
def test_config_no_commands(self):
""" """
mount = Mount(config={'mount': { 'model': 'ioptron', 'port':'/dev/ttyUSB0' } }, commands=dict())
|
Python
| 0
|
@@ -252,34 +252,35 @@
def test_config_
-no
+bad
_commands(self):
@@ -285,16 +285,82 @@
):%0A%09%09%22%22%22
+ Passes in a default config but blank commands, which should error
%22%22%22%0A%09%09m
@@ -451,11 +451,230 @@
nds=
-dict()
+%7B'foo': 'bar'%7D)%0A%0A%09def test_config_auto_commands(self):%0A%09%09%22%22%22 Passes in config like above, but no commands, so they should read from defaults %22%22%22%0A%09%09mount = Mount(config=%7B'mount': %7B 'model': 'ioptron', 'port':'/dev/ttyUSB0' %7D %7D
)
|
edcee81796f335c87ec15c258f4f551a6fd21c55
|
kill UI when manager failed to start
|
selfdrive/manager/manager.py
|
selfdrive/manager/manager.py
|
#!/usr/bin/env python3
import datetime
import os
import signal
import subprocess
import sys
import traceback
from typing import List, Tuple, Union
import cereal.messaging as messaging
import selfdrive.sentry as sentry
from common.basedir import BASEDIR
from common.params import Params, ParamKeyType
from common.text_window import TextWindow
from selfdrive.boardd.set_time import set_time
from selfdrive.hardware import HARDWARE, PC
from selfdrive.manager.helpers import unblock_stdout
from selfdrive.manager.process import ensure_running
from selfdrive.manager.process_config import managed_processes
from selfdrive.athena.registration import register, UNREGISTERED_DONGLE_ID
from selfdrive.swaglog import cloudlog, add_file_handler
from selfdrive.version import is_dirty, get_commit, get_version, get_origin, get_short_branch, \
terms_version, training_version
sys.path.append(os.path.join(BASEDIR, "pyextra"))
def manager_init() -> None:
# update system time from panda
set_time(cloudlog)
# save boot log
subprocess.call("./bootlog", cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
params = Params()
params.clear_all(ParamKeyType.CLEAR_ON_MANAGER_START)
default_params: List[Tuple[str, Union[str, bytes]]] = [
("CompletedTrainingVersion", "0"),
("HasAcceptedTerms", "0"),
("OpenpilotEnabledToggle", "1"),
]
if not PC:
default_params.append(("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')))
if params.get_bool("RecordFrontLock"):
params.put_bool("RecordFront", True)
if not params.get_bool("DisableRadar_Allow"):
params.delete("DisableRadar")
# set unset params
for k, v in default_params:
if params.get(k) is None:
params.put(k, v)
# is this dashcam?
if os.getenv("PASSIVE") is not None:
params.put_bool("Passive", bool(int(os.getenv("PASSIVE", "0"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
# Create folders needed for msgq
try:
os.mkdir("/dev/shm")
except FileExistsError:
pass
except PermissionError:
print("WARNING: failed to make /dev/shm")
# set version params
params.put("Version", get_version())
params.put("TermsVersion", terms_version)
params.put("TrainingVersion", training_version)
params.put("GitCommit", get_commit(default=""))
params.put("GitBranch", get_short_branch(default=""))
params.put("GitRemote", get_origin(default=""))
# set dongle id
reg_res = register(show_spinner=True)
if reg_res:
dongle_id = reg_res
else:
serial = params.get("HardwareSerial")
raise Exception(f"Registration failed for device {serial}")
os.environ['DONGLE_ID'] = dongle_id # Needed for swaglog
if not is_dirty():
os.environ['CLEAN'] = '1'
# init logging
sentry.init(sentry.SentryProject.SELFDRIVE)
cloudlog.bind_global(dongle_id=dongle_id, version=get_version(), dirty=is_dirty(),
device=HARDWARE.get_device_type())
def manager_prepare() -> None:
for p in managed_processes.values():
p.prepare()
def manager_cleanup() -> None:
# send signals to kill all procs
for p in managed_processes.values():
p.stop(block=False)
# ensure all are killed
for p in managed_processes.values():
p.stop(block=True)
cloudlog.info("everything is dead")
def manager_thread() -> None:
cloudlog.bind(daemon="manager")
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
params = Params()
ignore: List[str] = []
if params.get("DongleId", encoding='utf8') in (None, UNREGISTERED_DONGLE_ID):
ignore += ["manage_athenad", "uploader"]
if os.getenv("NOBOARD") is not None:
ignore.append("pandad")
ignore += [x for x in os.getenv("BLOCK", "").split(",") if len(x) > 0]
ensure_running(managed_processes.values(), started=False, not_run=ignore)
started_prev = False
sm = messaging.SubMaster(['deviceState'])
pm = messaging.PubMaster(['managerState'])
while True:
sm.update()
not_run = ignore[:]
started = sm['deviceState'].started
driverview = params.get_bool("IsDriverViewEnabled")
ensure_running(managed_processes.values(), started, driverview, not_run)
# trigger an update after going offroad
if started_prev and not started and 'updated' in managed_processes:
os.sync()
managed_processes['updated'].signal(signal.SIGHUP)
started_prev = started
running = ' '.join("%s%s\u001b[0m" % ("\u001b[32m" if p.proc.is_alive() else "\u001b[31m", p.name)
for p in managed_processes.values() if p.proc)
print(running)
cloudlog.debug(running)
# send managerState
msg = messaging.new_message('managerState')
msg.managerState.processes = [p.get_process_state_msg() for p in managed_processes.values()]
pm.send('managerState', msg)
# Exit main loop when uninstall/shutdown/reboot is needed
shutdown = False
for param in ("DoUninstall", "DoShutdown", "DoReboot"):
if params.get_bool(param):
shutdown = True
params.put("LastManagerExitReason", param)
cloudlog.warning(f"Shutting down manager - {param} set")
if shutdown:
break
def main() -> None:
prepare_only = os.getenv("PREPAREONLY") is not None
manager_init()
# Start UI early so prepare can happen in the background
if not prepare_only:
managed_processes['ui'].start()
manager_prepare()
if prepare_only:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
sentry.capture_exception()
finally:
manager_cleanup()
params = Params()
if params.get_bool("DoUninstall"):
cloudlog.warning("uninstalling")
HARDWARE.uninstall()
elif params.get_bool("DoReboot"):
cloudlog.warning("reboot")
HARDWARE.reboot()
elif params.get_bool("DoShutdown"):
cloudlog.warning("shutdown")
HARDWARE.shutdown()
if __name__ == "__main__":
unblock_stdout()
try:
main()
except Exception:
add_file_handler(cloudlog)
cloudlog.exception("Manager failed to start")
# Show last 3 lines of traceback
error = traceback.format_exc(-3)
error = "Manager failed to start\n\n" + error
with TextWindow(error) as t:
t.wait_for_exit()
raise
# manual exit because we are forked
sys.exit(0)
|
Python
| 0.000005
|
@@ -6168,24 +6168,104 @@
to start%22)%0A%0A
+ try:%0A managed_processes%5B'ui'%5D.stop()%0A except Exception:%0A pass%0A%0A
# Show l
|
6b3e44b5e3ba66b870a584544a15a17036cf043a
|
fix syntax error
|
fruitScope/plotjson.py
|
fruitScope/plotjson.py
|
import json
import Gnuplot, Gnuplot.PlotItems, Gnuplot.funcutils
import argparse
import time, os, sys
import tempfile
import math
def check_dir(directory):
if not os.path.exists(directory):
print "Directory {} does not exist...creating...".format(directory)
os.makedirs(directory)
def main():
parser = argparse.ArgumentParser(description = "Plots and saves json objects in folder parseddata, uses a config file for plot setting inside cfg/.plotjson for gnuplot settings")
parser.add_argument('f', metavar = 'f', help="Filename of json file to be plotted.")
parser.add_argument('title', metavar = 't', help = "Title to be included in plot.")
parser.add_argument('-e', '--errorbars', help = "Use this flag to NOT plot with error bars, in case the plot is too messy.", action = "store_true")
args = parser.parse_args()
p = plotJson(args.errorbars)
p.load(args.f, args.title)
def hms(x):
m, s = divmod(x, 60)
h, m = divmod(m, 60)
return "%dh:%02dm:%02ds" % (h, m, s)
class plotJson():
def __init__(self, eb):
self.eb = eb
self.cfg = {}
try:
with open('cfg/.plotjson', 'rb+') as f:
x = f.read()
x = x.split('\n')
for i in x:
if len(i) > 0:
i = i.rstrip()
i = i.split('=')
self.cfg[i[0]] = i[1]
except:
print "No config file found...using default settings";
self.cfg =
{
"format": "epscairo",
"xlabel": "t@level (ns)",
"ylabel": "Two-photon coincidence events"
}
def getx(self,hist, desc):
h_offset = float(desc['horiz_offset']) * 10 ** 9
h_binsize = float(desc['horiz_interval']) * 10 ** 9
s = []
for i in xrange(len(hist)):
s.append([(i * h_binsize) + h_offset, hist[i]])
return s
def load(self, path, title):
#fpath = 'parseddata/' + path
fpath = path
with open(fpath, 'rb+') as datafile:
data = json.load(datafile)
if not isinstance(data['hist'][0], list):
data['hist'] = self.getx(data['hist'], data['desc'])
duration = int(float(data['desc']['acq_duration']))
duration = hms(duration)
rawf = open(fpath + ".dat", 'wb+')
for i in xrange(len(data['hist'])):
_x = data['hist'][i][0]
_y = data['hist'][i][1]
_yerror = round(math.sqrt(_y),1)
rawf.write("{}\t{}\n".format(_x, _y))
rawf.close()
self.initPlot()
self.g('set title "{} {}, acquisition duration {}"'.format(path,title,duration))
self.g('set output "{}.eps"'.format(fpath))
if not self.eb:
self.g('f(x) = mean_y')
self.g('fit f(x) "{}" u 1:2 via mean_y'.format(fpath))
self.g('plot "{}" u 1:2:(sqrt(mean_y)) with yerrorbars pt 7 ps 0.2 '.format(fpath))
else:
self.g('plot "{}" u 1:2 w p pt 7 ps 0.2'.format(fpath))
def initPlot(self):
self.g = Gnuplot.Gnuplot()
self.g('set term {} transparent truecolor size 10,7.5'.format(self.cfg['format']))
self.g('set xlabel "{}"'.format(self.cfg['xlabel']))
self.g('set ylabel {}'.format(self.cfg['ylabel']))
main()
|
Python
| 0.000003
|
@@ -1531,28 +1531,16 @@
lf.cfg =
-%0A
%7B%0A
|
c884eae90e41577670b8bd194cc55b31e49f3f61
|
fix data provider ref
|
src/py/crankshaft/crankshaft/clustering/kmeans.py
|
src/py/crankshaft/crankshaft/clustering/kmeans.py
|
from sklearn.cluster import KMeans
import numpy as np
from crankshaft.analysis_data_provider import AnalysisDataProvider
class Kmeans:
def __init__(self, data_provider=None):
if data_provider is None:
self.data_provider = AnalysisDataProvider()
else:
self.data_provider = data_provider
def spatial(self, query, no_clusters, no_init=20):
"""
find centers based on clusters of latitude/longitude pairs
query: SQL query that has a WGS84 geometry (the_geom)
"""
params = {"subquery": query,
"geom_col": "the_geom",
"id_col": "cartodb_id"}
data = self.data_provider.get_spatial_kmeans(params)
# Unpack query response
xs = data[0]['xs']
ys = data[0]['ys']
ids = data[0]['ids']
km = KMeans(n_clusters=no_clusters, n_init=no_init)
labels = km.fit_predict(zip(xs, ys))
return zip(ids, labels)
def nonspatial(self, subquery, colnames, num_clusters=5,
id_col='cartodb_id', standarize=True):
"""
query (string): A SQL query to retrieve the data required to do the
k-means clustering analysis, like so:
SELECT * FROM iris_flower_data
colnames (list): a list of the column names which contain the data
of interest, like so: ["sepal_width",
"petal_width",
"sepal_length",
"petal_length"]
num_clusters (int): number of clusters (greater than zero)
id_col (string): name of the input id_column
"""
import json
from sklearn import metrics
out_id_colname = 'rowids'
# TODO: need a random seed?
params = {"cols": colnames,
"subquery": subquery,
"id_col": id_col}
data = self.query_runner.get_nonspatial_kmeans(params, standarize)
# fill array with values for k-means clustering
if standarize:
cluster_columns = _scale_data(
_extract_columns(data, colnames))
else:
cluster_columns = _extract_columns(data, colnames)
print str(cluster_columns)
# TODO: decide on optimal parameters for most cases
# Are there ways of deciding parameters based on inputs?
kmeans = KMeans(n_clusters=num_clusters,
random_state=0).fit(cluster_columns)
centers = [json.dumps(dict(zip(colnames, c)))
for c in kmeans.cluster_centers_[kmeans.labels_]]
silhouettes = metrics.silhouette_samples(cluster_columns,
kmeans.labels_,
metric='sqeuclidean')
return zip(kmeans.labels_,
centers,
silhouettes,
data[0][out_id_colname])
# -- Preprocessing steps
def _extract_columns(data, colnames):
"""
Extract the features from the query and pack them into a NumPy array
data (list of dicts): result of the kmeans request
id_col_name (string): name of column which has the row id (not a
feature of the analysis)
"""
return np.array([data[0]['arr_{}'.format(c)] for c in colnames],
dtype=float).T
def _scale_data(features):
"""
    Scale all input columns to center on 0 with a standard deviation of 1
features (numpy matrix): features of dimension (n_features, n_samples)
"""
from sklearn.preprocessing import StandardScaler
return StandardScaler().fit_transform(features)
|
Python
| 0
|
@@ -2070,18 +2070,19 @@
elf.
-query_runn
+data_provid
er.g
|
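A runnable sketch of the scikit-learn pattern that nonspatial() composes -- scale, cluster, then score every row with silhouette_samples (the data here is synthetic):

import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples
from sklearn.preprocessing import StandardScaler

rng = np.random.default_rng(0)
X = np.vstack([rng.normal(0, 1, (50, 2)), rng.normal(5, 1, (50, 2))])
X = StandardScaler().fit_transform(X)        # what _scale_data does
km = KMeans(n_clusters=2, n_init=10, random_state=0).fit(X)
scores = silhouette_samples(X, km.labels_)   # one silhouette score per row
print(km.labels_[:5], scores[:5].round(2))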
4f0e0d4d92301dea408925d99001913e76a15ee1
|
Update filterscan.py
|
lib/filterscan.py
|
lib/filterscan.py
|
try:
import subprocess
from lib.core.core import Core
from lib.filter.filter import Filter
except ImportError, err:
from lib.core.core import Core
Core.print_error(err)
class FilterScan(Filter):
def __init__(self, args):
Filter.__init__(self, [args.pcap], args, "filter")
print self._output_dir
def __run_cmd(self, cmd, file_name, result_set):
output_file = "{0}{1}.txt".format(self._output_dir, file_name)
result_file = open(output_file, "w")
proc = subprocess.Popen([cmd], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,)
if isinstance(result_set, (list, tuple)):
for line in iter(proc.stdout.readline, ''):
if line not in result_set:
result_set.append(line)
else:
for line in iter(proc.stdout.readline, ''):
try:
result_set[line.rstrip()] += 1
except:
result_set[line.rstrip()] = 1
if isinstance(result_set, (list, tuple)):
if len(result_set) > 10:
result_file.write("".join(result_set[1:10]))
else:
result_file.write("".join(result_set))
else:
for counter, value in enumerate(sorted(result_set, key=result_set.get, reverse=True)):
if counter == 10:
break
else:
print result_set[value], value
def _run(self, logger):
for file_name, tshark_cmd in self._filter_commands.iteritems():
result_set = None
if file_name.startswith("top10"):
result_set = {}
else:
result_set = []
self.__run_cmd(tshark_cmd,file_name, result_set)
|
Python
| 0
|
@@ -1,10 +1,20 @@
-%0A
try:%0A
+%09import os%0A
%09imp
@@ -235,17 +235,37 @@
args):%0A%09
-%09
+%0A%09%09self.__args = args
%0A%09%09Filte
@@ -282,16 +282,23 @@
(self, %5B
+self.__
args.pca
@@ -301,16 +301,23 @@
.pcap%5D,
+self.__
args, %22f
@@ -328,33 +328,8 @@
r%22)%0A
-%09%09print self._output_dir%0A
%0A%0A%09d
@@ -373,16 +373,24 @@
sult_set
+, logger
):%0A%09%09%0A%09%09
@@ -410,16 +410,20 @@
%22%7B0%7D%7B1%7D
+_%7B2%7D
.txt%22.fo
@@ -454,16 +454,52 @@
ile_name
+, os.path.basename(self.__args.pcap)
)%0A%09%09resu
@@ -532,16 +532,76 @@
, %22w%22)%0A%0A
+%09%09logger._logging(%22Filter: %7B0%7D parsing%22.format(file_name))%0A%0A
%09%09proc =
@@ -731,18 +731,45 @@
ple)):%0A%09
+
%09%09
+%5B result_set.append(line)
for line
@@ -802,22 +802,17 @@
ine, '')
-:%0A%09%09%09%09
+
if line
@@ -832,38 +832,10 @@
_set
-:%0A%09%09%09%09%09result_set.append(line)
+ %5D
%0A%09%09e
@@ -1308,14 +1308,45 @@
%09%09%09%09
-print
+result_file.write(%22%7B0%7D %7B1%7D%5Cn%22.format(
resu
@@ -1365,16 +1365,18 @@
%5D, value
+))
%0A%09%09%09%09%0A%0A%0A
@@ -1403,16 +1403,62 @@
er):%0A%09%09%0A
+%09%09logger._logging(%22START: Filter pcap file%22)%0A%0A
%09%09for fi
@@ -1535,18 +1535,11 @@
t =
-None%09%0A%0A%09%09%09
+%7B%7D
if f
@@ -1570,62 +1570,16 @@
10%22)
-:%0A%09%09%09%09result_set = %7B%7D%0A%09%09%09else:%0A%09%09%09%09result_set = %5B%5D%0A%09%09%09
+ else %5B%5D
%0A%09%09%09
@@ -1604,16 +1604,17 @@
ark_cmd,
+
file_nam
@@ -1626,10 +1626,159 @@
sult_set
+, logger)%0A%09%09%0A%09%09logger._logging(%22STOP: Filter pcap file%22)%0A%09%09logger._logging(%22Finished Filtering. Results saved in %7B0%7D folder%22.format(self._output_dir)
)%0A
|
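__run_cmd above streams tshark output a line at a time through iter(readline, ''); the same pattern standalone, with an illustrative POSIX command in place of tshark:

import subprocess

proc = subprocess.Popen(["printf", r"a\nb\na\n"], stdout=subprocess.PIPE,
                        text=True)
seen = []
for line in iter(proc.stdout.readline, ""):  # '' signals EOF in text mode
    if line not in seen:                     # dedupe, as in the list branch
        seen.append(line)
proc.wait()
print(seen)  # ['a\n', 'b\n']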
f953117b2e42721fafd52865145cbba7b989dd22
|
Fix the wsgi path reference
|
accountant/settings/common.py
|
accountant/settings/common.py
|
"""
Django settings for accountant project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
import os
import sys
from decimal import Decimal
import accounting
VERSION = accounting.VERSION
DISPLAY_VERSION = accounting.get_version()
DISPLAY_SHORT_VERSION = accounting.get_short_version()
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
PROJECT_DIR = os.path.basename(BASE_DIR)
# Add the BASE_DIR to the path in order to reuse the apps easily
sys.path.append(BASE_DIR)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'o7k)j*lewj6va4yqz=#1^z@6wtf!$#dx(u=z!3(351rc27c9fm'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
LOCAL_SERVER = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
SITE_ID = 1
SITE_MAIN_DOMAIN = 'example.com'
SITE_MAIN_NAME = 'example.com'
# Application definition
DJANGO_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
)
THIRD_PARTY_APPS = (
'djrill',
'crispy_forms',
'avatar', # for user avatars
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
'stronghold', # enforce login on the whole app
)
# Accounting apps
from accounting import get_apps
LOCAL_APPS = get_apps()
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# Migrations
MIGRATION_MODULES = {
'sites': 'migrations.sites',
'socialaccount': 'migrations.socialaccount',
}
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'stronghold.middleware.LoginRequiredMiddleware',
)
ROOT_URLCONF = 'accountant.urls'
WSGI_APPLICATION = 'accountant.wsgi.application'
# Emailing
DEFAULT_FROM_EMAIL = 'noreply@accountant.fr'
# Templates
# https://docs.djangoproject.com/en/1.7/ref/settings/#template-context-processors
from accounting import ACCOUNTING_TEMPLATE_CONTEXT_PROCESSORS
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'allauth.account.context_processors.account',
'allauth.socialaccount.context_processors.socialaccount',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
) + ACCOUNTING_TEMPLATE_CONTEXT_PROCESSORS
# See: https://docs.djangoproject.com/en/1.7/ref/settings/#template-loaders
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# See: https://docs.djangoproject.com/en/1.7/ref/settings/#template-dirs
from accounting import ACCOUNTING_MAIN_TEMPLATE_DIR
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
ACCOUNTING_MAIN_TEMPLATE_DIR,
)
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': ''
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'fr-fr'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# See: https://docs.djangoproject.com/en/1.7/ref/contrib/staticfiles\
# /#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'assets'),
)
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
"djangobower.finders.BowerFinder",
)
# Media files
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# Bower config
BOWER_COMPONENTS_ROOT = os.path.abspath(os.path.join(BASE_DIR, 'components'))
BOWER_INSTALLED_APPS = (
'modernizr',
'jquery',
'bootstrap',
)
# Custom User
LOGIN_REDIRECT_URL = 'connect:getting-started'
LOGIN_URL = 'account_login'
# Authentication
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
# Stronghold
STRONGHOLD_PUBLIC_URLS = (
r'^%s.+$' % STATIC_URL,
r'^%s.+$' % MEDIA_URL,
r'^/accounts/.*$',
)
STRONGHOLD_PUBLIC_NAMED_URLS = (
)
# Forms
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# Accounting
from accounting.defaults import *
|
Python
| 0.001083
|
@@ -2445,27 +2445,16 @@
TION = '
-accountant.
wsgi.app
|
bf9c799d1fb13098bd4bce65d44f86bb352b834a
|
Comment out an extensive validation
|
main.py
|
main.py
|
#!/usr/bin/python3
#
# The MIT License (MIT)
#
# Copyright (c) 2013 Andrian Nord
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import sys
import ljd.rawdump.parser
import ljd.pseudoasm.writer
import ljd.ast.builder
import ljd.ast.validator
import ljd.ast.locals
import ljd.ast.slotworks
import ljd.ast.unwarper
import ljd.ast.mutator
import ljd.lua.writer
def dump(name, obj, level=0):
indent = level * '\t'
if name is not None:
prefix = indent + name + " = "
else:
prefix = indent
if isinstance(obj, (int, float, str)):
print(prefix + str(obj))
elif isinstance(obj, list):
print (prefix + "[")
for value in obj:
dump(None, value, level + 1)
print (indent + "]")
elif isinstance(obj, dict):
print (prefix + "{")
for key, value in obj.items():
dump(key, value, level + 1)
print (indent + "}")
else:
print (prefix + obj.__class__.__name__)
for key in dir(obj):
if key.startswith("__"):
continue
val = getattr(obj, key)
dump(key, val, level + 1)
def main():
file_in = sys.argv[1]
header, prototype = ljd.rawdump.parser.parse(file_in)
if not prototype:
return 1
# TODO: args
# ljd.pseudoasm.writer.write(sys.stdout, header, prototype)
ast = ljd.ast.builder.build(prototype)
assert ast is not None
ljd.ast.validator.validate(ast, warped=True)
ljd.ast.mutator.pre_pass(ast)
ljd.ast.validator.validate(ast, warped=True)
ljd.ast.locals.mark_locals(ast)
ljd.ast.validator.validate(ast, warped=True)
ljd.ast.slotworks.eliminate_temporary(ast)
ljd.ast.validator.validate(ast, warped=True)
if True:
ljd.ast.unwarper.primary_pass(ast)
ljd.ast.validator.validate(ast, warped=True)
ljd.ast.locals.mark_local_definitions(ast)
ljd.ast.validator.validate(ast, warped=False)
ljd.ast.mutator.primary_pass(ast)
ljd.ast.validator.validate(ast, warped=False)
ljd.lua.writer.write(sys.stdout, ast)
return 0
if __name__ == "__main__":
retval = main()
sys.exit(retval)
# vim: ts=8 noexpandtab nosmarttab softtabstop=8 shiftwidth=8
|
Python
| 0
|
@@ -2348,32 +2348,34 @@
pre_pass(ast)%0A%0A%09
+#
ljd.ast.validato
@@ -2431,32 +2431,34 @@
k_locals(ast)%0A%0A%09
+#
ljd.ast.validato
@@ -2529,24 +2529,26 @@
rary(ast)%0A%0A%09
+#
ljd.ast.vali
@@ -2610,28 +2610,22 @@
nwarper.
-primary_pass
+unwarp
(ast)%0A%0A%09
@@ -2617,32 +2617,34 @@
.unwarp(ast)%0A%0A%09%09
+#
ljd.ast.validato
@@ -2662,27 +2662,28 @@
ast, warped=
-Tru
+Fals
e)%0A%0A%09%09ljd.as
@@ -2714,32 +2714,34 @@
nitions(ast)%0A%0A%09%09
+#
ljd.ast.validato
|
3b6cc83cfea47550619d8a1d966131a1cc90f1c9
|
clean up processes/threads
|
lib/ipf/engine.py
|
lib/ipf/engine.py
|
###############################################################################
# Copyright 2012 The University of Texas at Austin #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
###############################################################################
import copy
import json
import logging
import logging.config
import os
import sys
import time
import traceback
from ipf.error import WorkflowError
from ipf.home import IPF_HOME
from ipf.step import Step
from ipf.workflow import Workflow
#######################################################################################################################
logging.config.fileConfig(os.path.join(IPF_HOME,"etc","logging.conf"))
logger = logging.getLogger(__name__)
#######################################################################################################################
class WorkflowEngine(object):
def __init__(self):
pass
def run(self, workflow_file_name):
workflow = Workflow()
if os.path.isfile(workflow_file_name):
workflow.read(workflow_file_name)
else:
file_name = os.path.join(IPF_HOME,"etc","workflow",workflow_file_name)
if os.path.isfile(file_name):
workflow.read(file_name)
else:
raise WorkflowError("cannot open workflow file %s as a path or relative to %s/etc/workflow" % \
(workflow_file_name,IPF_HOME))
self._setDependencies(workflow)
logger.debug(workflow)
logger.info("starting workflow %s",workflow.name)
for step in workflow.steps:
step.start()
start_time = time.time()
steps_with_inputs = filter(self._sendNoMoreInputs,workflow.steps)
while self._anyAlive(workflow.steps):
if workflow.timeout is not None and time.time() - start_time > workflow.timeout:
                logger.warn("timed out, terminating workflow")
for step in workflow.steps:
if step.is_alive():
step.terminate()
break
time.sleep(0.1)
steps_with_inputs = filter(self._sendNoMoreInputs,steps_with_inputs)
# wait again, in case we terminated
while self._anyAlive(workflow.steps):
time.sleep(0.1)
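        # the workflow counts as a success only if every step exited with code 0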
if reduce(lambda b1,b2: b1 and b2, map(lambda step: step.exitcode == 0, workflow.steps)):
logger.info("workflow succeeded")
else:
logger.error("workflow failed")
for step in workflow.steps:
if step.exitcode == 0:
logger.info(" %10s succeeded (%s)",step.id,step.__class__.__name__)
else:
logger.error(" %10s failed (%s)",step.id,step.__class__.__name__)
def _anyAlive(self, steps):
return reduce(lambda b1,b2: b1 or b2, map(lambda step: step.is_alive(), steps), False)
def _sendNoMoreInputs(self, step):
if self._anyAlive(step.depends_on):
return True
logger.debug("no more inputs to step %s",step.id)
step.input_queue.put(None)
return False
def _setDependencies(self, workflow):
for step in workflow.steps:
step.depends_on = [] # [step, ...]
for step in workflow.steps:
for type in step.outputs:
for dstep in step.outputs[type]:
dstep.depends_on.append(step)
#######################################################################################################################
|
Python
| 0.000001
|
@@ -3170,73 +3170,20 @@
-# wait again, in case we terminated%0A while self._anyAlive(
+for step in
work
@@ -3184,33 +3184,32 @@
n workflow.steps
-)
:%0A ti
@@ -3206,30 +3206,26 @@
-time.sleep(0.1
+step.join(
)%0A%0A
@@ -4048,16 +4048,138 @@
ut(None)
+ # send None to indicate no more inputs%0A step.input_queue.close() # close the queue to stop the background thread
%0A
|
c52d056091acf49624450cc2d1e01cbf0900a08f
|
Add a profiling option
|
main.py
|
main.py
|
#!/usr/bin/env python
import sys
from PyQt4.QtGui import QApplication as QApp
from gui.EditorWindow import MainWindow
def main():
import grammar.grammars
grammar.grammars.compileGrammars()
app = QApp(sys.argv)
ex = MainWindow()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
Python
| 0.00003
|
@@ -298,16 +298,123 @@
ain__':%0A
+%0A profile = False%0A%0A if profile:%0A import cProfile%0A cProfile.run('main()')%0A else:%0A
main
|
c8a6699b7dca3f82905ae01f17f04337dd926b83
|
Tweak for Android build.
|
main.py
|
main.py
|
#!/usr/bin/env python3
from flask import Flask
from flask import redirect
from flask_socketio import SocketIO, emit
from flask import stream_with_context, Response
from wsgidav.wsgidav_app import DEFAULT_CONFIG, WsgiDAVApp
from wsgidav.fs_dav_provider import FilesystemProvider
from werkzeug.wsgi import DispatcherMiddleware
import os
import sys
import mimetypes
mimetypes.add_type('image/svg+xml', '.svg')
try:
from urllib import parse as urlparse
except ImportError:
import urlparse
if 'ANDROID_ARGUMENT' in os.environ:
_ANDROID = True
else:
_ANDROID = False
if sys.platform=='linux2':
reload(sys)
sys.setdefaultencoding('utf-8')
if sys.platform.startswith('linux') and not _ANDROID:
import threading
import gi
gi.require_version('WebKit2', '4.0')
from gi.repository import WebKit2
from gi.repository import Gtk
from gi.repository import Gio
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL)
app_dir = "Jappy.activity"
app = Flask(__name__, static_folder=app_dir)
socketio = SocketIO(app)
@app.route("/")
def hello():
return redirect(app_dir + "/index.html")
@app.route("/shutdown")
def bye(*args):
print ("Bye!")
socketio.stop()
Gtk.main_quit()
return 'Bye!'
@socketio.on('connect', namespace='/test')
def test_connect():
emit('my response', {'data': 'Connected'})
@socketio.on('disconnect', namespace='/test')
def test_disconnect():
print('Client disconnected')
def start_server():
bundle_dir = os.path.dirname(os.path.realpath(__file__))
provider = FilesystemProvider(bundle_dir)
config = DEFAULT_CONFIG.copy()
config.update({
"mount_path": "/dav",
"provider_mapping": {"/": provider},
"user_mapping": {},
"verbose": 1,
})
dav_app = WsgiDAVApp(config)
app.wsgi_app = DispatcherMiddleware(app.wsgi_app, {
'/dav' : dav_app
})
socketio.run(app, host='0.0.0.0')
def start_webview():
def _app_scheme_cb(request, user_data):
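        # serve "activity://" requests through the in-process Flask test
        # client instead of a real HTTP round trip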
uri = request.get_path()
with app.test_client() as c:
response = c.get(uri) # TODO: instead, use c.open stream
if response.status_code==302: # Handle redirect
new_uri = urlparse.urlparse(response.location)
new_uri = 'activity://127.0.0.1' + new_uri.path +\
new_uri.params + new_uri.query
request.get_web_view().load_uri(new_uri)
data, mime = response.data, response.mimetype
input_stream = Gio.MemoryInputStream.new_from_data(data, None)
#request.finish(Gio.File.new_for_path(path).read(None),
# -1, Gio.content_type_guess(path, None)[0])
request.finish( input_stream, len(data), mime )
context = WebKit2.WebContext.get_default()
context.register_uri_scheme("activity", _app_scheme_cb, None)
window = Gtk.Window()
window.set_default_size(800, 600)
window.maximize()
web_view = WebKit2.WebView()
#web_view.connect("load-changed", _loading_changed_cb)
#web_view.connect('run-file-chooser', __run_file_chooser)
window.add(web_view)
settings = web_view.get_settings()
settings.set_property("enable-developer-extras", True)
web_view.load_uri("activity://127.0.0.1/")
window.set_title("Jappy")
window.show_all()
window.connect("delete-event", bye)
if __name__ == "__main__":
if sys.platform.startswith('linux') and not _ANDROID:
t = threading.Thread(target=start_server)
t.daemon = True
t.start()
start_webview()
Gtk.main()
else:
start_server()
sys.exit()
|
Python
| 0
|
@@ -1210,16 +1210,41 @@
.stop()%0A
+ if not _ANDROID:%0A
Gtk.
|
30abf36b7626035f657a4417346fc303725462c4
|
Add new grouptype to db init
|
main.py
|
main.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Shikin, political donations database.
"""
from argparse import ArgumentParser
import shikin
import os
from shikin.model import GroupType, DocType, PubType, User, AppConfig
def initdb_command(args):
"""Creates the database tables."""
db = shikin.app.dbobj
db.create_all()
print('Initialized the database.')
# Seed some tables, if needed:
groups = GroupType.query.count()
if groups == 0:
db.session.add(GroupType(u'議員別'))
db.session.add(GroupType(u'政党本部'))
db.session.add(GroupType(u'政党支部'))
db.session.add(GroupType(u'政治資金団体'))
db.session.add(GroupType(u'資金管理団体'))
db.session.add(GroupType(u'その他の政治団体'))
# An "unknown" type we can use when not sure
db.session.add(GroupType(u'不明'))
db.session.commit()
doctypes = DocType.query.count()
if doctypes == 0:
db.session.add(DocType(u'政治資金収支報告書'))
db.session.add(DocType(u'政党交付金使途等報告書'))
db.session.add(DocType(u'政治資金収支報告書の要旨'))
db.session.commit()
pubtypes = PubType.query.count()
if pubtypes == 0:
db.session.add(PubType(u'定期公表'))
db.session.add(PubType(u'解散分'))
db.session.add(PubType(u'追加分'))
db.session.add(PubType(u'解散支部分'))
db.session.commit()
users = User.query.count()
if users == 0:
db.session.add(User(name='admin', pw_hash='*'))
db.session.commit()
configs = AppConfig.query.count()
if configs == 0:
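        # generate a persistent random secret key on first initialization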
db.session.add(AppConfig(key='secret_key', val=os.urandom(32)))
db.session.commit()
print('Seeded tables which need it.')
def dropdb_command(args):
    """Drops the database tables."""
if not args.yes:
print("Do you really want to drop the db? Add --yes if you're sure.")
return
shikin.app.dbobj.drop_all()
print('dropped the database.')
def run_command(args):
shikin.app.run(host=args.host, port=args.port, debug=not args.ndebug)
def startup(args):
import logging
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
elif args.quiet:
logging.basicConfig(level=logging.WARN)
def main():
p = ArgumentParser(description="debug server for shikin")
p.add_argument("--verbose", action="store_true", help="increase logging level")
p.add_argument("--quiet", action="store_true", help="decrease logging level")
sub = p.add_subparsers(dest='command', help='command help')
init_sub = sub.add_parser("initdb", help="initialise the db")
init_sub.set_defaults(func=initdb_command)
drop_sub = sub.add_parser("dropdb", help="drop the db")
drop_sub.add_argument("--yes", action="store_true", help="really drop the db")
drop_sub.set_defaults(func=dropdb_command)
run_sub = sub.add_parser("run", help="run the web app")
run_sub.add_argument("--port", type=int, help="port to serve on", default=5000)
run_sub.add_argument("--host", help="host to serve from (default=127.0.0.1)", default="127.0.0.1")
run_sub.add_argument("--ndebug", help="disable debug mode", action="store_true")
run_sub.set_defaults(func=run_command)
args = p.parse_args()
startup(args)
args.func(args)
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -817,24 +817,92 @@
ype(u'%E4%B8%8D%E6%98%8E'))%0A
+ if groups %3C 8:%0A db.session.add(GroupType(u'%E5%9B%BD%E4%BC%9A%E8%AD%B0%E5%93%A1%E9%96%A2%E4%BF%82%E6%94%BF%E6%B2%BB%E5%9B%A3%E4%BD%93'))%0A
db.s
@@ -1503,16 +1503,43 @@
hash='*'
+, email='admin@toumeika.jp'
))%0A
|
2a2d34dced729ca5896b10522ccc41f58278cb28
|
add docstring to argparse_main function
|
main.py
|
main.py
|
#!/usr/bin/env python3
"""
responsible for calling other modules and interacting with the user
To solve the challenge problem, run:
./main.py --count 7 7 -k2 -q2 -b2 -n1
"""
import sys
from time import time as now
import argparse
from pieces import ChessPiece
from solution import (
find_solutions_s,
find_solutions_r,
find_solutions_q,
)
from chess_util import format_board
from cmd_util import input_yesno
from cmd_chess_util import input_problem
def count_or_show_by_generator(gen, count_enable, row_count, col_count):
"""
gen: a generator returned by find_solutions_*
count_enable: bool, only count solutions/configurations, don't show them
"""
if count_enable:
print('Calculating, please wait... (Control+C to cancel)')
tm0 = now()
try:
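            # exhaust the generator; only the count is needed, not the boards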
solution_count = sum(1 for _ in gen)
except KeyboardInterrupt:
print('\nGoodbye')
return
delta = now() - tm0
print('Number of Unique Configurations: %s' % solution_count)
print('Running Time: %.4f seconds' % delta)
else:
print('Found Configurations:\n')
for board in gen:
print(format_board(board, row_count, col_count))
try:
input('Press Enter to see the next, Control+C to exit')
except KeyboardInterrupt:
print('\nGoodbye')
break
def interactive_main():
"""
    ask for the board size and piece counts
calculate and show all possible unique configurations
or just count unique configurations depending on user input
"""
row_count, col_count, count_by_symbol = input_problem()
count_enable = input_yesno(
'Count configurations? [Yes/No] ',
default=True,
)
gen = find_solutions_s(
row_count,
col_count,
count_by_symbol,
)
count_or_show_by_generator(
gen,
count_enable,
row_count,
col_count,
)
def argparse_main():
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument(
action='store',
dest='row_count',
type=int,
help='number of rows in the board',
)
parser.add_argument(
action='store',
dest='col_count',
type=int,
help='number of columns in the board',
)
parser.add_argument(
'-c',
'--count',
dest='count_enable',
action='store_true',
default=False,
help='only count the number of unique configurations, '
'don\'t show them',
)
for cls in ChessPiece.class_list:
plural_name = cls.name + 's'
parser.add_argument(
'-' + cls.symbol.lower(),
'--' + plural_name,
dest=cls.name,
type=int,
default=0,
help='number of %s' % plural_name
)
args = parser.parse_args()
count_by_symbol = {
cls.symbol: getattr(args, cls.name, 0)
for cls in ChessPiece.class_list
}
gen = find_solutions_s(
args.row_count,
args.col_count,
count_by_symbol,
)
count_or_show_by_generator(
gen,
args.count_enable,
args.row_count,
args.col_count,
)
# ______________________ Test Functions ______________________ #
def compare_find_solutions_result():
"""
run and compare the result of 3 implementations of find_solutions
make sure they all return the same set of configurations
with no duplicates
"""
row_count, col_count, count_by_symbol = input_problem()
solution_set_list = []
# solution_set_list is a list of sets, one set for each implementation
func_list = (
find_solutions_r,
find_solutions_q,
find_solutions_s,
)
for func in func_list: # pylint!
solution_set = set()
for board in func(row_count, col_count, count_by_symbol):
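            # canonicalize the board dict into a hashable tuple for set membership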
board_tuple = tuple(sorted(board.items()))
assert board_tuple not in solution_set
solution_set.add(board_tuple)
solution_set_list.append(solution_set)
print('Number of solutions: %s (%s)' % (len(solution_set), func))
assert solution_set_list[1:] == solution_set_list[:-1] # all items equal
def compare_find_solutions_time():
"""
run and compare the running time of 3 implementations of find_solutions
"""
row_count, col_count, count_by_symbol = input_problem()
time_list = []
func_list = (
find_solutions_s,
find_solutions_r,
find_solutions_q,
find_solutions_s,
find_solutions_r,
find_solutions_q,
)
for func in func_list: # pylint!
tm0 = now()
for _ in func(row_count, col_count, count_by_symbol):
pass
delta = now() - tm0
time_list.append(delta)
print('%.4f seconds (%s)' % (delta, func))
if __name__ == '__main__':
if len(sys.argv) > 1:
argparse_main()
else:
interactive_main()
|
Python
| 0.000001
|
@@ -1994,24 +1994,115 @@
rse_main():%0A
+ %22%22%22%0A parses the command line arguments and options, and performs operations%0A %22%22%22%0A
parser =
|
c76d77e8f639ea1eb9c61e41f1e11a4c12f6780e
|
allow lowercase urls
|
main.py
|
main.py
|
#!/usr/bin/env python
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
import pytz
from datetime import datetime
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util, template
import logging
import pprint
import urllib
import re
import json
PB_WIKI = 'dojowebsite'
PB_API_URL = 'http://%s.pbworks.com/api_v2/op/GetPage/page/%s'
CACHE_ENABLED = True
CDN_ENABLED = True
CDN_HOSTNAME = 'http://cdn.hackerdojo.com'
LOCAL_TZ = 'America/Los_Angeles'
if os.environ['SERVER_SOFTWARE'].startswith('Dev'):
CACHE_ENABLED = False
CDN_ENABLED = False
def _request(url, cache_ttl=3600, force=False):
request_cache_key = 'request:%s' % url
failure_cache_key = 'failure:%s' % url
resp = memcache.get(request_cache_key)
if force or not resp or not CACHE_ENABLED:
try:
data = urlfetch.fetch(url)
if 'pbworks.com' in url:
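        # the API response arrives in a JSONP-style wrapper; strip the
        # fixed-length prefix/suffix before parsing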
resp = json.loads(data.content[11:-3])
if "html" in resp:
resp["html"] = re.sub("/w/page/\d*", "", resp["html"])
else:
resp = json.loads(data.content)
memcache.set(request_cache_key, resp, cache_ttl)
memcache.set(failure_cache_key, resp, cache_ttl*10)
except (ValueError, urlfetch.DownloadError), e:
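      # fall back to the longer-lived stale copy kept for upstream failures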
resp = memcache.get(failure_cache_key)
if not resp:
resp = {}
return resp
class PBWebHookHandler(webapp.RequestHandler):
def post(self):
page = self.request.get('page')
if page:
url = PB_API_URL % (PB_WIKI, urllib.pathname2url(page))
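      # an edited page invalidates both its fresh and failure-fallback cache entries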
request_cache_key = 'request:%s' % url
failure_cache_key = 'failure:%s' % url
memcache.delete(request_cache_key)
memcache.delete(failure_cache_key)
self.response.out.write("200 OK")
class IndexHandler(webapp.RequestHandler):
def get(self):
utc_now = pytz.utc.localize(datetime.utcnow())
local_now = utc_now.astimezone(pytz.timezone(LOCAL_TZ))
hour = local_now.hour
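    # crude open-hours heuristic: the space counts as open 9:00-21:59 local time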
if hour > 8 and hour < 22:
open = True
version = os.environ['CURRENT_VERSION_ID']
if CDN_ENABLED:
cdn = CDN_HOSTNAME
self.response.out.write(template.render('templates/index.html', locals()))
class StaffHandler(webapp.RequestHandler):
def get(self):
staff = _request('http://hackerdojo-signin.appspot.com/staffjson')
version = os.environ['CURRENT_VERSION_ID']
if CDN_ENABLED:
cdn = CDN_HOSTNAME
self.response.out.write(template.render('templates/event_staff.html', locals()))
class MainHandler(webapp.RequestHandler):
def get(self, pagename, site = PB_WIKI):
skip_cache = self.request.get('cache') == '0'
version = os.environ['CURRENT_VERSION_ID']
redirect_urls = {
# From: To
'give': 'Give',
'auction': 'Auction',
'Assemble': 'Give',
'Mobile%20Device%20Lab': 'MobileDeviceLab',
'kickstarter': 'http://www.kickstarter.com/projects/384590180/an-events-space-and-a-design-studio-for-hacker-doj',
'Kickstarter': 'http://www.kickstarter.com/projects/384590180/an-events-space-and-a-design-studio-for-hacker-doj',
'KICKSTARTER': 'http://www.kickstarter.com/projects/384590180/an-events-space-and-a-design-studio-for-hacker-doj',
'key': 'http://signup.hackerdojo.com/key',
}
if pagename in redirect_urls:
url = redirect_urls[pagename]
self.redirect(url, permanent=True)
else:
if CDN_ENABLED:
cdn = CDN_HOSTNAME
try:
if not(pagename):
pagename = 'FrontPage'
page = _request(PB_API_URL % (site, pagename), cache_ttl=604800, force=skip_cache)
# Convert quasi-camel-case to spaced words
title = re.sub('([a-z]|[A-Z])([A-Z])', r'\1 \2', pagename)
if page and "name" in page:
self.response.out.write(template.render('templates/content.html', locals()))
else:
raise LookupError
except LookupError:
self.response.out.write(template.render('templates/404.html', locals()))
self.response.set_status(404)
app = webapp.WSGIApplication([
('/api/pbwebhook', PBWebHookHandler),
('/api/event_staff', StaffHandler),
('/', IndexHandler),
('/(.+)', MainHandler)],
debug=True)
|
Python
| 0.005941
|
@@ -670,30 +670,16 @@
= False%0A
-
%0Adef _re
@@ -1214,28 +1214,16 @@
content)
-
%0A
@@ -1945,28 +1945,16 @@
00 OK%22)%0A
-
%0Aclass I
@@ -2929,24 +2929,16 @@
ON_ID'%5D%0A
-
%0A
@@ -3697,32 +3697,16 @@
else:
-
%0A
@@ -3760,24 +3760,16 @@
HOSTNAME
-
%0A
@@ -3954,16 +3954,274 @@
_cache)%0A
+ # fetch a page where a lowercase version may exist%0A if not(page):%0A pagename = memcache.get(pagename.lower())%0A page = _request(PB_API_URL %25 (site, pagename), cache_ttl=604800, force=skip_cache)%0A
@@ -4386,24 +4386,129 @@
e%22 in page:%0A
+ fiveDays = 432000%0A memcache.set(pagename.lower(), pagename, fiveDays)%0A
@@ -4820,25 +4820,9 @@
4)%0A%0A
- %0A
+%0A
%0Aapp
|