| commit (string, len 40) | subject (string, len 1–3.25k) | old_file (string, len 4–311) | new_file (string, len 4–311) | old_contents (string, len 0–26.3k) | lang (string, 3 classes) | proba (float64, 0–1) | diff (string, len 0–7.82k) |
|---|---|---|---|---|---|---|---|
dfb0067affabeb79b6bd225c35cd0b22bd57fbe3
|
use serializer variable for class assignment
|
hs_core/views/resource_metadata_rest_api.py
|
hs_core/views/resource_metadata_rest_api.py
|
import logging
from django.http import QueryDict
from rest_framework.response import Response
from rest_framework.exceptions import ValidationError
from rest_framework import status
from rest_framework import generics
from rest_framework import serializers
from hs_core import hydroshare
from hs_core.models import Contributor, CoreMetaData, Coverage, Creator, Date, \
Format, FundingAgency, Identifier, Subject, Source, Relation
from hs_core.views import utils as view_utils
from hs_core.views.utils import ACTION_TO_AUTHORIZE
logger = logging.getLogger(__name__)
class Identifiers(serializers.DictField):
child = serializers.CharField()
class PartySerializer(serializers.Serializer):
name = serializers.CharField()
description = serializers.URLField(required=False)
organization = serializers.CharField(required=False)
email = serializers.EmailField(required=False)
address = serializers.CharField(required=False)
phone = serializers.CharField(required=False)
homepage = serializers.URLField(required=False)
identifiers = Identifiers(required=False)
class Meta:
model = Creator
fields = {'name', 'description', 'organization', 'email',
'address', 'phone', 'homepage', 'identifiers'}
class CreatorSerializer(PartySerializer):
order = serializers.IntegerField(required=False)
class Meta:
model = Contributor
class DateSerializer(serializers.Serializer):
# term = 'Date'
type = serializers.CharField(required=False)
start_date = serializers.DateTimeField(required=False)
end_date = serializers.DateTimeField(required=False)
class Meta:
model = Date
class CoverageSerializer(serializers.Serializer):
type = serializers.CharField(required=False)
value = serializers.SerializerMethodField(required=False)
class Meta:
model = Coverage
def get_value(self, obj):
return obj.value
class FormatSerializer(serializers.Serializer):
value = serializers.CharField(required=False)
class Meta:
model = Format
class FundingAgencySerializer(serializers.Serializer):
agency_name = serializers.CharField()
award_title = serializers.CharField(required=False)
award_number = serializers.CharField(required=False)
agency_url = serializers.URLField(required=False)
class Meta:
model = FundingAgency
class IdentifierSerializer(serializers.Serializer):
name = serializers.CharField(required=False)
url = serializers.URLField(required=False)
class Meta:
model = Identifier
class SubjectSerializer(serializers.Serializer):
value = serializers.CharField(required=False)
class Meta:
model = Subject
class SourceSerializer(serializers.Serializer):
derived_from = serializers.CharField(required=False)
class Meta:
model = Source
class RelationSerializer(serializers.Serializer):
type = serializers.CharField(required=False)
value = serializers.CharField(required=False)
class Meta:
model = Relation
class CoreMetaDataSerializer(serializers.Serializer):
title = serializers.CharField(required=False)
creators = CreatorSerializer(required=False, many=True)
contributors = PartySerializer(required=False, many=True)
coverages = CoverageSerializer(required=False, many=True)
dates = DateSerializer(required=False, many=True)
description = serializers.CharField(required=False)
formats = FormatSerializer(required=False, many=True)
funding_agencies = FundingAgencySerializer(required=False, many=True)
identifiers = IdentifierSerializer(required=False, many=True)
language = serializers.CharField(required=False)
rights = serializers.CharField(required=False)
type = serializers.CharField(required=False)
publisher = serializers.CharField(required=False)
sources = SourceSerializer(required=False, many=True)
subjects = SubjectSerializer(required=False, many=True)
relations = RelationSerializer(required=False, many=True)
class Meta:
model = CoreMetaData
class MetadataElementsRetrieveUpdate(generics.RetrieveUpdateDestroyAPIView):
"""
Retrieve resource science (Dublin Core) metadata
REST URL: /hsapi/resource/{pk}/scimeta/elements/
HTTP method: GET
:type pk: str
:param pk: id of the resource
:return: resource science metadata as JSON document
:rtype: str
:raises:
NotFound: return json format: {'detail': 'No resource was found for resource id:pk'}
PermissionDenied: return json format: {'detail': 'You do not have permission to perform
this action.'}
REST URL: /hsapi/resource/{pk}/scimeta/elements/
HTTP method: PUT
:type pk: str
:param pk: id of the resource
:type request: JSON formatted string
:param request: resource metadata
:return: updated resource science metadata as JSON document
:rtype: str
:raises:
NotFound: return json format: {'detail': 'No resource was found for resource id':pk}
PermissionDenied: return json format: {'detail': 'You do not have permission to perform
this action.'}
ValidationError: return json format: {parameter-1': ['error message-1'],
'parameter-2': ['error message-2'], .. }
"""
ACCEPT_FORMATS = ('application/json',)
allowed_methods = ('GET', 'PUT')
# Overwritten by resource types with extended metadata
serializer_class = CoreMetaDataSerializer
def get(self, request, pk):
view_utils.authorize(request, pk, needed_permission=ACTION_TO_AUTHORIZE.VIEW_METADATA)
resource = hydroshare.get_resource_by_shortkey(shortkey=pk)
serializer = resource.metadata.serializer
self.serializer_class = resource.metadata.serializer
return Response(data=serializer.data, status=status.HTTP_200_OK)
def put(self, request, pk):
# Update science metadata
resource, _, _ = view_utils.authorize(
request, pk,
needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE)
metadata = []
put_data = request.data.copy()
# convert the QueryDict to dict
if isinstance(put_data, QueryDict):
put_data = put_data.dict()
try:
resource.metadata.parse_for_bulk_update(put_data, metadata)
hydroshare.update_science_metadata(pk=pk, metadata=metadata, user=request.user)
except Exception as ex:
error_msg = {
'resource': "Resource metadata update failed: %s, %s"
% (ex.__class__, ex.message)
}
raise ValidationError(detail=error_msg)
resource = hydroshare.get_resource_by_shortkey(shortkey=pk)
serializer = resource.metadata.serializer
self.serializer_class = resource.metadata.serializer
return Response(data=serializer.data, status=status.HTTP_202_ACCEPTED)
|
Python
| 0
|
@@ -6818,34 +6818,16 @@
class =
-resource.metadata.
serializ
|
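The `diff` cells in this table store character-level hunks with percent-encoded whitespace (`%0A` is a newline, `%22` a double quote), which makes them hard to scan. Decoded, the hunk above replaces a repeated attribute lookup with the local variable assigned on the previous line; a sketch of the effective change at the patched site, reconstructed from the hunk and the `old_contents` cell (the cell shows only one hunk, which may not cover every occurrence):

```python
# Before (as in old_contents):
serializer = resource.metadata.serializer
self.serializer_class = resource.metadata.serializer

# After (decoded from the hunk): reuse the local just assigned
serializer = resource.metadata.serializer
self.serializer_class = serializer
```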
cca2ef0f3700c4eafe66c8f751ecb2fc03318e2b
|
Disable boto3 deprecation warning logs
|
tools/delete_fleet.py
|
tools/delete_fleet.py
|
import sys
from time import sleep
import boto3
from botocore.exceptions import ClientError
def describe_fleets(region, fleet_id):
ec2 = boto3.client('ec2', region_name=region)
response = ec2.describe_fleets(
FleetIds=[
fleet_id
],
)
errors = response['Fleets'][0]['Errors']
instances = response['Fleets'][0]['Instances']
# to ensure we are returning an array anyway
if len(errors) > 0 and len(instances) == 0:
return ['']
return instances[0]['InstanceIds']
def delete_fleet(region, fleet_id):
ec2 = boto3.client('ec2', region_name=region)
response = ec2.delete_fleets(
FleetIds=[
fleet_id,
],
TerminateInstances=True
)
return response['SuccessfulFleetDeletions'][0]['CurrentFleetState']
if __name__ == '__main__':
region = sys.argv[1]
fleet_id = sys.argv[2]
try:
# Delete the fleet
fleet_deleted_states = ["deleted", "deleted_running", "deleted_terminating"]
fleet_state = None
while fleet_state not in fleet_deleted_states:
sleep(5)
fleet_state = delete_fleet(region=region, fleet_id=fleet_id)
print(f"Fleet deleted. Fleet state: {fleet_state}")
# get the instance ids from the fleet
print(describe_fleets(region=region, fleet_id=fleet_id))
except (ClientError, Exception) as e:
print(e)
|
Python
| 0
|
@@ -86,16 +86,67 @@
tError%0A%0A
+boto3.compat.filter_python_deprecation_warnings()%0A%0A
def desc
|
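Decoded, this hunk inserts a single module-level call between the imports and `def describe_fleets`. The call is quoted verbatim from the diff; whether `boto3.compat` still exposes it in current boto3 releases is not something this row establishes:

```python
import sys
from time import sleep

import boto3
from botocore.exceptions import ClientError

# Inserted by the commit, verbatim from the decoded hunk:
boto3.compat.filter_python_deprecation_warnings()
```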
8a6370f7c91fec6c220bc2e438a236816c636341
|
Revert throttle Arlo api calls (#13174)
|
homeassistant/components/arlo.py
|
homeassistant/components/arlo.py
|
"""
This component provides support for Netgear Arlo IP cameras.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/arlo/
"""
import logging
from datetime import timedelta
import voluptuous as vol
from requests.exceptions import HTTPError, ConnectTimeout
from homeassistant.helpers import config_validation as cv
from homeassistant.util import Throttle
from homeassistant.const import CONF_USERNAME, CONF_PASSWORD
REQUIREMENTS = ['pyarlo==0.1.2']
_LOGGER = logging.getLogger(__name__)
CONF_ATTRIBUTION = "Data provided by arlo.netgear.com"
DATA_ARLO = 'data_arlo'
DEFAULT_BRAND = 'Netgear Arlo'
DOMAIN = 'arlo'
NOTIFICATION_ID = 'arlo_notification'
NOTIFICATION_TITLE = 'Arlo Component Setup'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
}),
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
"""Set up an Arlo component."""
conf = config[DOMAIN]
username = conf.get(CONF_USERNAME)
password = conf.get(CONF_PASSWORD)
try:
from pyarlo import PyArlo
arlo = PyArlo(username, password, preload=False)
if not arlo.is_connected:
return False
arlo.update = Throttle(timedelta(seconds=10))(arlo.update)
hass.data[DATA_ARLO] = arlo
except (ConnectTimeout, HTTPError) as ex:
_LOGGER.error("Unable to connect to Netgear Arlo: %s", str(ex))
hass.components.persistent_notification.create(
'Error: {}<br />'
'You will need to restart hass after fixing.'
''.format(ex),
title=NOTIFICATION_TITLE,
notification_id=NOTIFICATION_ID)
return False
return True
|
Python
| 0
|
@@ -200,39 +200,8 @@
ging
-%0Afrom datetime import timedelta
%0A%0Aim
@@ -344,48 +344,8 @@
cv%0A
-from homeassistant.util import Throttle%0A
from
@@ -1202,75 +1202,8 @@
lse%0A
- arlo.update = Throttle(timedelta(seconds=10))(arlo.update)%0A
|
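This revert is pure deletion. Decoded, the three hunks remove the throttling wrapper and the two imports that existed only to support it:

```python
# Removed by hunk 1:
from datetime import timedelta
# Removed by hunk 2:
from homeassistant.util import Throttle
# Removed by hunk 3 (inside setup(), after the is_connected check):
arlo.update = Throttle(timedelta(seconds=10))(arlo.update)
```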
712f1498ae2b605a781896c934778720f451cde2
|
Add voluptuous to Vera.
|
homeassistant/components/vera.py
|
homeassistant/components/vera.py
|
"""
Support for Vera devices.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/vera/
"""
import logging
from collections import defaultdict
from requests.exceptions import RequestException
from homeassistant.util.dt import utc_from_timestamp
from homeassistant.util import convert
from homeassistant.helpers import discovery
from homeassistant.const import (
ATTR_ARMED, ATTR_BATTERY_LEVEL, ATTR_LAST_TRIP_TIME, ATTR_TRIPPED,
EVENT_HOMEASSISTANT_STOP)
from homeassistant.helpers.entity import Entity
REQUIREMENTS = ['pyvera==0.2.15']
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'vera'
VERA_CONTROLLER = None
CONF_EXCLUDE = 'exclude'
CONF_LIGHTS = 'lights'
ATTR_CURRENT_POWER_MWH = "current_power_mwh"
VERA_DEVICES = defaultdict(list)
# pylint: disable=unused-argument, too-many-function-args
def setup(hass, base_config):
"""Common setup for Vera devices."""
global VERA_CONTROLLER
import pyvera as veraApi
config = base_config.get(DOMAIN)
base_url = config.get('vera_controller_url')
if not base_url:
_LOGGER.error(
"The required parameter 'vera_controller_url'"
" was not found in config"
)
return False
VERA_CONTROLLER, _ = veraApi.init_controller(base_url)
def stop_subscription(event):
"""Shutdown Vera subscriptions and subscription thread on exit."""
_LOGGER.info("Shutting down subscriptions.")
VERA_CONTROLLER.stop()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_subscription)
try:
all_devices = VERA_CONTROLLER.get_devices()
except RequestException:
# There was a network related error connecting to the vera controller.
_LOGGER.exception("Error communicating with Vera API")
return False
exclude = config.get(CONF_EXCLUDE, [])
if not isinstance(exclude, list):
_LOGGER.error("'exclude' must be a list of device_ids")
return False
lights_ids = config.get(CONF_LIGHTS, [])
if not isinstance(lights_ids, list):
_LOGGER.error("'lights' must be a list of device_ids")
return False
for device in all_devices:
if device.device_id in exclude:
continue
dev_type = map_vera_device(device, lights_ids)
if dev_type is None:
continue
VERA_DEVICES[dev_type].append(device)
for component in 'binary_sensor', 'sensor', 'light', 'switch', 'lock':
discovery.load_platform(hass, component, DOMAIN, {}, base_config)
return True
def map_vera_device(vera_device, remap):
"""Map vera classes to HA types."""
# pylint: disable=too-many-return-statements
import pyvera as veraApi
if isinstance(vera_device, veraApi.VeraDimmer):
return 'light'
if isinstance(vera_device, veraApi.VeraBinarySensor):
return 'binary_sensor'
if isinstance(vera_device, veraApi.VeraSensor):
return 'sensor'
if isinstance(vera_device, veraApi.VeraArmableDevice):
return 'switch'
if isinstance(vera_device, veraApi.VeraLock):
return 'lock'
if isinstance(vera_device, veraApi.VeraSwitch):
if vera_device.device_id in remap:
return 'light'
else:
return 'switch'
# VeraCurtain: NOT SUPPORTED YET
return None
class VeraDevice(Entity):
"""Representation of a Vera devicetity."""
def __init__(self, vera_device, controller):
"""Initialize the device."""
self.vera_device = vera_device
self.controller = controller
self._name = self.vera_device.name
self.controller.register(vera_device, self._update_callback)
self.update()
def _update_callback(self, _device):
self.update_ha_state(True)
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
attr = {}
if self.vera_device.has_battery:
attr[ATTR_BATTERY_LEVEL] = self.vera_device.battery_level + '%'
if self.vera_device.is_armable:
armed = self.vera_device.is_armed
attr[ATTR_ARMED] = 'True' if armed else 'False'
if self.vera_device.is_trippable:
last_tripped = self.vera_device.last_trip
if last_tripped is not None:
utc_time = utc_from_timestamp(int(last_tripped))
attr[ATTR_LAST_TRIP_TIME] = utc_time.isoformat()
else:
attr[ATTR_LAST_TRIP_TIME] = None
tripped = self.vera_device.is_tripped
attr[ATTR_TRIPPED] = 'True' if tripped else 'False'
power = self.vera_device.power
if power:
attr[ATTR_CURRENT_POWER_MWH] = convert(power, float, 0.0) * 1000
attr['Vera Device Id'] = self.vera_device.vera_device_id
return attr
|
Python
| 0.014603
|
@@ -199,16 +199,42 @@
ltdict%0A%0A
+import voluptuous as vol%0A%0A
from req
@@ -275,17 +275,16 @@
eption%0A%0A
-%0A
from hom
@@ -411,16 +411,74 @@
scovery%0A
+from homeassistant.helpers import config_validation as cv%0A
from hom
@@ -768,16 +768,56 @@
= None%0A%0A
+CONF_CONTROLLER = 'vera_controller_url'%0A
CONF_EXC
@@ -937,16 +937,330 @@
(list)%0A%0A
+VERA_ID_LIST_SCHEMA = vol.Schema(%5Bint%5D)%0A%0ACONFIG_SCHEMA = vol.Schema(%7B%0A DOMAIN: vol.Schema(%7B%0A vol.Required(CONF_CONTROLLER): cv.url,%0A vol.Optional(CONF_EXCLUDE, default=%5B%5D): VERA_ID_LIST_SCHEMA,%0A vol.Optional(CONF_LIGHTS, default=%5B%5D): VERA_ID_LIST_SCHEMA,%0A %7D),%0A%7D, extra=vol.ALLOW_EXTRA)%0A%0A
%0A# pylin
@@ -1528,182 +1528,8 @@
l')%0A
- if not base_url:%0A _LOGGER.error(%0A %22The required parameter 'vera_controller_url'%22%0A %22 was not found in config%22%0A )%0A return False%0A%0A
@@ -2144,307 +2144,51 @@
LUDE
-, %5B%5D)%0A if not isinstance(exclude, list):%0A _LOGGER.error(%22'exclude' must be a list of device_ids%22)%0A return False%0A%0A lights_ids = config.get(CONF_LIGHTS, %5B%5D)%0A if not isinstance(lights_ids, list):%0A _LOGGER.error(%22'lights' must be a list of device_ids%22)%0A return False
+)%0A%0A lights_ids = config.get(CONF_LIGHTS)
%0A%0A
|
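Decoded, the patch swaps the hand-rolled option checks for a declarative voluptuous schema: it adds the two imports, a `CONF_CONTROLLER` constant, and the schema below, then deletes the manual `base_url`/`exclude`/`lights` validation since the schema now supplies defaults and type checks:

```python
import voluptuous as vol
from homeassistant.helpers import config_validation as cv

CONF_CONTROLLER = 'vera_controller_url'

VERA_ID_LIST_SCHEMA = vol.Schema([int])

CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Required(CONF_CONTROLLER): cv.url,
        vol.Optional(CONF_EXCLUDE, default=[]): VERA_ID_LIST_SCHEMA,
        vol.Optional(CONF_LIGHTS, default=[]): VERA_ID_LIST_SCHEMA,
    }),
}, extra=vol.ALLOW_EXTRA)
```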
b221ed2e83cd352b1eec0ad74a3e02946db39197
|
Add an example of yielding a dict in plpy
|
examples/spouse_example/plpy_extractor/udf/ext_people.py
|
examples/spouse_example/plpy_extractor/udf/ext_people.py
|
#! /usr/bin/env python
import ddext
import itertools
# Format of plpy_extractor:
# Anything Write functions "init", "run" will not be accepted.
# In "init", import libraries, specify input variables and return types
# In "run", write your extractor. Return a list containing your results, each item in the list should be a list/tuple of your return types.
# Do not print.
def init():
# SD['json'] = __import__('json')
ddext.import_lib('itertools')
# Input commands MUST HAVE CORRECT ORDER
ddext.input('sentence_id', 'bigint')
ddext.input('words', 'text[]')
ddext.input('ner_tags', 'text[]')
# Returns commands MUST HAVE CORRECT ORDER
ddext.returns('sentence_id', 'bigint')
ddext.returns('start_position', 'int')
# ddext.returns('start_index', 'int')
ddext.returns('length', 'int')
ddext.returns('text', 'text')
def run(sentence_id, words, ner_tags):
# Find phrases that are tagged with PERSON
phrases_indicies = []
start_index = 0
ner_list = list(enumerate(ner_tags))
while True:
sublist = ner_list[start_index:]
next_phrase = list(itertools.takewhile(lambda x: (x[1] in ["PERSON"]), sublist))
if next_phrase:
phrases_indicies.append([x[0] for x in next_phrase])
start_index = next_phrase[-1][0] + 1
elif start_index == len(ner_list)+1: break
else: start_index = start_index + 1
# You can yield a tuple to database
for phrase in phrases_indicies:
yield (sentence_id,
phrase[0],
len(phrase),
" ".join(words[phrase[0]:phrase[-1]+1]))
# # Or you can return a list of tuples
# return [(sentence_id, phrase[0], len(phrase),
# " ".join(words[phrase[0]:phrase[-1]+1])) for phrase in phrases_indicies]
|
Python
| 0.000875
|
@@ -1379,16 +1379,26 @@
a tuple
+or a dict
to datab
@@ -1401,16 +1401,16 @@
atabase%0A
-
for ph
@@ -1438,16 +1438,18 @@
ies:%0A
+ #
yield (
@@ -1461,24 +1461,26 @@
nce_id, %0A
+ #
phrase%5B
@@ -1483,24 +1483,26 @@
ase%5B0%5D, %0A
+ #
len(phr
@@ -1511,16 +1511,18 @@
e), %0A
+ #
%22 %22
@@ -1558,16 +1558,200 @@
-1%5D+1%5D))
+%0A yield %7B%0A 'sentence_id': sentence_id, %0A 'start_position': phrase%5B0%5D, %0A 'text': %22 %22.join(words%5Bphrase%5B0%5D:phrase%5B-1%5D+1%5D),%0A 'length': len(phrase)%0A %7D
%0A%0A # #
|
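Decoded, the patch comments out the tuple `yield` and emits a dict keyed by the column names declared in `init()` instead, which is the example the commit subject promises:

```python
# The tuple form is commented out...
# yield (sentence_id,
#        phrase[0],
#        len(phrase),
#        " ".join(words[phrase[0]:phrase[-1]+1]))
# ...and replaced by a dict keyed by the ddext.returns() names:
yield {
    'sentence_id': sentence_id,
    'start_position': phrase[0],
    'text': " ".join(words[phrase[0]:phrase[-1]+1]),
    'length': len(phrase)
}
```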
b9c93a583b8ebb28c6875513191438bf67595415
|
Fix timezone stuff
|
reports/models.py
|
reports/models.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import re
from bs4 import BeautifulSoup
from django.contrib.auth.models import User
from django.db import models
import pytz
class Report(models.Model):
report_dt = models.DateTimeField(null=True, blank=True)
body = models.TextField()
processed = models.BooleanField(default=False)
email_id = models.CharField(max_length=100, null=True, blank=True)
created_dt = models.DateTimeField(auto_now_add=True)
@property
def fixed_body(self):
# their class names are all preceeded with some random garbage, clean that up
return re.sub(r'(<div class=")(.*)(rss_)(.+?")', r'\1\3\4', self.body)
def create_incidents(self):
soup = BeautifulSoup(self.fixed_body, 'html.parser')
for incident_html in soup.find_all('div', class_='rss_item'):
title = incident_html.find(class_='rss_title').a.text.replace('–', '-')
location, incident_dt, incident_date = None, None, None
body = incident_html.find(class_='rss_description').text
match = re.match(r'^(\d+)\/(\d+)\/(\d+)[\s,]+(\d{1,2}):?(\d{2})', body)
if match:
month, day, year, hour, minute = [int(m) for m in match.groups()]
if year < 2000:
year += 2000
body = '\r\n'.join(body.split('\r\n')[1:])
incident_dt = datetime.datetime(year, month, day, hour, minute)
incident_date = datetime.date(year, month, day)
pytz.timezone('America/Los_Angeles').localize(incident_dt)
pytz.timezone('America/Los_Angeles').localize(incident_date)
if ' - ' in title:
title_split = title.split(' - ')
title = title_split[0]
location = title_split[1]
incident = Incident.objects.create(
title=title,
body=body,
location=location,
report=self,
incident_dt=incident_dt,
incident_date=incident_date,
)
print 'Created incident: {}'.format(incident)
self.processed = True
self.save()
def __unicode__(self):
return '{} - {} incident(s)'.format(self.report_dt, self.incident_set.all().count())
class Station(models.Model):
name = models.CharField(max_length=100)
class Incident(models.Model):
incident_dt = models.DateTimeField(null=True, blank=True)
incident_date = models.DateField(null=True, blank=True)
report = models.ForeignKey(Report)
station = models.ForeignKey(Station, null=True, blank=True)
location = models.CharField(max_length=255, null=True, blank=True)
case = models.CharField(max_length=50, null=True, blank=True)
title = models.CharField(max_length=255)
body = models.CharField(max_length=5000)
arrested = models.BooleanField(default=False)
@property
def icon(self):
lower_title = self.title.lower()
if 'auto' in lower_title:
return 'car'
if 'bicycle' in lower_title or 'bike' in lower_title:
return 'bicycle'
if 'warrant' in lower_title:
return 'person'
if 'theft' in lower_title or 'robbery' in lower_title:
return 'money'
if 'person' in lower_title:
return 'person'
if 'violation' in lower_title or 'obstruct' in lower_title:
return 'ban'
if 'weapon' in lower_title:
return 'cutlery'
def __unicode__(self):
return self.title
class Comment(models.Model):
user = models.ForeignKey(User, null=True, blank=True)
created_dt = models.DateTimeField(auto_now_add=True)
incident = models.ForeignKey(Incident)
text = models.TextField()
|
Python
| 0.000262
|
@@ -1566,82 +1566,21 @@
-pytz.timezone('America/Los_Angeles').localize(incident_dt)%0A
+incident_dt =
pyt
@@ -1632,19 +1632,17 @@
cident_d
-ate
+t
)%0A%0A
|
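The underlying bug: `pytz`'s `localize()` returns a new timezone-aware datetime rather than mutating its argument, so calling it without keeping the result is a no-op (and a `datetime.date` cannot carry a timezone at all, which is why the second call is simply dropped). A minimal runnable illustration of the pattern the patch moves to:

```python
import datetime
import pytz

tz = pytz.timezone('America/Los_Angeles')
incident_dt = datetime.datetime(2015, 6, 1, 14, 30)

tz.localize(incident_dt)                 # result discarded: incident_dt stays naive
incident_dt = tz.localize(incident_dt)   # the fix: keep the return value
print(incident_dt.tzinfo)                # America/Los_Angeles
```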
3010b38a15ca90f51a72e0cf3698ca218aaa144f
|
Remove an execution warning.
|
requests_graph.py
|
requests_graph.py
|
#!/usr/bin/env python3
import sys
import time
import array
import datetime
import requests
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
NB_HOURS = 24
GRANULOMETRY = 15 # must be a divisor of 60
if len(sys.argv) != 2:
print('Syntax: %s file.png' % sys.argv[0])
exit(1)
# Get data
data = requests.get('http://gunicorn9005.ppp.pony.ovh/', params={'limit': 10000}).json()
# Convert to datetime
data = [datetime.datetime(*time.strptime(x[1].split('.')[0], "%Y-%m-%d %H:%M:%S")[:6]) for x in data]
# Compute the difference
now = datetime.datetime.now()
data = [now - x for x in data]
max_ = datetime.timedelta(hours=NB_HOURS)
# Shrink and convert to minutes
data = [x.seconds//(60*GRANULOMETRY) for x in data if x <= max_]
# Compute the height of the bars
requests_per_minute = array.array('I', (0 for x in range(0, 60*NB_HOURS//GRANULOMETRY)))
for x in data:
requests_per_minute[(60*NB_HOURS//GRANULOMETRY) - x - 1] += 1
# Final plot
x = range(0, 60*NB_HOURS//GRANULOMETRY)
plt.plot(x, requests_per_minute, label=None)
plt.title("Requests to the PPP")
plt.xlabel("Time (%s minutes)" % str(GRANULOMETRY))
plt.ylabel("Requests")
plt.legend()
plt.savefig(sys.argv[1])
|
Python
| 0.000005
|
@@ -1164,16 +1164,17 @@
uests%22)%0A
+#
plt.lege
|
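Decoded, the hunk just comments out `plt.legend()`. The only plotted line is created with `label=None`, so there are no legend entries, and matplotlib warns when `legend()` finds no labeled artists:

```python
plt.plot(x, requests_per_minute, label=None)  # nothing is labeled...
# plt.legend()                                # ...so the call is commented out
```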
4daba5e5f5377fd6e068b4b7059b1bd78819aa53
|
fix ZeroDivisionError
|
statistics.py
|
statistics.py
|
#
# A Class that couples samples from a simulation
# with some statistics functions
#
# Kevin Greenan (kmgreen@cs.ucsc.edu)
#
# Improved by Min Fu (fumin@hust.edu.cn)
from mpmath import *
import random
#
# A Class that incapsulates a set of samples with
# operations over those samples (i.e. statistics)
#
class Samples:
#
# Construct new instance with a list of samples
#
# @param samples: a set of samples, most observed in simulation
#
def __init__(self):
self.byte_sum = mpf(0)
self.prob_sum = mpf(0)
self.num_samples = 0
#
# A static table used to estimate the confidence
# interval around a sample mean
#
self.conf_lvl_lku = {}
self.conf_lvl_lku["0.80"] = mpf(1.281)
self.conf_lvl_lku["0.85"] = mpf(1.440)
self.conf_lvl_lku["0.90"] = mpf(1.645)
self.conf_lvl_lku["0.95"] = mpf(1.960)
self.conf_lvl_lku["0.995"] = mpf(2.801)
self.byte_mean = None
self.byte_dev = None
self.byte_ci = None
self.byte_re = None
# used to calculate the probability of data loss
self.prob_mean = None
self.prob_dev = None
self.prob_ci = None
self.prob_re = None
#
# Calculate the sample mean based on the samples for this instance
#
def calcMean(self):
self.byte_mean = self.byte_sum / self.num_samples
self.prob_mean = self.prob_sum / self.num_samples
#
# Calculate the standard deviation based on the samples for this instance
#
def calcStdDev(self, samples):
if self.byte_dev == None:
self.calcMean()
sum_1 = (self.num_samples - len(samples)) * abs(power(0 - self.byte_mean, 2))
for sample in samples:
sum_1 += abs(power(sample - self.byte_mean, 2))
sum_2 = (self.num_samples - len(samples)) * abs(power(0 - self.prob_mean, 2)) + len(samples) * abs(power(1 - self.prob_mean, 2))
self.byte_dev = sqrt((mpf(1)/(self.num_samples-1)) * sum_1)
self.prob_dev = sqrt((mpf(1)/(self.num_samples-1)) * sum_2)
#
# Calculate the confidence interval around the sample mean
#
# @param conf_level: the probability that the mean falls within the interval
#
def calcConfInterval(self, conf_level, samples):
if conf_level not in self.conf_lvl_lku.keys():
print "%s not a valid confidence level!" % conf_level
return None
self.calcStdDev(samples)
self.byte_ci = abs(self.conf_lvl_lku[conf_level] * (self.byte_dev / sqrt(self.num_samples)))
self.prob_ci = abs(self.conf_lvl_lku[conf_level] * (self.prob_dev / sqrt(self.num_samples)))
#
# Calculate the relative error
#
# self.conf_lvl_lku[conf_level] * sqrt(Var)/sqrt(num_samples) / mean
#
def calcRE(self, conf_level, samples):
if self.byte_mean == 0:
return None
self.calcConfInterval(conf_level, samples)
self.byte_re = self.byte_ci / self.byte_mean
self.prob_re = self.prob_ci / self.prob_mean
# zeros have been eliminated
def calcResults(self, conf_level, samples, sample_num):
self.num_samples = sample_num
self.prob_sum += len(samples)
for sample in samples:
self.byte_sum += sample
self.calcRE(conf_level, samples)
#
# Generate samples from a known distribution and verify the statistics
#
def test():
num_samples = 1000
samples = []
mean = 0.5
std_dev = 0.001
for i in range(num_samples):
samples.append(random.gauss(mean, std_dev))
s = Samples(1000)
s.calcResults("0.9", samples)
print "Mean: %s (%s): " % (s.calcMean(), mean)
print "Std Dev: %s (%s): " % (s.calcStdDev(), std_dev)
print "Conf. Interval: (%s, %s)" % s.calcConfInterval("0.995")
(a,b,c,d) = s.getResults()
print "Mean: %s (%s): " % (a, mean)
print "Conf. Interval: (%s, %s)" % (b,c)
print "Relative Error: (%s)" % d
if __name__ == "__main__":
test()
|
Python
| 0.000411
|
@@ -2926,32 +2926,93 @@
evel, samples):%0A
+ %0A self.calcConfInterval(conf_level, samples)%0A%0A
if self.
@@ -3043,31 +3043,31 @@
-return None%0A
+self.byte_re = 0%0A
-%0A
@@ -3077,47 +3077,38 @@
elf.
-calcConfInterval(conf_level, samples)%0A%0A
+prob_re = 0%0A else:%0A
@@ -3152,24 +3152,28 @@
f.byte_mean%0A
+
self
|
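Decoded, the patch reorders `calcRE`: the old guard ran before `calcConfInterval`, at which point `self.byte_mean` was still `None` (so the `== 0` check never fired) and the later division could raise `ZeroDivisionError`. The new version computes the interval first, then zeroes the relative errors when the mean is zero:

```python
def calcRE(self, conf_level, samples):
    # Compute means/deviations/CI first, so byte_mean is a real number
    self.calcConfInterval(conf_level, samples)

    if self.byte_mean == 0:
        self.byte_re = 0
        self.prob_re = 0
    else:
        self.byte_re = self.byte_ci / self.byte_mean
        self.prob_re = self.prob_ci / self.prob_mean
```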
a240cbaa13be8682e5611241634a761df581efff
|
fix format
|
sw-project.py
|
sw-project.py
|
# Import the SDK
import facebook
# import the secret token
import secret
# For date and time operations
from datetime import datetime, date, time
# open connection
g = facebook.GraphAPI(secret.ACCESS_TOKEN)
# retrieve friends
friends = g.get_connections("me", "friends")['data']
# retrieve their likes
likes = { friend['name'] : g.get_connections(friend['id'], "likes")['data']
for friend in friends }
statuses = { friend['name'] : g.get_connections(friend['id'], "statuses")['data']
for friend in friends }
# take a look at a 'created_time' value of a random like, cf http://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior
fb_date_format = "%Y-%m-%dT%H:%M:%S+0000"
likes_times = []
for friend in likes:
for like in likes[friend]:
likes_times.append(datetime.strptime(like['created_time'], fb_date_format))
statuses_times = []
for friend in statuses:
for status in statuses[friend]:
statuses_times.append(datetime.strptime(status['updated_time'], fb_date_format))
# Retrieve the number of Statuses per day for the last 7 days
now = datetime.now()
# How many...
seconds_per_week = 7 * 24 * 60 * 60
statuses_last_week = [0, 0, 0, 0, 0, 0, 0]
for status_time in statuses_times:
if (now - status_time).total_seconds() < seconds_per_week:
statuses_last_week[status_time.weekday()] += 1
likes_last_week = [0, 0, 0, 0, 0, 0, 0]
for like_time in likes_times:
if (now - like_time).total_seconds() < seconds_per_week:
likes_last_week[like_time.weekday()] += 1
|
Python
| 0.00006
|
@@ -303,25 +303,24 @@
es%0Alikes = %7B
-
friend%5B'name
@@ -313,33 +313,32 @@
%7Bfriend%5B'name'%5D
-
: g.get_connecti
@@ -364,32 +364,33 @@
likes%22)%5B'data'%5D%0A
+
for frie
@@ -402,17 +402,16 @@
friends
-
%7D%0A%0Astatu
@@ -417,17 +417,16 @@
uses = %7B
-
friend%5B'
@@ -431,17 +431,16 @@
%5B'name'%5D
-
: g.get_
@@ -485,32 +485,36 @@
'data'%5D%0A
+
for friend in fr
@@ -518,17 +518,16 @@
friends
-
%7D%0A# take
@@ -741,16 +741,18 @@
likes:%0A
+
for li
@@ -768,24 +768,28 @@
es%5Bfriend%5D:%0A
+
likes_ti
|
bdad53692d9728ff8f7a33a2549f680940834292
|
Add helpful comment
|
robot_algorithm.py
|
robot_algorithm.py
|
class RobotRuntimeException(BaseException):
"""
Generic runtime exception for game interpreter
"""
class RobotAlgorithm:
def __init__(self, robot, raw_program):
self.robot = robot
commands = self.__to_commands_list(raw_program)
stack = self.__to_stack(commands)
self.program = self.__translate(stack)
self.current_step = 0
def run_next_command(self):
if self.current_step >= len(self.program):
self.current_step = 0
statement = self.program[self.current_step]
cmd = statement[0]
arg = statement[1] if len(statement) > 1 else ''
skip = statement[2] if len(statement) > 2 else 0
while cmd == 'if':
self.current_step += 1
passed = self.__evaluate(arg)
if passed == False:
self.current_step += skip
if self.current_step >= len(self.program):
cmd = None
break
statement = self.program[self.current_step]
cmd = statement[0]
arg = statement[1] if len(statement) > 1 else ''
skip = statement[2] if len(statement) > 2 else 0
if cmd == 'rl':
self.robot.rotate(-90)
elif cmd == 'rr':
self.robot.rotate(90)
elif cmd == 'fd':
self.robot.move_forward()
elif cmd =='sh':
self.robot.shoot(15)
else:
raise RobotRuntimeException('Unknown command - {}'.format(cmd))
self.current_step += 1
def __evaluate(self, condition):
value = not condition.startswith('!')
condition = condition.strip('!')
if condition == 'el':
return self.robot.enemy_left() == value
if condition == 'er':
return self.robot.enemy_right() == value
if condition == 'ef':
return self.robot.enemy_front() == value
if condition == 'fl':
return self.robot.friend_left() == value
if condition == 'fr':
return self.robot.friend_right() == value
if condition == 'ff':
return self.robot.friend_front() == value
if condition == 'wl':
return self.robot.wall_left() == value
if condition == 'wr':
return self.robot.wall_right() == value
if condition == 'wf':
return self.robot.wall_front() == value
raise RobotRuntimeException('Unknown condition - {}'.format(condition))
def __translate(self, block, result=None, entry_condition=None):
if not result:
result = []
while len(block) > 0:
statement = filter(None, block.pop().split(' '))
func = statement[0]
arg = statement[1] if len(statement) > 1 else ''
if func == 'end':
return result
elif func == 'if':
skip = self.__get_if_block_length(block)
result.append([func, arg, skip])
self.__translate(block, result, arg)
elif func == 'else':
skip = self.__get_if_block_length(block)
result.append(['if', '!{}'.format(entry_condition), skip])
self.__translate(block, result)
return result
else:
try:
count = int(arg) if arg <> '' else 1
except ValueError:
raise RobotRuntimeException('Invalid argument - {}. Expected argument of type int.'.format(arg))
for _ in range(count):
result.append([func.strip()])
return result
def __get_if_block_length(self, block):
section = list(reversed(block))
ifs = 0
skip = 0
ends = 0
repeats = 0
for skip in range(0, len(section) - 1):
inner = filter(None, section[skip].split(' '))
inner_cmd = inner[0]
if inner_cmd == 'if':
ifs += 1
elif inner_cmd == 'end':
if ifs == 0:
break
ends += 1
ifs -= 1
elif inner_cmd == 'else':
if ifs == 0:
break
ifs -= 1
elif len(inner) > 1:
inner_arg = inner[1]
try:
repeats += (int(inner_arg) - 1)
except ValueError:
raise RobotRuntimeException('Invalid argument - {}. Expected argument of type int.'.format(inner_arg))
skip += 1
return skip - ends + repeats
def __to_stack(self, collection):
return list(reversed(collection))
def __to_commands_list(self, raw_program):
return list(filter(None, map(lambda x: x.strip().lower(), raw_program.split('\n'))))
|
Python
| 0
|
@@ -5084,24 +5084,78 @@
collection))
+ # in Python, pop() removes the last entry of the list
%0A %0A
|
e99239184cffbdc1ca08ba0050f6e4f23e1155fd
|
Allow import error to propagate up
|
romanesco/spark.py
|
romanesco/spark.py
|
import six
import romanesco
import os
import sys
from ConfigParser import ConfigParser, NoOptionError
def setup_spark_env():
# Setup pyspark
try:
spark_home = romanesco.config.get('spark', 'spark_home')
# If not configured try the environment
if not spark_home:
spark_home = os.environ.get('SPARK_HOME')
if not spark_home:
raise Exception('spark_home must be set or SPARK_HOME must be set in '
'the environment')
# Need to set SPARK_HOME
os.environ['SPARK_HOME'] = spark_home
if not os.path.exists(spark_home):
raise Exception('spark_home is not a valid directory')
except NoOptionError:
raise Exception('spark_home must be configured')
sys.path.append(os.path.join(spark_home, 'python'))
sys.path.append(os.path.join(spark_home, 'bin'))
# Check that we can import SparkContext
try:
from pyspark import SparkConf, SparkContext
except Exception as ex:
six.raise_from(Exception('Unable to create SparkContext, check Spark '
'installation'), ex)
def create_spark_context(task_spark_conf):
from pyspark import SparkConf, SparkContext
# Set can spark configuration parameter user has specified
spark_conf = SparkConf()
for (name, value) in romanesco.config.items('spark'):
spark_conf.set(name, value)
# Override with any task specific configuration
for (name, value) in task_spark_conf.items():
spark_conf.set(name, value)
# Build up the context, using the master URL
sc = SparkContext(conf=spark_conf)
return sc
|
Python
| 0
|
@@ -932,29 +932,16 @@
Context%0A
- try:%0A
from
@@ -984,169 +984,8 @@
ext%0A
- except Exception as ex:%0A six.raise_from(Exception('Unable to create SparkContext, check Spark '%0A 'installation'), ex)%0A
%0A%0Ade
|
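Decoded, the patch simply unwraps the import so an `ImportError` propagates with its real traceback instead of being re-raised as a generic `Exception`:

```python
# Before:
try:
    from pyspark import SparkConf, SparkContext
except Exception as ex:
    six.raise_from(Exception('Unable to create SparkContext, check Spark '
                             'installation'), ex)

# After:
from pyspark import SparkConf, SparkContext
```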
7a81c289d944bad4505a51c80b701f5f11159787
|
stop bandwagon leaving temp files around
|
apps/bandwagon/tests/test_tasks.py
|
apps/bandwagon/tests/test_tasks.py
|
import os
import shutil
import tempfile
from django.conf import settings
from nose.tools import eq_
from PIL import Image
from amo.tests.test_helpers import get_image_path
from bandwagon.tasks import resize_icon
def test_resize_icon():
somepic = get_image_path('mozilla.png')
src = tempfile.NamedTemporaryFile(mode='r+w+b', suffix=".png",
delete=False)
dest = tempfile.NamedTemporaryFile(mode='r+w+b', suffix=".png")
# resize_icon removes the original
shutil.copyfile(somepic, src.name)
src_image = Image.open(src.name)
eq_(src_image.size, (82, 31))
resize_icon(src.name, dest.name)
dest_image = Image.open(dest.name)
eq_(dest_image.size, (32, 12))
assert not os.path.exists(src.name)
def test_resize_icon_poorly():
"""If we attempt to set the src/dst, we do nothing."""
somepic = get_image_path('mozilla.png')
src = tempfile.NamedTemporaryFile(mode='r+w+b', suffix=".png",
delete=False)
shutil.copyfile(somepic, src.name)
src_image = Image.open(src.name)
eq_(src_image.size, (82, 31))
resize_icon(src.name, src.name)
# assert nothing happenned
src_image = Image.open(src.name)
eq_(src_image.size, (82, 31))
|
Python
| 0
|
@@ -392,24 +392,47 @@
delete=False
+, dir=settings.TMP_PATH
)%0A dest =
@@ -487,16 +487,78 @@
x=%22.png%22
+,%0A dir=settings.TMP_PATH
)%0A%0A #
@@ -1109,16 +1109,39 @@
te=False
+, dir=settings.TMP_PATH
)%0A sh
|
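Decoded, all three `NamedTemporaryFile` calls gain `dir=settings.TMP_PATH`, so the files created with `delete=False` land in a directory the project manages rather than the system temp dir:

```python
src = tempfile.NamedTemporaryFile(mode='r+w+b', suffix=".png",
                                  delete=False, dir=settings.TMP_PATH)
dest = tempfile.NamedTemporaryFile(mode='r+w+b', suffix=".png",
                                   dir=settings.TMP_PATH)
```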
4e7bc1dc4cc571f09667a9b29ceff8b5acdfbb13
|
Drop supplementary variables from formula
|
apps/metricsmanager/serializers.py
|
apps/metricsmanager/serializers.py
|
from rest_framework import serializers
from .models import *
from .formula import validate_formula
from .formula import ComputeSemantics
from drf_compound_fields import fields as compound_fields
class MetricSerializer(serializers.ModelSerializer):
formula = serializers.CharField()
creator_path = serializers.Field(source='creator_path')
def validate(self, attrs):
"""
Check formula and that provided mappings cover all variables and filter
supplementary mappings.
"""
validate_formula(attrs['formula'], attrs['variables'])
return attrs
class Meta:
model = Metric
class OperationalizeMappingSerializer(serializers.Serializer):
variable = serializers.RegexField("__[0-9]+__")
dataset = serializers.IntegerField(min_value=0)
def restore_object(self, validated_data, instance=None):
return (validated_data['variable'], validated_data['dataset'])
class OperationalizeSerializer(serializers.Serializer):
title = serializers.CharField(max_length=100)
acronym = serializers.CharField(max_length=20)
datasets = compound_fields.ListField(OperationalizeMappingSerializer())
def restore_object(self, validated_data, instance=None):
return {
"title": validated_data["title"],
"acronym": validated_data["acronym"],
"datasets": dict(validated_data['datasets'])
}
class NormalizerSerializer(serializers.Serializer):
name = serializers.CharField(max_length=100)
acronym = serializers.CharField(max_length=20)
description = serializers.CharField(max_length=500)
arguments = serializers.Field(source="get_arguments")
|
Python
| 0
|
@@ -516,16 +516,33 @@
%0A
+ variables_used =
validat
@@ -584,24 +584,284 @@
variables'%5D)
+%0A%0A # Accept if too many vars are provided and filter them here%0A attrs%5B'variables'%5D = %7B var_name: value for var_name, value%0A in attrs%5B'variables'%5D.items()%0A if var_name in variables_used %7D
%0A ret
|
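Decoded, `validate` now keeps the return value of `validate_formula` and filters the supplied mappings down to the variables the formula actually uses. That `validate_formula` returns the set of used variable names is implied by the hunk, not shown in this row:

```python
def validate(self, attrs):
    variables_used = validate_formula(attrs['formula'], attrs['variables'])

    # Accept if too many vars are provided and filter them here
    attrs['variables'] = { var_name: value for var_name, value
                           in attrs['variables'].items()
                           if var_name in variables_used }
    return attrs
```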
79bd9ff614f48d82e05625f61c9b5960b9459ea0
|
Fix indent error
|
salt/states/git.py
|
salt/states/git.py
|
'''
Interaction with Git repositories.
==================================
NOTE: This modules is under heavy development and the API is subject to change.
It may be replaced with a generic VCS module if this proves viable.
Important, before using git over ssh, make sure your remote host fingerprint
exists in "~/.ssh/known_hosts" file.
.. code-block:: yaml
https://github.com/saltstack/salt.git:
git.latest:
- rev: develop
- target: /tmp/salt
'''
import logging
import os
import shutil
log = logging.getLogger(__name__)
def __virtual__():
'''
Only load if git is available
'''
return 'git' if __salt__['cmd.has_exec']('git') else False
def latest(name,
rev=None,
target=None,
runas=None,
force=None,
submodules=False,
):
'''
Make sure the repository is cloned to the given directory and is up to date
name
Address of the remote repository as passed to "git clone"
rev
The remote branch, tag, or revision ID to checkout after
clone / before update
target
Name of the target directory where repository is about to be cloned
runas
Name of the user performing repository management operations
force
Force git to clone into pre-existing directories (deletes contents)
submodules
Update submodules on clone or branch change (Default: False)
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
if not target:
return _fail(ret, '"target" option is required')
if os.path.isdir(target) and os.path.isdir('{0}/.git'.format(target)):
# git pull is probably required
log.debug(
'target {0} is found, "git pull" is probably required'.format(
target)
)
try:
current_rev = __salt__['git.revision'](target, user=runas)
#only do something, if the specified rev differs from the
#current_rev
if rev == current_rev:
new_rev = current_rev
else:
if __opts__['test']:
return _neutral_test(
ret,
('Repository {0} update is probably required (current '
'revision is {1})').format(target, current_rev))
# check if rev is already present in repo and git-fetch otherwise
if rev:
cmd = "git rev-parse "+rev
retcode = __salt__['cmd.retcode'](cmd, cwd=target, runas=runas)
if 0 != retcode: --dif
__salt__['git.fetch'](target, user=runas)
__salt__['git.checkout'](target, rev, user=runas)
# check if we are on a branch to merge changes
cmd = "git symbolic-ref -q HEAD > /dev/null"
retcode = __salt__['cmd.retcode'](cmd, cwd=target, runas=runas)
if 0 == retcode:
__salt__['git.pull'](target, user=runas)
if submodules:
__salt__['git.submodule'](target, user=runas,
opts='--recursive')
new_rev = __salt__['git.revision'](cwd=target, user=runas)
except Exception as exc:
return _fail(
ret,
str(exc))
if current_rev != new_rev:
log.info('Repository {0} updated: {1} => {2}'.format(target,
current_rev,
new_rev))
ret['comment'] = 'Repository {0} updated'.format(target)
ret['changes']['revision'] = '{0} => {1}'.format(
current_rev, new_rev)
else:
if os.path.isdir(target):
# git clone is required, target exists but force is turned on
if force:
log.debug(
'target {0} found, but not a git repository. Since force option'
' is in use, deleting.'.format(target))
shutil.rmtree(target)
# git clone is required, but target exists and is non-empty
elif os.listdir(target):
return _fail(ret, 'Directory exists, is non-empty, and force '
'option not in use')
# git clone is required
log.debug(
'target {0} is not found, "git clone" is required'.format(
target))
if __opts__['test']:
return _neutral_test(
ret,
'Repository {0} is about to be cloned to {1}'.format(
name, target))
try:
# make the clone
__salt__['git.clone'](target, name, user=runas)
if rev:
__salt__['git.checkout'](target, rev, user=runas)
if submodules:
__salt__['git.submodule'](target, user=runas, opts='--recursive')
new_rev = __salt__['git.revision'](cwd=target, user=runas)
except Exception as exc:
return _fail(
ret,
str(exc))
message = 'Repository {0} cloned to {1}'.format(name, target)
log.info(message)
ret['comment'] = message
ret['changes']['new'] = name
ret['changes']['revision'] = new_rev
return ret
def present(name, bare=True, runas=None, force=False):
'''
Make sure the repository is present in the given directory
name
Name of the directory where the repository is about to be created
bare
Create a bare repository (Default: True)
runas
Name of the user performing repository management operations
force
Force create a new repository into an pre-existing non-git directory (deletes contents)
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
# If the named directory is a git repo return True
if os.path.isdir(name):
if bare and os.path.isfile('{0}/HEAD'.format(name)):
return ret
elif not bare and os.path.isdir('{0}/.git'.format(name)):
return ret
# Directory exists and is not a git repo, if force is set destroy the
# directory and recreate, otherwise throw an error
elif not force and os.listdir(name):
return _fail(ret,
'Directory which does not contain a git repo '
'is already present at {0}. To delete this '
'directory and create a fresh git repo set '
'force: True'.format(name))
# Run test is set
if __opts__['test']:
ret['changes']['new repository'] = name
return _neutral_test(ret, 'New git repo set for creation at {0}'.format(name))
if force and os.path.isdir(name):
shutil.rmtree(name)
opts = '--bare' if bare else ''
__salt__['git.init'](cwd=name, user=runas, opts=opts)
message = 'Initialized repository {0}'.format(name)
log.info(message)
ret['changes']['new repository'] = name
ret['comment'] = message
return ret
def _fail(ret, comment):
ret['result'] = False
ret['comment'] = comment
return ret
def _neutral_test(ret, comment):
ret['result'] = None
ret['comment'] = comment
return ret
|
Python
| 0.000033
|
@@ -2659,14 +2659,8 @@
ode:
- --dif
%0A
|
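The "indent error" is the stray ` --dif` pasted after the retcode check in `old_contents`: it turns the `if` into a one-line statement, which makes the indented `git.fetch` call below it an IndentationError. Decoded, the hunk deletes just those six characters:

```python
retcode = __salt__['cmd.retcode'](cmd, cwd=target, runas=runas)
if 0 != retcode:   # was: `if 0 != retcode: --dif`
    __salt__['git.fetch'](target, user=runas)
```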
f98926efc99e4a0f5bd772028375a8c84937a8d8
|
Add versioning support to the pkg state
|
salt/states/pkg.py
|
salt/states/pkg.py
|
'''
Package Management
==================
Salt can manage software packages via the pkg state module, packages can be
set up to be installed, latest, removed and purged. Package management
declarations are typically rather simple:
.. code-block:: yaml
vim:
pkg:
- installed
'''
import logging
import os
from distutils.version import LooseVersion
logger = logging.getLogger(__name__)
def installed(name, refresh=False, repo='', skip_verify=False):
'''
Verify that the package is installed, and only that it is installed. This
state will not upgrade an existing package and only verify that it is
installed
name
The name of the package to install
repo
Specify a non-default repository to install from
skip_verify : False
Skip the GPG verification check for the package to be installed
Usage::
httpd:
- pkg
- installed
- repo: mycustomrepo
- skip_verify: True
'''
rtag = __gen_rtag()
if __salt__['pkg.version'](name):
return {'name': name,
'changes': {},
'result': True,
'comment': 'Package {0} is already installed'.format(name)}
if refresh or os.path.isfile(rtag):
changes = __salt__['pkg.install'](name,
True,
repo=repo,
skip_verify=skip_verify)
if os.path.isfile(rtag):
os.remove(rtag)
else:
changes = __salt__['pkg.install'](name,
repo=repo,
skip_verify=skip_verify)
if not changes:
return {'name': name,
'changes': changes,
'result': False,
'comment': 'Package {0} failed to install'.format(name)}
return {'name': name,
'changes': changes,
'result': True,
'comment': 'Package {0} installed'.format(name)}
def latest(name, refresh=False, repo='', skip_verify=False):
'''
Verify that the named package is installed and the latest available
package. If the package can be updated this state function will update
the package. Generally it is better for the installed function to be
used, as ``latest`` will update the package the package whenever a new
package is available
name
The name of the package to maintain at the latest available version
repo : (default)
Specify a non-default repository to install from
skip_verify : False
Skip the GPG verification check for the package to be installed
'''
rtag = __gen_rtag()
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
version = __salt__['pkg.version'](name)
avail = __salt__['pkg.available_version'](name)
if not version:
# Net yet installed
has_newer = True
elif not avail:
# Already at latest
has_newer = False
else:
try:
has_newer = LooseVersion(avail) > LooseVersion(version)
except AttributeError:
logger.debug("Error comparing versions for '%s' (%s > %s)",
name, avail, version)
ret['comment'] = "No version could be retrieved for '{0}'".format(name)
return ret
if has_newer:
if refresh or os.path.isfile(rtag):
ret['changes'] = __salt__['pkg.install'](name,
True,
repo=repo,
skip_verify=skip_verify)
if os.path.isfile(rtag):
os.remove(rtag)
else:
ret['changes'] = __salt__['pkg.install'](name,
repo=repo,
skip_verify=skip_verify)
if ret['changes']:
ret['comment'] = 'Package {0} upgraded to latest'.format(name)
ret['result'] = True
else:
ret['comment'] = 'Package {0} failed to install'.format(name)
ret['result'] = False
return ret
else:
ret['comment'] = 'Package {0} already at latest'.format(name)
ret['result'] = True
return ret
def removed(name):
'''
Verify that the package is removed, this will remove the package via
the remove function in the salt pkg module for the platform.
name
The name of the package to be removed
'''
changes = {}
if not __salt__['pkg.version'](name):
return {'name': name,
'changes': {},
'result': True,
'comment': 'Package {0} is not installed'.format(name)}
else:
changes['removed'] = __salt__['pkg.remove'](name)
if not changes:
return {'name': name,
'changes': changes,
'result': False,
'comment': 'Package {0} failed to remove'.format(name)}
return {'name': name,
'changes': changes,
'result': True,
'comment': 'Package {0} removed'.format(name)}
def purged(name):
'''
Verify that the package is purged, this will call the purge function in the
salt pkg module for the platform.
name
The name of the package to be purged
'''
changes = {}
if not __salt__['pkg.version'](name):
return {'name': name,
'changes': {},
'result': True,
'comment': 'Package {0} is not installed'.format(name)}
else:
changes['removed'] = __salt__['pkg.purge'](name)
if not changes:
return {'name': name,
'changes': changes,
'result': False,
'comment': 'Package {0} failed to purge'.format(name)}
return {'name': name,
'changes': changes,
'result': True,
'comment': 'Package {0} purged'.format(name)}
def mod_init(low):
'''
Refresh the package database here so that it only needs to happen once
'''
if low['fun'] == 'installed' or low['fun'] == 'latest':
rtag = __gen_rtag()
if not os.path.exists(rtag):
open(rtag, 'w+').write('')
return True
else:
return False
def __gen_rtag():
'''
Return the location of the refresh tag
'''
return os.path.join(__opts__['cachedir'], 'pkg_refresh')
|
Python
| 0
|
@@ -291,16 +291,37 @@
led%0A'''%0A
+# Import python ilbs%0A
import l
@@ -434,24 +434,38 @@
talled(name,
+ version=None,
refresh=Fal
@@ -1050,18 +1050,22 @@
g()%0A
-if
+cver =
__salt_
@@ -1078,33 +1078,364 @@
.version'%5D(name)
-:
+%0A if cver == version:%0A # The package is installed and is the correct version%0A return %7B'name': name,%0A 'changes': %7B%7D,%0A 'result': True,%0A 'comment': 'Package %7B0%7D is already installed and is the correct version'.format(name)%7D%0A elif cver:%0A # The package is installed
%0A return
@@ -1700,32 +1700,75 @@
True,%0A
+ version=version,%0A
@@ -1902,32 +1902,32 @@
rtag)%0A else:%0A
-
changes
@@ -1950,32 +1950,75 @@
install'%5D(name,%0A
+ version=version,%0A
|
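Decoded, the headline change adds a `version` argument to `installed` and short-circuits when the installed version already matches; both `pkg.install` calls also gain `version=version`. A sketch of the patched entry, with comments taken from the decoded hunks (docstring and refresh logic elided):

```python
def installed(name, version=None, refresh=False, repo='', skip_verify=False):
    rtag = __gen_rtag()
    cver = __salt__['pkg.version'](name)
    if cver == version:
        # The package is installed and is the correct version
        return {'name': name, 'changes': {}, 'result': True,
                'comment': ('Package {0} is already installed and is the '
                            'correct version').format(name)}
    elif cver:
        # The package is installed
        return {'name': name, 'changes': {}, 'result': True,
                'comment': 'Package {0} is already installed'.format(name)}
    # ... install paths unchanged except for the added version=version kwarg
```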
c8847c21b724e4875e0cafde5bbf85409c351754
|
update docstrings for the pkg state
|
salt/states/pkg.py
|
salt/states/pkg.py
|
'''
State enforcing for packages
'''
def installed(name):
'''
Verify that the package is installed, return the packages changed in the
operation and a bool if the job was sucessfull
'''
if __salt__['pkg.version'](name):
return {'name': name,
'changes': {},
'result': True,
'comment': 'Package ' + name + ' is already installed'}
changes = __salt__['pkg.install'](name)
if not changes:
return {'name': name,
'changes': changes,
'result': False,
'comment': 'Package ' + name + ' failed to install'}
return {'name': name,
'changes': changes,
'result': True,
'comment': 'Package ' + name + ' installed'}
def latest(name):
'''
Verify that the latest package is installed
'''
changes = {}
version = __salt__['pkg.version'](name)
avail = __salt__['pkg.available_version'](name)
if avail > version:
changes = __salt__['pkg.install'](name, True)
if not changes:
return {'name': name,
'changes': changes,
'result': False,
'comment': 'Package ' + name + ' failed to install'}
return {'name': name,
'changes': changes,
'result': True,
'comment': 'Package ' + name + ' installed'}
def removed(name):
'''
Verify that the package is removed
'''
if not __salt__['pkg.version'](name):
return {'name': name,
'changes': {},
'result': True,
'comment': 'Package ' + name + ' is not installed'}
else:
changes = __salt__['pkg.remove'](name)
if not changes:
return {'name': name,
'changes': changes,
'result': False,
'comment': 'Package ' + name + ' failed to remove'}
return {'name': name,
'changes': changes,
'result': True,
'comment': 'Package ' + name + ' removed'}
def purged(name):
'''
Verify that the package is purged
'''
if not __salt__['pkg.version'](name):
return {'name': name,
'changes': {},
'result': True,
'comment': 'Package ' + name + ' is not installed'}
else:
changes = __salt__['pkg.purge'](name)
if not changes:
return {'name': name,
'changes': changes,
'result': False,
'comment': 'Package ' + name + ' failed to purge'}
return {'name': name,
'changes': changes,
'result': True,
'comment': 'Package ' + name + ' purged'}
|
Python
| 0
|
@@ -1,196 +1,552 @@
'''%0A
-State enforcing for packages%0A'''%0A%0Adef installed(name):%0A '''%0A Verify that the package is installed, return the packages changed in the%0A operation and a bool if the job was sucessfu
+Package Management%0A==================%0ASalt can manage software packages via the pkg state module, packages can be%0Aset up to be installed, latest, removed and purged. Package management%0Adeclarations are typically rather simple:%0A%0A.. code-block:: yaml%0A vim:%0A pkg:%0A - installed%0A'''%0A%0Adef installed(name):%0A '''%0A Verify that the package is installed, and only that it is installed. This%0A state will not upgrade an existing package and only verify that it is%0A installed%0A%0A name%0A ~~~~%0A The name of the package to insta
ll%0A
@@ -1181,35 +1181,398 @@
the
-latest package is installed
+named package is installed and the latest available%0A package. If the package can be updated this state function will update%0A the package. Generally it is better for the installed function to be%0A used, as %60%60latest%60%60 will update the package the package whenever a new%0A package is available%0A%0A name%0A ~~~~%0A The name of the package to maintain at the latest available version
%0A
@@ -2184,16 +2184,176 @@
removed
+, this will remove the package via%0A the remove function in the salt pkg module for the platform.%0A%0A name%0A ~~~~%0A The name of the package to be removed
%0A '''
@@ -3009,16 +3009,156 @@
s purged
+, this will call the purge function in the%0A salt pkg module for the platform.%0A%0A name%0A ~~~~%0A The name of the package to be purged
%0A '''
|
13225498b064dad6af99302cece847fcff3eeb6c
|
fix transfer_output_remaps
|
iceprod/server/plugins/condor.py
|
iceprod/server/plugins/condor.py
|
"""
The Condor plugin. Allows submission to
`HTCondor <http://research.cs.wisc.edu/htcondor/>`_.
Note: Condor was renamed to HTCondor in 2012.
"""
from __future__ import print_function
import os
import sys
import random
import math
import logging
import getpass
from datetime import datetime,timedelta
import subprocess
from functools import partial
from iceprod.core import dataclasses
from iceprod.core import functions
from iceprod.server import GlobalID
from iceprod.server import grid
logger = logging.getLogger('condor')
class condor(grid.grid):
### Plugin Overrides ###
# let the basic plugin be dumb and implement as little as possible
def generate_submit_file(self,task,cfg=None,passkey=None,
filelist=None):
"""Generate queueing system submit file for task in dir."""
args = self.get_submit_args(task,cfg=cfg,passkey=passkey)
# get requirements and batchopts
requirements = []
batch_opts = {}
for b in self.queue_cfg['batchopts']:
if b.lower() == 'requirements':
requirements.append(self.queue_cfg['batchopts'][b])
else:
batch_opts[b] = self.queue_cfg['batchopts'][b]
if cfg:
if (cfg['steering'] and 'batchsys' in cfg['steering'] and
cfg['steering']['batchsys']):
for b in cfg['steering']['batchsys']:
if b.lower().startswith(self.__class__.__name__):
# these settings apply to this batchsys
for bb in cfg['steering']['batchsys'][b]:
value = cfg['steering']['batchsys'][b][bb]
if bb.lower() == 'requirements':
requirements.append(value)
else:
batch_opts[bb] = value
if task['task_id'] != 'pilot':
if 'task' in cfg['options']:
t = cfg['options']['task']
if t in cfg['tasks']:
alltasks = [cfg['tasks'][t]]
else:
alltasks = []
try:
for tt in cfg['tasks']:
if t == tt['name']:
alltasks.append(tt)
except:
logger.warn('error finding specified task to run for %r',
task,exc_info=True)
else:
alltasks = cfg['tasks']
for t in alltasks:
for b in t['batchsys']:
if b.lower().startswith(self.__class__.__name__):
# these settings apply to this batchsys
for bb in t['batchsys'][b]:
value = t['batchsys'][b][bb]
if bb.lower() == 'requirements':
requirements.append(value)
else:
batch_opts[bb] = value
# write the submit file
submit_file = os.path.join(task['submit_dir'],'condor.submit')
with open(submit_file,'w') as f:
p = partial(print,sep='',file=f)
p('universe = vanilla')
p('executable = {}'.format(os.path.join(task['submit_dir'],'loader.sh')))
p('log = condor.log')
p('output = condor.out.$(Process)')
p('error = condor.err.$(Process)')
p('notification = never')
if filelist:
p('transfer_input_files = {}'.format(','.join(filelist)))
p('skip_filechecks = True')
p('should_transfer_files = always')
p('transfer_output_files = iceprod_log, iceprod_out, iceprod_err')
if 'num' in task:
p('transfer_output_remaps = iceprod_log iceprod_log_$(Process)'
' ; iceprod_out iceprod_out_$(Process)'
' ; iceprod_err iceprod_err_$(Process)')
p('arguments = ',' '.join(args))
if 'reqs' in task:
if 'cpu' in task['reqs']:
p('request_cpus = {}'.format(task['reqs']['cpu']))
if 'gpu' in task['reqs']:
p('request_gpus = {}'.format(task['reqs']['gpu']))
if 'memory' in task['reqs']:
p('request_memory = {}'.format(task['reqs']['memory']))
if 'disk' in task['reqs']:
p('request_disk = {}'.format(task['reqs']['disk']*1000))
for b in batch_opts:
p(b+'='+batch_opts[b])
if requirements:
p('requirements = ('+')&&('.join(requirements)+')')
if 'num' in task:
p('queue {}'.format(task['num']))
else:
p('queue')
def submit(self,task):
"""Submit task to queueing system."""
cmd = ['condor_submit','condor.submit']
out = subprocess.check_output(cmd,cwd=task['submit_dir'])
for line in out.split('\n'):
line = line.strip()
if 'cluster' in line:
task['grid_queue_id'] = line.split()[-1].strip('.')
def get_grid_status(self):
"""Get all tasks running on the queue system.
Returns {grid_queue_id:{status,submit_dir}}
"""
ret = {}
cmd = ['condor_q',getpass.getuser(),'-af:j','jobstatus','cmd']
out = subprocess.check_output(cmd)
for line in out.split('\n'):
if not line.strip():
continue
gid,status,cmd = line.split()
if status == '1':
status = 'queued'
elif status == '2':
status = 'processing'
elif status == '4':
status = 'completed'
elif status in ('3','5','6'):
status = 'error'
else:
status = 'unknown'
ret[gid] = {'status':status,'submit_dir':os.path.dirname(cmd)}
return ret
def remove(self,tasks):
"""Remove tasks from queueing system."""
if tasks:
subprocess.check_call(['condor_rm']+list(tasks))
|
Python
| 0.000003
|
@@ -4108,24 +4108,25 @@
ut_remaps =
+%22
iceprod_log
@@ -4124,17 +4124,17 @@
prod_log
-
+=
iceprod_
@@ -4161,35 +4161,33 @@
'
- ;
+;
iceprod_out icep
@@ -4181,17 +4181,17 @@
prod_out
-
+=
iceprod_
@@ -4230,11 +4230,9 @@
'
- ;
+;
icep
@@ -4238,17 +4238,17 @@
prod_err
-
+=
iceprod_
@@ -4253,32 +4253,33 @@
d_err_$(Process)
+%22
')%0D%0A
|
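The hunks above rewrite the transfer_output_remaps line from space-separated pairs into HTCondor's quoted `old=new;old=new` form. A minimal sketch of building such a line; the helper name build_remaps is assumed for illustration and is not part of the original source:

def build_remaps(names, suffix='$(Process)'):
    # HTCondor expects: transfer_output_remaps = "old1=new1;old2=new2"
    pairs = ['{0}={0}_{1}'.format(n, suffix) for n in names]
    return 'transfer_output_remaps = "%s"' % ';'.join(pairs)

print(build_remaps(['iceprod_log', 'iceprod_out', 'iceprod_err']))
# -> transfer_output_remaps = "iceprod_log=iceprod_log_$(Process);iceprod_out=iceprod_out_$(Process);iceprod_err=iceprod_err_$(Process)"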
0926cc173151dd223e517cabd03e7f0b5bd6c7b2
|
Change DOI extraction in PubMed client
|
indra/databases/pubmed_client.py
|
indra/databases/pubmed_client.py
|
import urllib, urllib2
from functools32 import lru_cache
import xml.etree.ElementTree as ET
pubmed_search = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi'
pubmed_fetch = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi'
pmid_convert = 'http://www.ncbi.nlm.nih.gov/pmc/utils/idconv/v1.0/'
@lru_cache(maxsize=100)
def send_request(url, data):
try:
req = urllib2.Request(url, data)
res = urllib2.urlopen(req)
xml_str = res.read()
tree = ET.fromstring(xml_str)
except:
return None
return tree
def get_ids(search_term, retmax=1000, db='pubmed'):
params = {'db': db,
'term': search_term,
'sort': 'pub+date',
'retstart': 0,
'retmax': retmax}
tree = send_request(pubmed_search, urllib.urlencode(params))
if tree is None:
return []
count = int(tree.find('Count').text)
id_terms = tree.findall('IdList/Id')
if id_terms is None:
return []
ids = [idt.text for idt in id_terms]
if count != len(ids):
print 'Not all ids were retrieved, limited at %d.' % retmax
return ids
def get_abstract(pubmed_id):
if pubmed_id.upper().startswith('PMID'):
pubmed_id = pubmed_id[4:]
params = {'db': 'pubmed',
'retmode': 'xml',
'rettype': 'abstract',
'id': pubmed_id}
tree = send_request(pubmed_fetch, urllib.urlencode(params))
if tree is None:
return None
article = tree.find('PubmedArticle/MedlineCitation/Article')
if article is None:
return None
abstract = article.findall('Abstract/AbstractText')
if abstract is None:
return None
else:
abstract_text = ' '.join([' ' if abst.text is None
else abst.text for abst in abstract])
return abstract_text
def pmid_to_doi(pubmed_id):
if pubmed_id.upper().startswith('PMID'):
pubmed_id = pubmed_id[4:]
url = pmid_convert
data = {'ids': pubmed_id}
tree = send_request(url, urllib.urlencode(data))
if tree is None:
return None
record = tree.find('record')
if record is None:
return None
doi = record.attrib['doi']
return doi
|
Python
| 0
|
@@ -2243,15 +2243,19 @@
trib
-%5B
+.get(
'doi'
-%5D
+)
%0A
|
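The hunk above swaps record.attrib['doi'] for record.attrib.get('doi'), presumably because the idconv service omits the doi attribute for records that have no DOI. ElementTree exposes attrib as a plain dict, so .get() returns None instead of raising KeyError; a small sketch:

import xml.etree.ElementTree as ET

record = ET.fromstring('<record pmid="12345"/>')   # no doi attribute
print(record.attrib.get('doi'))   # None, so the caller can test for it
# record.attrib['doi'] would raise KeyError for records without a DOI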
c95e54b558d9a910181715df291402c44e0d8d55
|
Specify only luci buckets instead of hardcoding trybot names
|
infra/bots/update_meta_config.py
|
infra/bots/update_meta_config.py
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Update meta/config of the specified Skia repo."""
import argparse
import json
import os
import subprocess
import sys
import urllib2
import git_utils
SKIA_REPO_TEMPLATE = 'https://skia.googlesource.com/%s.git'
CQ_INCLUDE_CHROMIUM_TRYBOTS = [
('luci.chromium.try', [
'android_optional_gpu_tests_rel',
'linux-blink-rel',
'linux_chromium_compile_dbg_ng',
'linux_chromium_dbg_ng',
'linux_chromium_rel_ng',
'linux_optional_gpu_tests_rel',
'mac10.10-blink-rel',
'mac10.11-blink-rel',
'mac10.12-blink-rel',
'mac10.13-blink-rel',
'mac10.13_retina-blink-rel',
'mac_chromium_compile_dbg_ng',
'mac_chromium_compile_rel_ng',
'mac_chromium_dbg_ng',
'mac_chromium_rel_ng',
'mac_optional_gpu_tests_rel',
'win10-blink-rel',
'win7-blink-rel',
'win_chromium_compile_dbg_ng',
'win_chromium_dbg_ng',
'win_optional_gpu_tests_rel',
]),
('master.tryserver.chromium.linux', [
'linux_chromium_compile_rel_ng',
]),
('master.tryserver.chromium.win', [
'win_chromium_compile_rel_ng',
'win7_chromium_rel_ng',
'win10_chromium_x64_rel_ng',
]),
('master.tryserver.chromium.android', [
'android_blink_rel',
'android_compile_dbg',
'android_compile_rel',
'android_n5x_swarming_dbg',
'android_n5x_swarming_rel',
])
]
def addChromiumTrybots(f):
for master, bots in CQ_INCLUDE_CHROMIUM_TRYBOTS:
f.write('[bucket "%s"]\n' % master)
for bot in bots:
f.write('\tbuilder = %s\n' % bot)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--repo_name")
parser.add_argument("--tasks_json")
args = parser.parse_args()
skia_repo = SKIA_REPO_TEMPLATE % args.repo_name
with git_utils.NewGitCheckout(repository=skia_repo):
# Fetch and checkout the meta/config branch.
subprocess.check_call(['git', 'fetch', skia_repo, 'refs/meta/config:cfg'])
subprocess.check_call(['git', 'checkout', 'cfg'])
# Create list of tryjobs from tasks_json.
tryjobs = []
with open(args.tasks_json) as tasks_json:
data = json.load(tasks_json)
for job in data['jobs'].keys():
if not job.startswith('Upload-'):
tryjobs.append(job)
tryjobs.sort()
# Write to buildbucket.config.
buildbucket_config = os.path.join(os.getcwd(), 'buildbucket.config')
with open(buildbucket_config, 'w') as f:
if args.repo_name == 'skia':
addChromiumTrybots(f)
# Adding all Skia jobs.
f.write('[bucket "skia.primary"]\n')
for job in tryjobs:
f.write('\tbuilder = ' + job + '\n')
subprocess.check_call(['git', 'add', 'buildbucket.config'])
try:
subprocess.check_call(
['git', 'commit', '-m', 'Update builders in buildbucket.config'])
except subprocess.CalledProcessError:
print 'No changes to buildbucket.config'
return
subprocess.check_call(['git', 'push', skia_repo, 'cfg:refs/meta/config'])
if '__main__' == __name__:
main()
|
Python
| 0.999011
|
@@ -394,21 +394,21 @@
HROMIUM_
-TRYBO
+BUCKE
TS = %5B%0A
@@ -410,17 +410,16 @@
= %5B%0A
-(
'luci.ch
@@ -434,1216 +434,33 @@
ry',
- %5B%0A 'android_optional_gpu_tests_rel',%0A 'linux-blink-rel',%0A 'linux_chromium_compile_dbg_ng',%0A 'linux_chromium_dbg_ng',%0A 'linux_chromium_rel_ng',%0A 'linux_optional_gpu_tests_rel',%0A 'mac10.10-blink-rel',%0A 'mac10.11-blink-rel',%0A 'mac10.12-blink-rel',%0A 'mac10.13-blink-rel',%0A 'mac10.13_retina-blink-rel',%0A 'mac_chromium_compile_dbg_ng',%0A 'mac_chromium_compile_rel_ng',%0A 'mac_chromium_dbg_ng',%0A 'mac_chromium_rel_ng',%0A 'mac_optional_gpu_tests_rel',%0A 'win10-blink-rel',%0A 'win7-blink-rel',%0A 'win_chromium_compile_dbg_ng',%0A 'win_chromium_dbg_ng',%0A 'win_optional_gpu_tests_rel',%0A %5D),%0A ('master.tryserver.chromium.linux', %5B%0A 'linux_chromium_compile_rel_ng',%0A %5D),%0A ('master.tryserver.chromium.win', %5B%0A 'win_chromium_compile_rel_ng',%0A 'win7_chromium_rel_ng',%0A 'win10_chromium_x64_rel_ng',%0A %5D),%0A ('master.tryserver.chromium.android', %5B%0A 'android_blink_rel',%0A 'android_compile_dbg',%0A 'android_compile_rel',%0A 'android_n5x_swarming_dbg',%0A 'android_n5x_swarming_rel',%0A %5D)%0A%5D%0A%0A%0Adef addChromiumTrybo
+%0A%5D%0A%0A%0Adef addChromiumBucke
ts(f
@@ -472,20 +472,14 @@
for
-master, bots
+bucket
in
@@ -502,13 +502,13 @@
IUM_
-TRYBO
+BUCKE
TS:%0A
@@ -543,74 +543,13 @@
' %25
-master)%0A for bot in bots:%0A f.write('%5Ctbuilder = %25s%5Cn' %25 bo
+bucke
t)%0A%0A
@@ -1477,13 +1477,13 @@
mium
-Trybo
+Bucke
ts(f
|
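Applying the hunks above yields roughly the following, reconstructed from the diff rather than verified against the Skia repo: the per-builder lists disappear and only the LUCI bucket name remains, presumably because a bucket-level entry is now sufficient for buildbucket.

CQ_INCLUDE_CHROMIUM_BUCKETS = [
    'luci.chromium.try',
]

def addChromiumBuckets(f):
    for bucket in CQ_INCLUDE_CHROMIUM_BUCKETS:
        f.write('[bucket "%s"]\n' % bucket)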
52bc51e4d55322b45019d9ddf3bb273282dd80b3
|
Call `super` at the start of `setup_experiment` and `post_batch`. prev
|
nupic/research/frameworks/pytorch/imagenet/mixins/lr_range_test.py
|
nupic/research/frameworks/pytorch/imagenet/mixins/lr_range_test.py
|
# ------------------------------------------------------------------------------
# Copyright (C) 2020, Numenta, Inc. All rights reserved.
#
# The information and source code contained herein is the
# exclusive property of Numenta Inc. No part of this software
# may be used, reproduced, stored or distributed in any form,
# without explicit written authorization from Numenta Inc.#
# ------------------------------------------------------------------------------
import os
from copy import deepcopy
from pandas import DataFrame
from ray.tune.utils import flatten_dict
from nupic.research.frameworks.pytorch.imagenet import mixins
from nupic.research.frameworks.pytorch.lr_scheduler import LinearLRScheduler
class LRRangeTest(mixins.LogEveryLoss):
"""
Mixin for the LR-range test defined in section 4.1 of "A Disciplined Approach to
Neural Network Hyper-Parameters"
- https://arxiv.org/pdf/1803.09820.pdf
Herein, a min_lr and max_lr are set, and training proceeds for a small number of
epochs (1-3) while the learning rate is linearly increased. Generally, the point
    at which the training loss begins to curve upwards and increase while the
    validation/test loss still decreases is considered a reasonable choice
    for your max_lr in a cyclical lr-schedule. The same author recommends a
    min_lr 10-20 times lower than this amount.
"""
# Use the following name when `save_lr_test_to_csv=True`.
csv_save_name = "lr_range_test.csv"
def setup_experiment(self, config):
"""
:param config:
- epochs: number of epochs in training (recommended 1-3)
- epochs_to_validate: will be overridden to include all epochs
- save_lr_test_to_csv: whether to save results to csv; must be accompanied
by 'logdir'
- logdir: directory to save test results
- lr_scheduler_class: automatically overridden to LinearLRScheduler
- lr_scheduler_args: args for the linear-schedule
- min_lr: starting learning rate
- max_lr: ending learning rate
"""
# Ensure all epochs get validated.
assert "epochs" in config
config["epochs_to_validate"] = range(-1, config["epochs"])
# Init saving of test results.
self.save_lr_test_to_csv = config.get("save_lr_test_to_csv", False)
if self.save_lr_test_to_csv:
assert "logdir" in config, (
"The logdir must be specified to save the lt-range test resutls.")
self.logdir = config["logdir"]
# Save config for later - used to aggregate results.
self._config = deepcopy(config)
super().setup_experiment(config)
@classmethod
def create_lr_scheduler(cls, config, optimizer, total_batches):
assert "lr_scheduler_args" in config
lr_scheduler_class = LinearLRScheduler
lr_scheduler_args = config["lr_scheduler_args"]
lr_scheduler_args.update(epochs=config["epochs"])
lr_scheduler_args.update(steps_per_epoch=total_batches)
return lr_scheduler_class(optimizer, **lr_scheduler_args)
def post_batch(self, *args, **kwargs):
"""Increase lr after every batch."""
self.lr_scheduler.step()
super().post_batch(*args, **kwargs)
def log_result(self, result):
"""
Logs results to csv with columns for timestep, learning_rate, train_loss,
and validation_accuracy.
"""
super().log_result(result)
        # Logging should only occur in the zeroth process, and only when
        # saving to csv was requested (otherwise self.logdir is never set).
        if self.rank != 0 or not self.save_lr_test_to_csv:
            return
df = DataFrame()
df.index.name = "timestep"
time_series_dict = self.expand_result_to_time_series(result, self._config)
for t, d in sorted(time_series_dict.items(), key=lambda x: x[0]):
for key, value in flatten_dict(d, delimiter="/").items():
if key in ["train_loss", "learning_rate", "validation_accuracy"]:
df.at[t, key] = value
df.to_csv(os.path.join(self.logdir, self.csv_save_name))
@classmethod
def get_execution_order(cls):
eo = super().get_execution_order()
eo["setup_experiment"].insert(0, cls.__name__ + ": initialize")
eo["create_lr_scheduler"] = cls.__name__ + ": create_lr_scheduler"
eo["post_batch"].insert(0, cls.__name__ + ": linearly increase lr")
return eo
|
Python
| 0.001165
|
@@ -2146,32 +2146,74 @@
ate%0A %22%22%22%0A
+ super().setup_experiment(config)%0A%0A
# Ensure
@@ -2239,16 +2239,16 @@
idated.%0A
-
@@ -2765,49 +2765,8 @@
fig)
-%0A super().setup_experiment(config)
%0A%0A
@@ -3281,41 +3281,8 @@
%22%22%22%0A
- self.lr_scheduler.step()%0A
@@ -3316,24 +3316,57 @@
s, **kwargs)
+%0A self.lr_scheduler.step()
%0A%0A def lo
|
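The hunks above move the super() calls to the top of setup_experiment and post_batch, so the base class initialises its state (and runs its batch hooks) before the mixin uses it. A toy sketch of that cooperative ordering; the class names here are illustrative and not part of the original framework:

class FakeScheduler(object):
    def step(self):
        print('lr stepped')

class Base(object):
    def setup_experiment(self, config):
        self.lr_scheduler = FakeScheduler()   # base sets up state first

    def post_batch(self, *args, **kwargs):
        pass

class RangeTest(Base):
    def setup_experiment(self, config):
        super(RangeTest, self).setup_experiment(config)   # super() first ...
        config['epochs_to_validate'] = range(-1, config['epochs'])

    def post_batch(self, *args, **kwargs):
        super(RangeTest, self).post_batch(*args, **kwargs)
        self.lr_scheduler.step()   # ... so the scheduler exists to step

exp = RangeTest()
exp.setup_experiment({'epochs': 3})
exp.post_batch()   # prints 'lr stepped'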
216bb86730436f4b2d167d917a903dcd982b7897
|
remove log for values modifier disabled
|
openedx/core/djangoapps/appsembler/sites/config_values_modifier.py
|
openedx/core/djangoapps/appsembler/sites/config_values_modifier.py
|
"""
Tahoe: Configuration modifiers for Tahoe.
"""
from urllib.parse import urlsplit
from logging import getLogger
from django.conf import settings
from openedx.core.djangoapps.appsembler.sites.waffle import ENABLE_CONFIG_VALUES_MODIFIER
log = getLogger(__name__)
class TahoeConfigurationValueModifier:
"""
Calculate URL values for Tahoe.
This is useful to reduce the cost of changing a Site domain.
"""
FIELD_OVERRIDERS = {
'SITE_NAME': 'get_site_name',
'LMS_ROOT_URL': 'get_lms_root_url',
'ACTIVATION_EMAIL_SUPPORT_LINK': 'get_activation_email_support_link',
'PASSWORD_RESET_SUPPORT_LINK': 'get_password_reset_support_link',
'css_overrides_file': 'get_css_overrides_file',
}
def __init__(self, site_config_instance):
self.site_config_instance = site_config_instance
def normalize_get_value_params(self, name, default):
"""
Amend the name and default values for Tahoe.
        This resolves a few quirks and tech debt in Open edX, in which some variables don't exist
        while others exist in multiple spellings/cases.
"""
# Tahoe: Default value is needed for this
if name == 'LANGUAGE_CODE' and default is None:
# TODO: Ask Dashboard 2.0 / AMC to set the `LANGUAGE_CODE` by default.
default = 'en'
if name == 'PLATFORM_NAME':
# Always use the lower case so the configuration is easier to maintain.
name = 'platform_name'
return name, default
def get_domain(self):
domain = None
if hasattr(self.site_config_instance, 'site'):
domain = self.site_config_instance.site.domain
return domain
def get_site_name(self):
return self.get_domain()
def get_css_overrides_file(self):
domain_without_port_number = self.get_domain().split(':')[0]
return '{}.css'.format(domain_without_port_number)
def get_lms_root_url(self):
"""
Provide override for LMS_ROOT_URL synced with `SITE_NAME`.
"""
# We cannot simply use a protocol-relative URL for LMS_ROOT_URL
# This is because the URL here will be used by such activities as
        # sending activation links to new users. The activation link, like
        # address verification emails, needs the scheme. The callers using
        # this variable expect the scheme in the URL.
return '{scheme}://{domain}'.format(
scheme=urlsplit(settings.LMS_ROOT_URL).scheme,
domain=self.get_domain(),
)
def get_activation_email_support_link(self):
"""
        RED-2471: Use Multi-tenant `/help` URL for activation emails.
"""
return '{root_url}/help'.format(root_url=self.get_lms_root_url())
def get_password_reset_support_link(self):
"""
        RED-2385: Use Multi-tenant `/help` URL for password reset emails.
"""
return '{root_url}/help'.format(root_url=self.get_lms_root_url())
def override_value(self, name):
"""
Given a value name, return a hard-coded default completely disregarding the stored values.
This is useful to simplify the domain name change for Sites.
:return (should_override, overridden_value)
"""
value_getter_method_name = self.FIELD_OVERRIDERS.get(name)
if value_getter_method_name:
if self.get_domain():
value_getter = getattr(self, value_getter_method_name)
return True, value_getter()
return False, None
def init_configuration_modifier_for_site_config(sender, instance, **kwargs):
if ENABLE_CONFIG_VALUES_MODIFIER.is_enabled():
instance.tahoe_config_modifier = TahoeConfigurationValueModifier(site_config_instance=instance)
else:
log.info('ENABLE_CONFIG_VALUES_MODIFIER: switch is not enabled, not using TahoeConfigurationValueModifier')
|
Python
| 0
|
@@ -3808,130 +3808,4 @@
ce)%0A
- else:%0A log.info('ENABLE_CONFIG_VALUES_MODIFIER: switch is not enabled, not using TahoeConfigurationValueModifier')%0A
|
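For context, override_value above resolves a config name to a getter method through FIELD_OVERRIDERS and getattr. A stripped-down sketch of that dispatch pattern, with a hypothetical field map and domain:

class SiteOverrider(object):
    FIELDS = {'SITE_NAME': 'get_site_name'}   # config name -> getter method

    def get_site_name(self):
        return 'academy.example.com'          # hypothetical domain

    def override_value(self, name):
        getter_name = self.FIELDS.get(name)
        if getter_name:
            return True, getattr(self, getter_name)()
        return False, None

print(SiteOverrider().override_value('SITE_NAME'))   # (True, 'academy.example.com')
print(SiteOverrider().override_value('OTHER'))       # (False, None)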
0da1b4f4041ebe415782d36f6f69af91faad024f
|
Include some useful links for future development
|
src/dataset/retriever.py
|
src/dataset/retriever.py
|
"""
Subset of reuters 21578, "ModApte", considering only received categories.
"""
from nltk.corpus import reuters
class ReutersCollection:
interest_categories = []
documents = []
train_docs = []
test_docs = []
"""
Initializes the collection considering only the received categories.
:param categories: [str] list of categories names
"""
def __init__(self, categories):
self.interest_categories = categories
self.documents = reuters.fileids(categories)
self.train_docs = list(filter(lambda doc: doc.startswith("train"), self.documents))
self.test_docs = list(filter(lambda doc: doc.startswith("test"), self.documents))
"""
Prints the size of training and test set, the total amount of documents
and categories used to retrieve the documents.
"""
def stats(self):
# print (reuters.categories())
print ("Collection stats:")
# List of documents
print ("\t" + str(len(self.documents)) + " documents")
print ("\t" + str(len(self.train_docs)) + " total train documents")
print ("\t" + str(len(self.test_docs)) + " total test documents")
# List of categories
all_categories = reuters.categories()
print ("\tConsidering " + str(len(self.interest_categories)) + " from a total of " + str(len(all_categories)) + " categories\n")
"""
Prints the stats of every category of interest
"""
def detailed_stats(self):
for cat in self.interest_categories:
self.category_stats(cat)
"""
Prints the size of training and test set, the total amount of documents
of a specific category
"""
def category_stats(self, category):
print ("Stats of " + category + ":")
category_docs = reuters.fileids(category)
category_train_docs = list(filter(lambda doc: doc.startswith("train"), category_docs))
category_test_docs = list(filter(lambda doc: doc.startswith("test"), category_docs))
# List of documents
print ("\t" + str(len(category_docs)) + " documents")
print ("\t" + str(len(category_train_docs)) + " total train documents")
print ("\t" + str(len(category_test_docs)) + " total test documents\n")
"""
Prints a preview of the category, including some words and a raw document
    :param category: str name of desired category
"""
def preview(self, category):
# Documents in a category
category_docs = reuters.fileids(category)
# Words for a document
document_id = category_docs[0]
document_words = reuters.words(category_docs[0])
print(document_words)
# Raw document
print(reuters.raw(document_id))
|
Python
| 0
|
@@ -70,16 +70,321 @@
egories.
+%0A%0Autil: %0A- http://www.nltk.org/book/ch02.html%0A- https://miguelmalvarez.com/2015/03/20/classifying-reuters-21578-collection-with-python-representing-the-data/%0A%0Amight be useful:%0A- http://www.nltk.org/howto/corpus.html%0A- https://miguelmalvarez.com/2016/11/07/classifying-reuters-21578-collection-with-python/
%0A%22%22%22%0A%0A%0Af
|
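A hypothetical usage sketch for the class above; it assumes the nltk reuters corpus has already been downloaded:

# import nltk; nltk.download('reuters')   # one-time corpus download
collection = ReutersCollection(['grain', 'crude'])
collection.stats()                 # sizes of the train/test splits
collection.category_stats('grain')
collection.preview('grain')        # words and raw text of one document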
3c538233d30a39281a5e2d79df51f114480c33fc
|
Add in a way to specify different ways of generating the hostname. Some people want to use a fqdn, others uname, others hardcoded, and others reversed. This allows a easier way to code a new method into place
|
src/diamond/collector.py
|
src/diamond/collector.py
|
import inspect
from diamond import *
from diamond.metric import Metric
# Detect the architecture of the system and set the counters for MAX_VALUES
# appropriately. Otherwise, rolling over counters will cause incorrect or
# negative values.
if platform.architecture()[0] == '64bit':
MAX_COUNTER = (2 ** 64) - 1
else:
MAX_COUNTER = (2 ** 32) - 1
from diamond.metric import Metric
class Collector(object):
"""
The Collector class is a base class for all metric collectors.
"""
def __init__(self, config, handlers):
"""
Create a new instance of the Collector class
"""
# Initialize Logger
self.log = logging.getLogger('diamond')
# Initialize Members
self.name = self.__class__.__name__
self.handlers = handlers
self.last_values = {}
# Get Collector class
cls = self.__class__
# Initialize config
self.config = configobj.ConfigObj()
# Merge default Collector config
self.config.merge(config['collectors']['default'])
# Check if default config is defined
if self.get_default_config() is not None:
# Merge default config
self.config.merge(self.get_default_config())
# Check if Collector config section exists
if cls.__name__ in config['collectors']:
# Merge Collector config section
self.config.merge(config['collectors'][cls.__name__])
# Check for config file in config directory
configfile = os.path.join(config['server']['collectors_config_path'], cls.__name__) + '.conf'
if os.path.exists(configfile):
# Merge Collector config file
self.config.merge(configobj.ConfigObj(configfile))
def get_default_config(self):
"""
Return the default config for the collector
"""
return {}
def get_schedule(self):
"""
Return schedule for the collector
"""
# Return a dict of tuples containing (collector function, collector function args, splay, interval)
return {self.__class__.__name__: (self._run, None, int(self.config['splay']), int(self.config['interval']))}
def get_metric_path(self, name):
"""
Get metric path
"""
if 'path_prefix' in self.config:
prefix = self.config['path_prefix']
else:
prefix = 'systems'
if 'hostname' in self.config:
hostname = self.config['hostname']
else:
hostname = socket.getfqdn().split('.')[0]
if 'path' in self.config:
path = self.config['path']
else:
path = self.__class__.__name__
if path == '.':
return '.'.join([prefix, hostname, name])
else:
return '.'.join([prefix, hostname, path, name])
def collect(self):
"""
Default collector method
"""
raise NotImplementedError()
def publish(self, name, value, precision=0):
"""
Publish a metric with the given name
"""
# Get metric Path
path = self.get_metric_path(name)
# Create Metric
metric = Metric(path, value, None, precision)
# Publish Metric
self.publish_metric(metric)
def publish_metric(self, metric):
"""
Publish a Metric object
"""
# Process Metric
for h in self.handlers:
h.process(metric)
def derivative(self, name, new, max_value=0):
"""
Calculate the derivative of the metric.
"""
# Format Metric Path
path = self.get_metric_path(name)
if path in self.last_values:
old = self.last_values[path]
# Check for rollover
if new < old:
old = old - max_value
# Get Change in X (value)
dx = new - old
# Get Change in Y (time)
dy = int(self.config['interval'])
result = float(dx) / float(dy)
else:
result = 0
# Store Old Value
self.last_values[path] = new
# Return result
return result
def _run(self):
"""
Run the collector
"""
# Log
self.log.debug("Collecting data from: %s" % (self.__class__.__name__))
try:
# Collect Data
self.collect()
except Exception, e:
# Log Error
self.log.error(traceback.format_exc())
|
Python
| 0.000042
|
@@ -8,16 +8,40 @@
inspect
+%0Aimport os%0Aimport socket
%0A%0Afrom d
@@ -2228,16 +2228,806 @@
l'%5D))%7D%0A%0A
+ def get_hostname(self):%0A if 'hostname' in self.config:%0A hostname = self.config%5B'hostname'%5D%0A if 'hostname_method' not in self.config or self.config%5B'hostname_method'%5D == 'fqdn_short':%0A return socket.getfqdn().split('.')%5B0%5D%0A if self.config%5B'hostname_method'%5D == 'fqdn_rev':%0A hostname = socket.getfqdn().split('.')%0A hostname.reverse()%0A hostname = '.'.join(hostname)%0A return hostname%0A if self.config%5B'hostname_method'%5D == 'uname_short':%0A return os.uname().split('.')%5B0%5D%0A if self.config%5B'hostname_method'%5D == 'uname_rev':%0A hostname = os.uname().split('.')%0A hostname.reverse()%0A hostname = '.'.join(hostname)%0A return hostname %0A %0A%0A
def
@@ -3051,24 +3051,24 @@
elf, name):%0A
-
%22%22%22%0A
@@ -3237,25 +3237,24 @@
ystems'%0A
-%0A
if 'host
@@ -3249,152 +3249,51 @@
-if 'hostname' in self.config:%0A hostname = self.config%5B'hostname'%5D%0A else:%0A hostname = socket.getfqdn().split('.')%5B0%5D
+ %0A hostname = self.get_hostname()
%0A%0A
|
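A caveat on the hunk above: os.uname() returns a tuple, so the uname branches as written would raise AttributeError; a working dispatch would read the nodename field first. A sketch under that assumption (Unix-only, same config keys as the diff):

import os
import socket

def get_hostname(config):
    if 'hostname' in config:
        return config['hostname']
    method = config.get('hostname_method', 'fqdn_short')
    if method == 'fqdn_short':
        return socket.getfqdn().split('.')[0]
    if method == 'fqdn_rev':
        return '.'.join(reversed(socket.getfqdn().split('.')))
    if method == 'uname_short':
        return os.uname()[1].split('.')[0]    # [1] is the nodename field
    if method == 'uname_rev':
        return '.'.join(reversed(os.uname()[1].split('.')))
    raise ValueError('unknown hostname_method: %s' % method)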
2209d03532d6c0ed7d55cf4cf759fd82585b5ad3
|
Update item.py
|
item.py
|
item.py
|
import pygame
class Item(pygame.sprite.Sprite):
def __init__(self, level, *groups):
super(Item, self).__init__(*groups)
#the game level
self.level = level
#base image
#self.level.animator.set_Img(0,5)
#self.image = self.level.animator.get_Img().convert()
#self.image.set_colorkey((255,0,0))
self.level.animator.set_Img(6,0)
self.image = self.level.animator.get_Img().convert()
self.image.set_colorkey((255,0,0))
#type
self.flavor_saver = ['gem', 'axe', 'sammich', 'telescope']
self.flavor = 'gem'
#location
self.firstflag = True
self.scrnx = 0
self.scrny = 0
self.mapx = 0
self.mapy = 0
def spawn(self,x,y):
self.scrnx = self.level.mymap[x][y].scrnx
self.mapx = x
self.scrny = self.level.mymap[x][y].scrny
self.mapy = y
self.rect = pygame.rect.Rect((self.scrnx * self.level.tilex, self.scrny * self.level.tiley), self.image.get_size())
#self.scrnx = x
#self.scrny = y
#if self.firstflag:
# self.mapx = x
# self.mapy = y
# self.firstflag = False
#self.rect = pygame.rect.Rect((x * self.level.tilex, y * self.level.tiley), self.image.get_size())
def set_type(self, itype):
self.flavor = self.flavor_saver[itype]
if itype == 0:
xind = 6
yind = 0
if itype == 1:
xind = 6
yind = 5
if itype == 2:
xind = 6
yind = 4
if itype == 3:
xind = 6
yind = 3
self.level.animator.set_Img(xind,yind)
self.image = self.level.animator.get_Img().convert()
self.image.set_colorkey((255,0,0))
#def reveal(self):
# self.image = self.secretimage
def set_Index(self, x, y):
self.scrnx = x
self.rect.x = x*self.level.tilex
self.scrny = y
self.rect.y = y*self.level.tiley
def get_Index(self, axis):
if axis == 'X':
return self.scrnx
if axis == 'Y':
return self.scrny
return -1
|
Python
| 0
|
@@ -174,138 +174,8 @@
age%0A
-%09%09#self.level.animator.set_Img(0,5)%0A%09%09#self.image = self.level.animator.get_Img().convert()%0A%09%09#self.image.set_colorkey((255,0,0))%0A
%09%09se
@@ -367,16 +367,27 @@
lescope'
+, 'canteen'
%5D%0A%09%09self
@@ -540,242 +540,63 @@
elf.
-scrnx = self.level.mymap%5Bx%5D%5By%5D.scrnx%0A%09%09self.mapx = x%0A%09%09self.scrny = self.level.mymap%5Bx%5D%5By%5D.scrny%0A%09%09self.mapy = y%0A%09%09self.rect = pygame.rect.Rect((self.scrnx * self.level.tilex, self.scrny * self.level.tiley), self.image.get_size())
+mapx = x%0A%09%09self.mapy = y%0A%09%09%0A%09def position(self,x,y):
%0A%09%09
-#
self
@@ -608,17 +608,16 @@
x = x%0A%09%09
-#
self.scr
@@ -629,94 +629,8 @@
y%0A%09%09
-#if self.firstflag:%0A%09%09#%09self.mapx = x%0A%09%09#%09self.mapy = y%0A%09%09#%09self.firstflag = False%0A%09%09#
self
@@ -659,12 +659,19 @@
ct((
-x *
+self.scrnx*
self
@@ -688,12 +688,19 @@
ex,
-y *
+self.scrny*
self
@@ -972,16 +972,57 @@
ind = 3%0A
+%09%09if itype == 4:%0A%09%09%09xind = 4%0A%09%09%09yind = 4%0A
%09%09%09%0A%09%09se
@@ -1155,61 +1155,8 @@
))%0A%0A
-%09#def reveal(self):%0A%09#%09self.image = self.secretimage%0A
%0A%09de
@@ -1405,8 +1405,92 @@
turn -1%0A
+%09%09%0A%09def draw(self):%0A%09%09self.level.screen.blit(self.image, (self.rect.x,self.rect.y))%0A
|
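For readability, the spawn/position/draw methods as they read after the hunks are applied, reconstructed from the diff (the original file indents with tabs, normalised to spaces here):

def spawn(self, x, y):
    self.mapx = x
    self.mapy = y

def position(self, x, y):
    self.scrnx = x
    self.scrny = y
    self.rect = pygame.rect.Rect(
        (self.scrnx * self.level.tilex, self.scrny * self.level.tiley),
        self.image.get_size())

def draw(self):
    self.level.screen.blit(self.image, (self.rect.x, self.rect.y))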
5b524ae21920f05de32737f8587918112a1e2e4d
|
Fix a couple of pylint issues
|
lisp.py
|
lisp.py
|
"""List processing based on a simple formatting language
Create an instance of Parser, passing a template string and a substitution
dictionary to the constructor. Then use its 'sub' method to replace groups of
elements of a list with one new element obtained by substituting values of the
elements into the template string.
To learn more about the template string syntax, take a look at the file
TemplateSyntax.md in the 'doc' directory.
"""
import re
class Range(object):
def __init__(self, start, length):
self.start = start
self.end = start + length
self.span = (self.start, self.end)
class Parser(object):
"""List processor for element replacement
Initialize it with a template string and a dictionary which contains
functions for searching and replacing tokens defined in the template
string.
The 'search' method finds the first matching sequence of elements in a
list.
The 'sub' method is used to substitute elements in a list.
"""
def __init__(self, template, meta=None):
"""Initialize the instance with a template string
Arguments:
template -- template string
meta -- a dictionary with functions needed for searching,
replacing and transforming list elements
"""
pattern_str, body_str = [x.strip() for x in template.split('=')]
self.meta = meta or {}
self.pattern = Pattern(pattern_str, self.meta)
self.body = Body(body_str, self.meta)
def search(self, list_):
"""Return the range of the first matching sequence in list_"""
return self.pattern.search(list_)
def sub(self, list_):
"""Perform substitution on the given list
Each sequence of elements matching the pattern defined in the template
string will be replaced by exactly one element.
Returns a two-element tuple. The first element is a new list which is
the result of processing list_. The second element is a list of tuples;
each tuple stores the range of each sequence of replaced elements.
"""
result = list_[:]
ranges = []
while True:
m = self.search(result)
if not m:
break
sub_range = (m.start + self.pattern.insets[0],
m.end + self.pattern.insets[1])
result[slice(*sub_range)] = [self.body.format(self.pattern.subs)]
ranges.append(sub_range)
return result, ranges
class Pattern(object):
"""Encapsulates a list of tokens for matching"""
def __init__(self, pattern, meta):
self.tokens = [] # tokens to match against
self.subs = [] # substitutions (tokens with values)
self.offset = 0 # will be set to 1 if pattern has the ^ anchor
self._build(pattern, meta)
def padded_list(self, index, length):
"""Return a list of tokens padded with None values at the beginning"""
start_index = max(0, self.offset - index)
new_list = [None] * (index - self.offset) + self.tokens[start_index:]
return new_list[:length]
def search(self, list_):
"""Search for a sequence of elements matching the pattern
Return a Range of the first sequence of matching elements.
"""
list_len = len(list_)
pattern_len = self.length
if list_len < pattern_len:
return
# Run through list_ looking for a matching sequence of elements
cmp_fn = lambda token, x: (token is None) or token.matches(x)
for i in range(list_len - pattern_len + 1):
if all(map(cmp_fn, self.padded_list(i, list_len), list_)):
return Range(i, pattern_len)
def _build(self, pattern, meta):
"""Build the 'tokens' and 'subs' lists"""
elements = re.split(r'\s+', pattern)
insets = [0, 0]
left_side = True
isliteral = lambda token: type(token) is not MatcherToken
for (i, elem) in enumerate(elements):
token, isphantom = parse_token(elem, meta)
if isphantom:
if left_side:
insets[0] += 1
else:
insets[1] -= 1
else:
left_side = False
self.tokens.append(token)
if (not isphantom) and (not isliteral(token)):
self.subs.append(token)
self.offset = int(elements[0] == '^')
right_offset = int(elements[-1] == '$')
self.length = len(self.tokens) - self.offset - right_offset
self.insets = tuple(insets)
class Body(object):
"""The body defines a replacement for a matching sequence of elements"""
def __init__(self, body, meta):
def wrap_fn(wrapper, fn):
return lambda x: wrapper(fn(x))
def repl_fn(m):
fn = lambda token: meta[token.name + "~replace"](token.value)
wrappers = filter(bool, m.group(1).split(':'))
for w in wrappers:
fn = wrap_fn(meta[w], fn)
self.format_list.append(fn)
return "{}"
# Here we look at each substitution token enclosed in { and }. Inside
# repl_fn, we gather all of the modifiers into a single function using
# wrap_fn. Then this function is appended to the format_list.
self.format_list = []
self.format_str = re.sub(r'{(.*?)}', repl_fn, body)
def format(self, tokens):
"""Returns a final string after substituting token values"""
format_args = [f(x) for f, x in zip(self.format_list, tokens)]
return self.format_str.format(*format_args)
class AnchorToken(object):
"""The anchor token allows to match at either end of a list
This token is represented by ^ and $ symbols in the template string syntax.
"""
def matches(self, obj):
return False
class LiteralToken(object):
"""Literal token simply matches the string it is given"""
def __init__(self, string):
self.string = string
def matches(self, obj):
return obj == self.string
class MatcherToken(object):
"""Matcher token uses a function to match against an element
It also stores the value of the element it matches
"""
def __init__(self, fn, name):
self.fn = fn
self.name = name
self.value = None
def matches(self, obj):
self.value = obj
return self.fn(obj)
def parse_token(string, meta, phantom=False):
"""Determine the type of the token in string
Return a tuple with the token and a phantom flag
"""
# Check for anchors first
if string in ['^', '$']:
return AnchorToken(), False
# Now check for phantoms
m = re.match(r'^\((.+?)\)$', string)
if m:
return parse_token(m.group(1), meta, phantom=True)
# The rest of tokens
m = re.match(r'^<(.+?)>$', string)
if m:
name = m.group(1)
fn = meta[name + "~find"]
token = MatcherToken(fn, name)
else:
token = LiteralToken(string)
return token, phantom
|
Python
| 0.000883
|
@@ -2855,16 +2855,118 @@
anchor%0A
+ self.insets = (0, 0) # a bias applied to the range of substitution%0A self.length = 0%0A
@@ -4145,40 +4145,25 @@
for
- (i,
elem
-)
in
-enumerate(
elements
):%0A
@@ -4158,17 +4158,16 @@
elements
-)
:%0A
@@ -5998,35 +5998,33 @@
f matches(self,
-obj
+_
):%0A retur
|
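The hunks above address three common pylint complaints: attributes defined outside __init__ (W0201), an enumerate() whose index is never used, and an unused argument. A compact illustration, using a throwaway class rather than the original Pattern/AnchorToken:

class Pattern(object):
    def __init__(self):
        # W0201: define every attribute in __init__, not in a helper
        self.insets = (0, 0)
        self.length = 0

    def build(self, elements):
        for elem in elements:      # drop enumerate() when the index is unused
            self.length += len(elem)

class AnchorToken(object):
    def matches(self, _):          # unused argument renamed to underscore
        return False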
f408b1368b641be2349266a59b32f7fd1fa53265
|
Fix a couple of minor bugs
|
stream.py
|
stream.py
|
from StringIO import StringIO
import sys
from eventlet.corolocal import local
_installed = False
_save_out = None
_save_err = None
class _StreamLocal(local):
def __init__(self):
# Initialize the output and error streams
self.out = StringIO()
self.err = StringIO()
_stlocal = _StreamLocal()
def pop():
# Get the out stream contents, then close and replace the stream
out = _stlocal.out.getvalue()
_stlocal.out.close()
_stlocal.out = StringIO()
# Ditto with the error stream contents
err = _stlocal.err.getvalue()
_stlocal.err.close()
_stlocal.err = StringIO()
return out, err
class StreamProxy(object):
def __init__(self, stname):
# Save the stream name of interest
self._stname = stname
def __getattr__(self, attr):
# Proxy out to the appropriate stream
return getattr(getattr(_stlocal, self._stname), attr)
def __setattr__(self, attr, value):
# Proxy out to the appropriate stream
return setattr(getattr(_stlocal, self._stname), attr, value)
def __delattr__(self, attr):
# Proxy out to the appropriate stream
return delattr(getattr(_stlocal, self._stname), attr)
def install():
global _installed
global _save_out
global _save_err
# Do nothing if we're already installed
if _installed:
return
# Remember that we've been installed
_installed = True
# Save original stdout and stderr
_save_out = sys.stdout
_save_err = sys.stderr
# Replace them with StreamProxy instances
sys.stdout = StreamProxy('out')
sys.stderr = StreamProxy('err')
def uninstall():
global _save_out
global _save_err
# Do nothing if we haven't been installed
if not _installed:
return
# Restore original stdout and stderr
sys.stdout = _save_out
sys.stderr = _save_err
# Reset our state
_save_out = None
_save_err = None
_installed = False
|
Python
| 0.000029
|
@@ -760,28 +760,62 @@
s
-elf.
+uper(StreamProxy, self).__setattr__('
_stname
- =
+',
stname
+)
%0A%0A
@@ -1698,32 +1698,54 @@
ef uninstall():%0A
+ global _installed%0A
global _save
|
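The first hunk matters because StreamProxy defines its own __setattr__: a plain self._stname = stname would be proxied to the stream instead of stored on the instance, and since __setattr__ itself reads self._stname, which is not set yet, __getattr__ fires and the call recurses. Routing the write through object's __setattr__ sidesteps the override. The second hunk adds the missing global _installed so reading and resetting the flag inside uninstall() works at all. A minimal sketch of the first fix:

class Proxy(object):
    def __init__(self, stname):
        # self._stname = stname would dispatch through our own
        # __setattr__ below and never store the name on the instance:
        super(Proxy, self).__setattr__('_stname', stname)

    def __setattr__(self, attr, value):
        print('proxying %s = %r' % (attr, value))   # stand-in for the real proxy

p = Proxy('out')    # stores _stname silently
p.softspace = 0     # prints: proxying softspace = 0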
096b4f6c3707455c30de4c623b9fe92113b61d33
|
attach span to where query only if needed
|
ui/app/index/views.py
|
ui/app/index/views.py
|
import sys
import string
import json
import time
from urllib import unquote
from flask import request, redirect, render_template
from . import index
from .. import db
@index.route('/', methods=['GET'])
def index():
# read GET values
spanName = request.args.get('spanName')
serviceName = request.args.get('serviceName') or unquote(request.cookies.get('last-serviceName'))
timestamp = request.args.get('timestamp')
limit = request.args.get('limit') or 10
formSubmitted = True
if timestamp is None or timestamp.strip() == '':
formSubmitted = False
timestamp = int(time.time() * 1000000)
# get database engine connection
connection = db.engine.connect()
# query results
traceResults = None
# query database based on query parameters if service is given
if formSubmitted:
# query results that would be sent over to view
traceResults = []
# find all traces to which related to this service
query = "SELECT DISTINCT trace_id \
FROM zipkin_annotations "
# where
whereQuery = ''
if serviceName is not None and len(serviceName) > 0:
whereQuery += " service_name = '%s' " % serviceName
if spanName is not None and len(spanName) > 0 and spanName is not 'all':
whereQuery += " AND span_name = '%s' " % spanName
if timestamp is not None and len(timestamp) > 0:
whereQuery += " AND a_timestamp < %s " % timestamp
# attach where clause only if there is a criteria
if len(whereQuery) > 0:
whereQuery = " WHERE %s " % whereQuery
query += whereQuery
# order by
orderByQuery = " ORDER BY a_timestamp DESC"
query += orderByQuery
# limit search results
limitQuery = ""
if limit is not None:
limitQuery += " LIMIT 0, %s" % limit
query += limitQuery
traceIds = []
resultTraceIds = connection.execute(query)
for row in resultTraceIds:
traceIds.append(row['trace_id'])
if len(traceIds) > 0:
# find the number of DISTINCT spans, that above service connects with
query = "SELECT COUNT(DISTINCT span_id) as spanCount, parent_id, created_ts, trace_id \
FROM zipkin_spans \
GROUP BY trace_id \
HAVING \
trace_id IN (%s) \
ORDER BY created_ts DESC" \
% (",".join(str(traceId) for traceId in traceIds))
result = connection.execute(query)
for row in result:
trace = {}
trace['serviceName'] = serviceName
trace['spanCount'] = row['spanCount']
trace['trace_id'] = row['trace_id']
startTime = (int(row['created_ts']) / 1000000)
trace['startTime'] = time.strftime('%m-%d-%YT%H:%M:%S%z', time.gmtime(startTime))
servicesQuery = "SELECT service_name, `value`, a_timestamp \
FROM zipkin_annotations \
WHERE trace_id = %s AND \
`value` IN ('cs', 'sr', 'ss', 'cr') \
ORDER BY service_name ASC" % (row['trace_id'])
servicesResult = connection.execute(servicesQuery)
services = {}
service = None
for serviceRow in servicesResult:
if serviceRow['service_name'] not in services:
services[serviceRow['service_name']] = {}
service = services[serviceRow['service_name']]
service['count'] = 0
if serviceRow['value'] == 'sr':
service['count'] += 1
service[serviceRow['value']] = serviceRow['a_timestamp']
duration = 0
serviceDuration = 0
serviceDurations = []
minTimestamp = sys.maxint
maxTimestamp = 0
selectedServiceDuration = 0
for key in services:
service = services[key]
if 'cs' in service:
minTimestamp = min(service['cr'], minTimestamp)
maxTimestamp = max(service['cs'], maxTimestamp)
serviceDuration = service['cr'] - service['cs']
else:
minTimestamp = min(service['sr'], minTimestamp)
maxTimestamp = max(service['ss'], maxTimestamp)
serviceDuration = service['ss'] - service['sr']
if serviceName == key:
selectedServiceDuration = serviceDuration
# service duration
serviceDurations.append({
'name': key,
'count': service['count'],
'duration': serviceDuration
})
# adding up duration to get total duration time
duration = duration + serviceDuration
# total duration for a trace
trace['duration'] = duration
# service durations
# sort service durations
serviceDurations = sorted(serviceDurations, key=lambda x: x['name'])
trace['serviceDurations'] = serviceDurations
#trace['serviceTimestampMin'] = minTimestamp
#trace['serviceTimestampMax'] = maxTimestamp
servicesTotalDuration = (maxTimestamp - minTimestamp) / 1000
trace['servicesTotalDuration'] = '{:.3f}'.format(servicesTotalDuration)
selectedServicePercentage = int(((selectedServiceDuration / servicesTotalDuration) * 100) / 1000)
trace['selectedServicePercentage'] = selectedServicePercentage
traceResults.append( trace )
#return json.dumps(traceResults)
# populate services
services = []
result = connection.execute("SELECT DISTINCT service_name FROM zipkin_annotations")
for row in result:
services.append( row['service_name'] )
spans = []
if serviceName:
query = "SELECT DISTINCT span_name FROM zipkin_annotations WHERE service_name='%s'" % serviceName
result = connection.execute(query)
for row in result:
spans.append( row['span_name'] )
if len(spans) > 0:
spans.insert(0, 'all')
# close connection
connection.close()
return render_template('index.html', \
results=traceResults, \
services=services, spans=spans, \
get_SpanName=spanName, get_ServiceName=serviceName, \
get_Timestamp=timestamp, get_Limit=limit)
|
Python
| 0.000001
|
@@ -1299,22 +1299,18 @@
panName
-is not
+!=
'all':%0A
|
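The fix above replaces `is not` with `!=`: `is` tests object identity, and whether two equal strings are the same object is a CPython interning detail, so the original condition could treat 'all' as a real span name. A quick demonstration (newer CPythons even warn about `is` with a literal):

a = ''.join(['al', 'l'])   # equal to 'all', but a distinct object
print(a == 'all')          # True  -- value equality, what the code needs
print(a is 'all')          # typically False -- identity, what 'is' tests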
04745291580ac4feb6339516dbd9eaea24619850
|
Print channel name
|
sudoku.py
|
sudoku.py
|
import sys
from platform import machine
import os
import datetime
import json
# Taken from conda.config
_sys_map = {'linux2': 'linux', 'linux': 'linux',
'darwin': 'osx', 'win32': 'win'}
non_x86_linux_machines = {'armv6l', 'armv7l', 'ppc64le'}
platform = _sys_map.get(sys.platform, 'unknown')
bits = 8 * tuple.__itemsize__
if platform == 'linux' and machine() in non_x86_linux_machines:
arch_name = machine()
subdir = 'linux-%s' % arch_name
else:
arch_name = {64: 'x86_64', 32: 'x86'}[bits]
subdir = '%s-%d' % (platform, bits)
REPODATA = {
"info": {
"arch": arch_name,
"platform": platform,
},
"packages": {}
}
def generate_info(name, version, depends):
return {
"{name}-{version}-0.tar.bz2".format(name=name, version=version): {
"build": "0",
"build_number": 0,
"date": datetime.date.today().strftime("%Y-%m-%d"),
"depends": depends,
"name": name,
"size": 0,
"version": str(version)
}
}
def generate_cells():
packages = {}
for row in range(1, 10):
for column in range(1, 10):
for entry in range(1, 10):
depends = ["sudoku"]
for d in range(1, 10):
if d == entry:
continue
# Each entry being set (version 1) requires that the other
# entries are not set (version 0)
depends.append("%sx%s-is-%s 0" % (row, column, d))
for other_row in range(1, 10):
if other_row == row:
continue
# If an entry is set, other cells in the same column can't
# have the same entry.
depends.append("%sx%s-is-%s 0" % (other_row, column, entry))
for other_column in range(1, 10):
if other_column == column:
continue
# If an entry is set, other cells in the same row can't
# have the same entry.
depends.append("%sx%s-is-%s 0" % (row, other_column, entry))
                # x - (x - 1)%3 is the largest of 1, 4, 7 that is less than or equal to x
top_corner = (row - (row - 1)%3, column - (column - 1)%3)
for i in range(9):
cell = (top_corner[0] + i//3, top_corner[1] + i%3)
# If an entry is set, other cells in the same 3x3 square
# can't have the same entry.
depends.append("%sx%s-is-%s 0" % (cell[0], cell[1], entry))
p1 = generate_info("%sx%s-is-%s" % (row, column, entry), 1, depends)
p0 = generate_info("%sx%s-is-%s" % (row, column, entry), 0, [])
packages.update({**p0, **p1})
return packages
# In addition to the usual rules of sudoku, we need to assert that each cell
# has an entry set. We do this by creating nine versions of a metapackage for
# each cell which each depend on an entry.
def generate_cell_metapackages():
packages = {}
for row in range(1, 10):
for column in range(1, 10):
for entry in range(1, 10):
p = generate_info("cell-%sx%s" % (row, column), entry,
["%sx%s-is-%s" % (row, column, entry)])
packages.update(p)
return packages
# Finally, we have one metapackage "sudoku" that depends on all the "cell"
# metapackages.
def generate_sudoku_metapackage():
return generate_info("sudoku", 0,
["cell-%sx%s" % (row, column)
for row in range(1, 10)
for column in range(1, 10)])
if __name__ == '__main__':
packages = {
**generate_sudoku_metapackage(),
**generate_cell_metapackages(),
**generate_cells()
}
if not os.path.isdir(platform):
os.makedirs(platform)
with open(os.path.join(platform, "repodata.json"), 'w') as f:
r = REPODATA.copy()
r["packages"] = packages
json.dump(r, f, indent=2, sort_keys=True)
print("Wrote repodata.json")
|
Python
| 0.000001
|
@@ -4151,8 +4151,65 @@
.json%22)%0A
+ print(%22Use conda -c file://%22 + os.path.abspath(%22.%22))%0A
|
cbcb89a7a3ee4884768e272bbe3435bb6e08d224
|
Add constraints for the same column and the same row
|
sudoku.py
|
sudoku.py
|
import datetime
def generate_info(name, version, depends):
return {
"{name}-{version}-0.tar.bz2".format(name=name, version=version): {
"build": "0",
"build_number": 0,
"date": datetime.date.today().strftime("%Y-%m-%d"),
"depends": depends,
"name": name,
"size": 0,
"version": str(version)
}
}
REPODATA = {
"info": {
"arch": "x86_64",
"platform": "osx"
},
"packages": {}
}
def generate_cells():
packages = {}
for row in range(1, 10):
for column in range(1, 10):
for entry in range(1, 10):
depends = []
for d in range(1, 10):
if d == entry:
continue
# Each entry being 1 requires that the other entries be 0
depends.append("%sx%s-is-%s 0" % (row, column, d))
p1 = generate_info("%sx%s-is-%s" % (row, column, entry), 1,
depends)
p0 = generate_info("%sx%s-is-%s" % (row, column, entry), 0, [])
packages.update({**p0, **p1})
return packages
print(generate_cells())
|
Python
| 0.000009
|
@@ -661,16 +661,47 @@
depends
+1 = %5B%5D%0A depends0
= %5B%5D%0A
@@ -844,17 +844,31 @@
y being
-1
+set (version 1)
require
@@ -887,21 +887,62 @@
ther
- entries be 0
+%0A # entries are not set (version 0)
%0A
@@ -965,16 +965,17 @@
depends
+1
.append(
@@ -1031,26 +1031,270 @@
-p1 = generate_info
+for other_row in range(1, 10):%0A if other_row == row:%0A continue%0A # If an entry is set, other cells in the same column can't%0A # have the same entry.%0A depends1.append
(%22%25s
@@ -1294,37 +1294,45 @@
end(%22%25sx%25s-is-%25s
+ 0
%22 %25 (
+other_
row, column, ent
@@ -1338,40 +1338,427 @@
try)
-, 1,%0A depends
+)%0A%0A for other_column in range(1, 10):%0A if other_column == column:%0A continue%0A # If an entry is set, other cells in the same row can't%0A # have the same entry.%0A depends1.append(%22%25sx%25s-is-%25s 0%22 %25 (row, other_column, entry))%0A%0A p1 = generate_info(%22%25sx%25s-is-%25s%22 %25 (row, column, entry), 1, depends1
)%0A
@@ -1831,18 +1831,24 @@
ry), 0,
-%5B%5D
+depends0
)%0A
|
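The hunks above add, for each cell/entry pair, dependencies forcing version 0 of the same entry everywhere else in that row and column. Spelled out for one concrete cell, using the same format strings as the patched code:

row, column, entry = 1, 1, 5
depends1 = []
for other_row in range(1, 10):
    if other_row != row:
        # setting (1,1) to 5 forbids 5 elsewhere in column 1
        depends1.append("%sx%s-is-%s 0" % (other_row, column, entry))
for other_column in range(1, 10):
    if other_column != column:
        # ... and forbids 5 elsewhere in row 1
        depends1.append("%sx%s-is-%s 0" % (row, other_column, entry))
print(len(depends1))   # 16: eight column peers plus eight row peers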
2fb9e916155fce16a807c1c7eebf4a607c22ef94
|
Correct Celery support to be backwards compatible (fixes GH-124)
|
raven/contrib/celery/__init__.py
|
raven/contrib/celery/__init__.py
|
"""
raven.contrib.celery
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
try:
from celery.task import task
except ImportError:
from celery.decorators import task
from celery.signals import after_setup_logger, task_failure
from raven.base import Client
from raven.handlers.logging import SentryHandler
class CeleryMixin(object):
def send_encoded(self, message):
"Errors through celery"
self.send_raw.delay(message)
@task(routing_key='sentry')
def send_raw(self, message):
return super(CeleryMixin, self).send_encoded(message)
class CeleryClient(CeleryMixin, Client):
pass
class CeleryFilter(object):
def filter(self, record):
if record.funcName in ('_log_error',):
return 0
else:
return 1
def register_signal(client):
@task_failure.connect(weak=False)
def process_failure_signal(sender, task_id, exception, args, kwargs,
traceback, einfo, **kw):
client.captureException(
exc_info=einfo.exc_info,
extra={
'task_id': task_id,
'task': sender,
'args': args,
'kwargs': kwargs,
})
@after_setup_logger.connect(weak=False)
def process_logger_event(sender, logger, loglevel, logfile, format,
colorize, **kw):
import logging
logger = logging.getLogger()
handler = SentryHandler(client)
if handler.__class__ in map(type, logger.handlers):
return False
handler.setLevel(logging.ERROR)
handler.addFilter(CeleryFilter())
logger.addHandler(handler)
|
Python
| 0
|
@@ -918,46 +918,8 @@
t):%0A
- @task_failure.connect(weak=False)%0A
@@ -1284,41 +1284,57 @@
%7D)%0A
-%0A
-@after_setup_logger.connect(
+task_failure.connect(process_failure_signal,
weak
@@ -1341,16 +1341,17 @@
=False)%0A
+%0A
def
@@ -1762,8 +1762,73 @@
andler)%0A
+ after_setup_logger.connect(process_logger_event, weak=False)%0A
|
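The hunks above stop using Signal.connect as a decorator with arguments and call it explicitly after each handler is defined, presumably because the decorator form was unavailable on the older Celery releases this commit targets. A stand-in sketch of the moving parts (not celery's actual Signal class):

class Signal(object):
    def __init__(self):
        self.receivers = []

    def connect(self, receiver, weak=False):
        self.receivers.append(receiver)
        return receiver

task_failure = Signal()

def process_failure_signal(**kwargs):
    print('failure handled')

task_failure.connect(process_failure_signal, weak=False)   # explicit form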
3f09216ed6afc8fc2173dae40c4776fef4b4d4d2
|
update docstring, clarify variable name
|
matchzoo/datapack.py
|
matchzoo/datapack.py
|
"""Matchzoo DataPack, pair-wise tuple (feature) and context as input."""
import typing
from pathlib import Path
import dill
import pandas as pd
class DataPack(object):
"""
Matchzoo DataPack data structure, store dataframe and context.
Example:
>>> features = [([1,3], [2,3]), ([3,0], [1,6])]
>>> context = {'vocab_size': 2000}
>>> dp = DataPack(data=features,
... context=context)
>>> type(dp.sample(1))
<class 'matchzoo.datapack.DataPack'>
>>> len(dp)
2
>>> features, context = dp.dataframe, dp.context
>>> context
{'vocab_size': 2000}
"""
DATA_FILENAME = 'data.dill'
def __init__(self,
data: list,
context: dict={}):
"""Initialize."""
self._dataframe = pd.DataFrame(data)
self._context = context
def __len__(self) -> int:
"""Get size of the data pack."""
return self._dataframe.shape[0]
@property
def dataframe(self):
"""Get data frame."""
return self._dataframe
@property
def context(self):
"""Get context of `DataPack`."""
return self._context
def sample(self, number, replace=True):
"""
Sample records from `DataPack` object, for generator.
:param number: number of records to be sampled, use `batch_size`.
:param replace: sample with replacement, default value is `True`.
:return data_pack: return `DataPack` object including sampled data
and context (shallow copy of the context`).
"""
return DataPack(self._dataframe.sample(n=number, replace=replace),
self._context.copy())
def append(self, new_data_pack: 'DataPack'):
"""
Append a new `DataPack` object to current `DataPack` object.
It should be noted that the context of the previous `DataPack`
will be updated by the new one.
:param new_data_pack: A new DataPack object.
:param overwite_context: Allow overwrite common context by new context.
"""
new_dataframe = new_data_pack.dataframe
new_context = new_data_pack.context
self._dataframe = self._dataframe.append(
new_dataframe,
ignore_index=True)
self.context.update(new_context)
def save(self, dirpath: typing.Union[str, Path]):
"""
Save the `DataPack` object.
A saved `DataPack` is represented as a directory with two files.
        One is the `DataPack` records (transformed user input as features),
        the other one is fitted context parameters such as `vocab_size`.
Both of them will be saved by `pickle`.
:param dirpath: directory path of the saved `DataPack`.
"""
dirpath = Path(dirpath)
if dirpath.exists():
raise FileExistsError
else:
dirpath.mkdir()
data_file_path = dirpath.joinpath(self.DATA_FILENAME)
dill.dump(self, open(data_file_path, mode='wb'))
def load_datapack(dirpath: typing.Union[str, Path]) -> DataPack:
"""
Load a `DataPack`. The reverse function of :meth:`DataPack.save`.
:param dirpath: directory path of the saved model
:return: a :class:`DataPack` instance
"""
dirpath = Path(dirpath)
data_file_path = dirpath.joinpath(DataPack.DATA_FILENAME)
dp = dill.load(open(data_file_path, 'rb'))
return dp
|
Python
| 0.000001
|
@@ -932,29 +932,46 @@
Get
-size of
+numer of rows in
the
-data p
+%60DataP
ack
+%60 object
.%22%22%22
@@ -1790,29 +1790,21 @@
d(self,
-new_data_pack
+other
: 'DataP
@@ -2023,37 +2023,28 @@
ram
-new_data_pack: A new
+other: the %60
DataPack
obj
@@ -2043,95 +2043,31 @@
Pack
+%60
object
-.%0A :param overwite_context: Allow overwrite common context by new context
+ to be appended
.%0A
@@ -2076,35 +2076,37 @@
%22%22%22%0A
-new
+other
_dataframe = new
@@ -2102,29 +2102,21 @@
frame =
-new_data_pack
+other
.datafra
@@ -2126,19 +2126,21 @@
-new
+other
_context
@@ -2146,21 +2146,13 @@
t =
-new_data_pack
+other
.con
@@ -2218,19 +2218,21 @@
-new
+other
_datafra
@@ -2294,19 +2294,21 @@
.update(
-new
+other
_context
@@ -2479,194 +2479,104 @@
ith
-two files.%0A One is a %60DataPack%60 records (transformed user input as features),%0A the otehr one is fitted context parameters such as %60vocab_size%60.%0A Both of them will be
+a %60DataPack%60%0A object (transformed user input as features and context), it will be%0A
sav
|
4582a8a77eaa3a21a50fdbf49d19142be9812cfe
|
remove print statement
|
tajima.py
|
tajima.py
|
#! /usr/bin/env python
"""
usage:
tb.py tajima [--no-header --extra] <window-size> <step-size> <vcf>
tb.py tajima [--no-header --extra] <window-size> --sliding <vcf>
options:
-h --help Show this screen.
--version Show version.
--window-size blah
  --step-size blah
--extra display extra
command:
tajima Calculate Tajima's D
output:
CHROM
BIN_START
BIN_END
N_Sites
N_SNPs
TajimaD
"""
from docopt import docopt
from subprocess import call, Popen, PIPE
from itertools import combinations
from utils.vcf import *
from math import isinf
import sys
import os
debug = None
class tajima(vcf):
"""
Subclass of the vcf object
used for calculating tajima's D
"""
def __init__(self, filename):
vcf.__init__(self,filename)
def calc_tajima(self, window_size, step_size, extra = False):
# Tajima D Constants
n = self.n*2
a1 = sum([1.0/i for i in xrange(1,n)])
a2 = sum([1.0/i**2 for i in xrange(1,n)])
b1 = (n + 1.0) / (3.0*(n - 1))
b2 = (2.0 * (n**2 + n + 3.0)) / \
(9.0 * n * (n -1.0))
c1 = b1 - (1.0 / a1)
c2 = b2 - ((n + 2.0) / (a1*n)) + (a2 / (a1**2) )
e1 = c1 / a1
e2 = c2 / (a1**2 + a2)
if args["--sliding"]:
shift_method = "POS-Sliding"
step_size = None
else:
shift_method = "POS-Interval"
for variant_interval in self.window(window_size= window_size, step_size = step_size, shift_method=shift_method):
pi = 0.0
S = 0
n_sites = 0
for variant in variant_interval:
n_sites += 1
AN = variant.INFO.get("AN") # c;AN : total number of alleles in called genotypes
AC = variant.INFO.get("AC") # j;AC : allele count in genotypes, for each ALT allele, in the same order as listed
try:
# Biallelic sites only!
pi += (2.0 * AC * (AN-AC)) / (AN * (AN-1.0))
S += 1
except:
pass
try:
CHROM = variant_interval[0].CHROM
tw = (S / a1)
var = (e1 * S) + ((e2 * S) * (S - 1.0))
TajimaD = (pi - tw) / \
(var)**(0.5)
if not isinf(TajimaD) and S > 0:
output = [CHROM,
variant_interval.lower_bound,
variant_interval.upper_bound,
n_sites,
S,
TajimaD]
if extra:
if step_size is None:
step_size = "NA"
output += [os.path.split(self.filename)[1],
window_size,
step_size]
yield "\t".join(map(str,output))
except:
pass
if len(sys.argv) == 1:
debug = ["tajima","100000", "10000", "~/Dropbox/AndersenLab/wormreagents/Variation/Andersen_VCF/20150731_WI_PASS.vcf.gz"]
if __name__ == '__main__':
args = docopt(__doc__,
version='VCF-Toolbox v0.1',
argv = debug)
if args["<vcf>"] == "":
print(__doc__)
print args
wz = int(args["<window-size>"].replace(",",""))
sz = None
if not args["--sliding"]:
sz = int(args["<step-size>"].replace(",",""))
if args["--no-header"] == False:
header_line = ["CHROM",
"BIN_START",
"BIN_END",
"N_Sites",
"N_SNPs",
"TajimaD"]
if args["--extra"]:
header_line += ["filename",
"window_size",
"step_size"]
print "\t".join(header_line)
for i in tajima(args["<vcf>"]).calc_tajima(wz, sz, extra = args["--extra"]):
print(i)
|
Python
| 0.999999
|
@@ -3447,23 +3447,8 @@
__)%0A
- print args%0A
|
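For reference, the Tajima's D constants in calc_tajima depend only on sample size; a standalone computation with the same formulas (n is the number of sampled chromosomes, illustratively 20):

n = 20
a1 = sum(1.0 / i for i in range(1, n))
a2 = sum(1.0 / i ** 2 for i in range(1, n))
b1 = (n + 1.0) / (3.0 * (n - 1))
b2 = (2.0 * (n ** 2 + n + 3.0)) / (9.0 * n * (n - 1.0))
c1 = b1 - 1.0 / a1
c2 = b2 - (n + 2.0) / (a1 * n) + a2 / a1 ** 2
e1 = c1 / a1
e2 = c2 / (a1 ** 2 + a2)
print(e1, e2)   # plug into D = (pi - S/a1) / sqrt(e1*S + e2*S*(S-1))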
dcd3d67b77946922ff3b133d062798bf2f3e5786
|
fix check_clang_tidy to forbid mixing of CHECK-NOTES and CHECK-MESSAGES
|
test/clang-tidy/check_clang_tidy.py
|
test/clang-tidy/check_clang_tidy.py
|
#!/usr/bin/env python
#
#===- check_clang_tidy.py - ClangTidy Test Helper ------------*- python -*--===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
r"""
ClangTidy Test Helper
=====================
This script runs clang-tidy in fix mode and verifies fixes, messages, or both.
Usage:
check_clang_tidy.py [-resource-dir=<resource-dir>] \
[-assume-filename=<file-with-source-extension>] \
[-check-suffix=<file-check-suffix>] \
<source-file> <check-name> <temp-file> \
-- [optional clang-tidy arguments]
Example:
// RUN: %check_clang_tidy %s llvm-include-order %t -- -- -isystem %S/Inputs
"""
import argparse
import os
import re
import subprocess
import sys
def write_file(file_name, text):
with open(file_name, 'w') as f:
f.write(text)
f.truncate()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-expect-clang-tidy-error', action='store_true')
parser.add_argument('-resource-dir')
parser.add_argument('-assume-filename')
parser.add_argument('-check-suffix', default='')
parser.add_argument('input_file_name')
parser.add_argument('check_name')
parser.add_argument('temp_file_name')
args, extra_args = parser.parse_known_args()
resource_dir = args.resource_dir
assume_file_name = args.assume_filename
input_file_name = args.input_file_name
check_name = args.check_name
temp_file_name = args.temp_file_name
expect_clang_tidy_error = args.expect_clang_tidy_error
file_name_with_extension = assume_file_name or input_file_name
_, extension = os.path.splitext(file_name_with_extension)
if extension not in ['.c', '.hpp', '.m', '.mm']:
extension = '.cpp'
temp_file_name = temp_file_name + extension
clang_tidy_extra_args = extra_args
if len(clang_tidy_extra_args) == 0:
clang_tidy_extra_args = ['--']
if extension in ['.cpp', '.hpp', '.mm']:
clang_tidy_extra_args.append('--std=c++11')
if extension in ['.m', '.mm']:
clang_tidy_extra_args.extend(
['-fobjc-abi-version=2', '-fobjc-arc'])
if args.check_suffix and not re.match('^[A-Z0-9\-]+$', args.check_suffix):
sys.exit('Only A..Z, 0..9 and "-" are allowed in check suffix, but "%s" was given' % (args.check_suffix))
file_check_suffix = ('-' + args.check_suffix) if args.check_suffix else ''
check_fixes_prefix = 'CHECK-FIXES' + file_check_suffix
check_messages_prefix = 'CHECK-MESSAGES' + file_check_suffix
check_notes_prefix = 'CHECK-NOTES' + file_check_suffix
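# A -check-suffix lets one test file carry several independent sets of CHECK lines.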
# Tests should not rely on STL being available, and instead provide mock
# implementations of relevant APIs.
clang_tidy_extra_args.append('-nostdinc++')
if resource_dir is not None:
clang_tidy_extra_args.append('-resource-dir=%s' % resource_dir)
with open(input_file_name, 'r') as input_file:
input_text = input_file.read()
has_check_fixes = check_fixes_prefix in input_text
has_check_messages = check_messages_prefix in input_text
has_check_notes = check_notes_prefix in input_text
if not has_check_fixes and not has_check_messages and not has_check_notes:
sys.exit('%s, %s or %s not found in the input' % (check_fixes_prefix,
check_messages_prefix, check_notes_prefix) )
# Remove the contents of the CHECK lines to avoid CHECKs matching on
# themselves. We need to keep the comments to preserve line numbers while
# avoiding empty lines which could potentially trigger formatting-related
# checks.
cleaned_test = re.sub('// *CHECK-[A-Z0-9\-]*:[^\r\n]*', '//', input_text)
write_file(temp_file_name, cleaned_test)
original_file_name = temp_file_name + ".orig"
write_file(original_file_name, cleaned_test)
args = ['clang-tidy', temp_file_name, '-fix', '--checks=-*,' + check_name] + \
clang_tidy_extra_args
if expect_clang_tidy_error:
args.insert(0, 'not')
print('Running ' + repr(args) + '...')
try:
clang_tidy_output = \
subprocess.check_output(args, stderr=subprocess.STDOUT).decode()
except subprocess.CalledProcessError as e:
print('clang-tidy failed:\n' + e.output.decode())
raise
print('------------------------ clang-tidy output -----------------------\n' +
clang_tidy_output +
'\n------------------------------------------------------------------')
try:
diff_output = subprocess.check_output(
['diff', '-u', original_file_name, temp_file_name],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
diff_output = e.output
print('------------------------------ Fixes -----------------------------\n' +
diff_output.decode() +
'\n------------------------------------------------------------------')
if has_check_fixes:
try:
subprocess.check_output(
['FileCheck', '-input-file=' + temp_file_name, input_file_name,
'-check-prefix=' + check_fixes_prefix, '-strict-whitespace'],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print('FileCheck failed:\n' + e.output.decode())
raise
if has_check_messages:
messages_file = temp_file_name + '.msg'
write_file(messages_file, clang_tidy_output)
try:
subprocess.check_output(
['FileCheck', '-input-file=' + messages_file, input_file_name,
'-check-prefix=' + check_messages_prefix,
'-implicit-check-not={{warning|error}}:'],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print('FileCheck failed:\n' + e.output.decode())
raise
if has_check_notes:
notes_file = temp_file_name + '.notes'
write_file(notes_file, clang_tidy_output)
try:
subprocess.check_output(
['FileCheck', '-input-file=' + notes_file, input_file_name,
'-check-prefix=' + check_notes_prefix,
'-implicit-check-not={{note|warning|error}}:'],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print('FileCheck failed:\n' + e.output.decode())
raise
if __name__ == '__main__':
main()
|
Python
| 0.000005
|
@@ -3386,16 +3386,139 @@
fix) )%0A%0A
+ if has_check_notes and has_check_messages:%0A sys.exit('Please use either CHECK-NOTES or CHECK-MESSAGES but not both')%0A%0A
# Remo
|
7f0617dc8eed2d7b2da6b395f64f06ec1947072f
|
Support models as constants
|
tastytools/test/resources.py
|
tastytools/test/resources.py
|
from django.db.models.fields.related import ManyToManyField, ManyRelatedObjectsDescriptor
from django.db.models.fields.related import ForeignRelatedObjectsDescriptor
from django.db import IntegrityError, DatabaseError
from django.db.utils import ConnectionDoesNotExist
from django.core.management import call_command
import sys
class Related(object):
'''Constants holder class for various types of data generation modes'''
Model = "MODEL"
Uri = "URI"
Full = "FULL"
class TestData(object):
def __init__(self, api, force=None, related=None, id=None):
self.api = api
self.force = force or {}
self.related = related
self.data = {}
self.related_data = []
self.id = id
def __getitem__(self, name):
return self.data[name]
def __setitem__(self, name, value):
self.data[name] = value
def __delitem__(self, name):
del self.data[name]
def update(self, data):
return self.data.update(data)
def to_dict(self):
return self.data
def set_related(self, obj):
for args in self.related_data:
args['force'] = {args['related_name']: obj}
del args['related_name']
self.set(**args)
def set(self, name, constant=None, resource=None, count=None,
force=False, related_name=False, id=None):
if related_name:
self.related_data.append({
'name': name,
'constant': constant,
'resource': resource,
'count': count,
'related_name': related_name,
})
return
value = None
force = force or {}
if name in self.force:
value = self.force[name]
elif resource is not None:
if count > 0:
value = []
while count > 0:
res = self.create_test_data(resource,
related=self.related, force=force, id=id)
value.append(res)
count -= 1
else:
value = self.create_test_data(resource,
related=self.related, force=force, id=id)
#elif constant is not None:
else:
value = constant
#else:
# raise Exception("Expected resource or constant")
self.data[name] = value
return value
def create_test_data(self, resource_name, related=Related.Model,
force=False, id=None):
force = force or {}
resource = self.api.resource(resource_name)
#resource.start_test_session(self.test_session)
(uri, res) = resource.create_test_resource(force, id=id)
if related == Related.Uri:
return uri
elif related == Related.Model:
return res
elif related == Related.Full:
return self.api.dehydrate(resource=resource_name, obj=res)
raise Exception("Missing desired related type. Given: %s" % related)
class ResourceTestData(object):
test_session = None
def __init__(self, api, resource=None, db=None):
'''Constructor - requires the resource name or class to be registered
on the given api.'''
if resource is None:
resource = self.resource
if resource is None:
msg = "ResourceTestData initialized without a resource. "\
"Did you forget to override the constructor?"
raise Exception(msg)
self.api = api
if type(resource) is str:
resource = self.api.resource(resource)
self.resource = resource
self.db = db
@property
def post(self):
'''Returns sample POST data for the resource.'''
return self.sample_data(related=Related.Uri).data
@property
def get(self):
'''Returns sample GET data for the resource.'''
(location, model) = self.create_test_resource()
return self.api.dehydrate(resource=self.resource, obj=model)
def create_test_resource(self, force={}, *args, **kwargs):
'''Creates a test resource and obtains its URI
and related object'''
model = self.create_test_model(force=force, *args, **kwargs)
bundle = self.resource.build_bundle(obj=model)
location = self.resource.get_resource_uri(bundle)
return location, bundle.obj
def save_test_obj(self, model):
if self.db is not None:
databases = [self.db]
else:
databases = ['tastytools', 'test', '']
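# try each candidate database in turn, skipping connections that do not exist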
for db in databases:
try:
model.save(using=db)
except ConnectionDoesNotExist:
continue
if model.pk is None:
raise ConnectionDoesNotExist("Tried: %s" % ', '.join(databases))
def create_test_model(self, data=False, force=False, id=None, *args,
**kwargs):
'''Creates a test model (or object) associated with the resource and
returns it
'''
force = force or {}
data = data or self.sample_data(related=Related.Model, force=force,
id=id)
model_class = self.resource._meta.object_class
valid_data = {}
m2m = {}
class_fields = model_class._meta.get_all_field_names()
for field in class_fields:
try:
valid_data[field] = data[field]
try:
field_obj = model_class._meta.get_field(field)
is_m2m = isinstance(field_obj, ManyToManyField)
except Exception:
field_obj = getattr(model_class, field)
is_m2m = isinstance(field_obj,
ForeignRelatedObjectsDescriptor)
is_m2m = is_m2m or isinstance(field_obj, ManyRelatedObjectsDescriptor)
if is_m2m:
m2m[field] = data[field]
del valid_data[field]
except KeyError:
pass
model = model_class(**valid_data)
try:
# if we are running tests, use the default database
if 'test' in sys.argv:
databases = ['']
elif self.db is not None:
databases = [self.db]
else:
databases = ['tastytools', 'test', '']
for db in databases:
try:
model.save(using=db)
except ConnectionDoesNotExist:
continue
except DatabaseError:
try:
call_command('syncdb', migrate=True, database=db, interactive=False)
model.save(using=db)
except ConnectionDoesNotExist:
continue
except IntegrityError as e:
if id is not None:
model = model_class.objects.get(**valid_data)
#print "Got %s %s" % (model_class.__name__, id)
else:
raise e
#print model
for m2m_field, values in m2m.items():
if type(values) is not list:
values = [values]
for value in values:
getattr(model, m2m_field).add(value)
data.set_related(model)
return model
#@property
def sample_data(self, related=Related.Model, force=False, id=None):
'''Returns a full set of data as a _meta.testdata for
interacting with the resource
'''
data = TestData(self.api, force, related, id=id)
return self.get_data(data)
def get_data(self, data):
return data
|
Python
| 0
|
@@ -1997,34 +1997,50 @@
rce=force, id=id
+, model=constant
)%0A
-
@@ -2222,24 +2222,40 @@
force, id=id
+, model=constant
)%0A #e
@@ -2545,32 +2545,44 @@
e=False, id=None
+, model=None
):%0A force
@@ -2706,16 +2706,139 @@
ssion)%0A%0A
+ if model is not None:%0A uri = resource.get_resource_uri(model)%0A res = model%0A else:%0A
|
8fb149400a115fd0abf595c6716aed22c396eb86
|
remove call to curCycle in panic() The panic() function already prints the current tick value. This call to curCycle() is as such redundant. Since we are trying to move towards multiple clock domains, this call will print misleading time.
|
src/mem/slicc/ast/AST.py
|
src/mem/slicc/ast/AST.py
|
# Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.util import PairContainer, Location
class AST(PairContainer):
def __init__(self, slicc, pairs=None):
self.slicc = slicc
self.location = slicc.currentLocation()
self.pairs = {}
if pairs:
self.pairs.update(getattr(pairs, "pairs", pairs))
@property
def symtab(self):
return self.slicc.symtab
@property
def state_machine(self):
return self.slicc.symtab.state_machine
def warning(self, message, *args):
self.location.warning(message, *args)
def error(self, message, *args):
self.location.error(message, *args)
def embedError(self, message, *args):
if args:
message = message % args
code = self.slicc.codeFormatter()
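# build C++ that panics at runtime, embedding the SLICC source location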
code('''
panic("Runtime Error at ${{self.location}}, Ruby Time: %d, %s.\\n",
curCycle(), $message);
''')
return code
|
Python
| 0
|
@@ -2423,48 +2423,17 @@
on%7D%7D
-, Ruby Time: %25d, %25s.%5C%5Cn%22,%0A curCycle()
+: %25s.%5C%5Cn%22
, $m
|
80fafd59340bc749967880d04e429d5c077db34b
|
Add another lazy if
|
main.py
|
main.py
|
#!/usr/bin/python3
#
# The MIT License (MIT)
#
# Copyright (c) 2013 Andrian Nord
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import sys
import ljd.rawdump.parser
import ljd.pseudoasm.writer
import ljd.ast.builder
import ljd.ast.validator
import ljd.ast.locals
import ljd.ast.slotworks
import ljd.ast.unwarper
import ljd.ast.mutator
import ljd.lua.writer
def dump(name, obj, level=0):
indent = level * '\t'
if name is not None:
prefix = indent + name + " = "
else:
prefix = indent
if isinstance(obj, (int, float, str)):
print(prefix + str(obj))
elif isinstance(obj, list):
print (prefix + "[")
for value in obj:
dump(None, value, level + 1)
print (indent + "]")
elif isinstance(obj, dict):
print (prefix + "{")
for key, value in obj.items():
dump(key, value, level + 1)
print (indent + "}")
else:
print (prefix + obj.__class__.__name__)
for key in dir(obj):
if key.startswith("__"):
continue
val = getattr(obj, key)
dump(key, val, level + 1)
def main():
file_in = sys.argv[1]
header, prototype = ljd.rawdump.parser.parse(file_in)
if not prototype:
return 1
# TODO: args
# ljd.pseudoasm.writer.write(sys.stdout, header, prototype)
ast = ljd.ast.builder.build(prototype)
assert ast is not None
ljd.ast.validator.validate(ast, warped=True)
ljd.ast.mutator.pre_pass(ast)
# ljd.ast.validator.validate(ast, warped=True)
ljd.ast.locals.mark_locals(ast)
# ljd.ast.validator.validate(ast, warped=True)
ljd.ast.slotworks.eliminate_temporary(ast)
# ljd.ast.validator.validate(ast, warped=True)
if True:
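# lazy toggle: flip to False to skip unwarping while debugging the passes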
ljd.ast.unwarper.unwarp(ast)
# ljd.ast.validator.validate(ast, warped=False)
ljd.ast.locals.mark_local_definitions(ast)
# ljd.ast.validator.validate(ast, warped=False)
ljd.ast.mutator.primary_pass(ast)
ljd.ast.validator.validate(ast, warped=False)
ljd.lua.writer.write(sys.stdout, ast)
return 0
if __name__ == "__main__":
retval = main()
sys.exit(retval)
# vim: ts=8 noexpandtab nosmarttab softtabstop=8 shiftwidth=8
|
Python
| 0.000182
|
@@ -2666,32 +2666,44 @@
warped=False)%0A%0A
+%09%09if True:%0A%09
%09%09ljd.ast.locals
@@ -2726,32 +2726,33 @@
nitions(ast)%0A%0A%09%09
+%09
# ljd.ast.valida
@@ -2782,24 +2782,25 @@
d=False)%0A%0A%09%09
+%09
ljd.ast.muta
@@ -2822,16 +2822,17 @@
s(ast)%0A%0A
+%09
%09%09ljd.as
|
a69cdac9b5a0cc0a6bf97b3ff7bac5da874ca63e
|
bump ec2 api version
|
main.py
|
main.py
|
import urllib
import httplib
from amazon.query import AmazonQuery
if __name__ == '__main__':
key_id = 'AKIAIEXAMPLEEXAMPLE6'
secret = 'example+example+example+example+example7'
endpoint = 'http://ec2.us-west-1.amazonaws.com'
question = { 'Version': '2012-03-01', 'Action': 'DescribeInstances' }
query = AmazonQuery(endpoint, key_id, secret, question)
# httplib.debuglevel = 1
# httplib.HTTPConnection.debuglevel = 1
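# POST the signed query parameters and print the raw XML response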
print urllib.FancyURLopener().open(endpoint, query.signed_parameters).read()
#
# other questions to try ...
#
# endpoint = 'https://cloudformation.us-west-1.amazonaws.com'
# question = { 'Version': '2010-05-15', 'Action': 'ListStacks' }
# endpoint = 'http://monitoring.us-west-1.amazonaws.com'
# question = { 'Version': '2010-08-01', 'Action': 'ListMetrics' }
# endpoint = 'http://ec2.us-west-1.amazonaws.com'
# question = { 'Version': '2012-03-01', 'Action': 'DescribeInstances' }
# question = { 'Version': '2012-03-01', 'Action': 'DescribeReservedInstancesOfferings' }
# question = { 'Version': '2012-03-01', 'Action': 'DescribeAvailabilityZones' }
# endpoint = 'http://elasticmapreduce.us-west-1.amazonaws.com'
# question = { 'Version': '2009-03-31', 'Action': 'DescribeJobFlows' }
# endpoint = 'http://ec2.us-east-1.amazonaws.com'
# question = { 'Version': '2012-10-01', 'Action': 'DescribeSpotPriceHistory' }
# endpoint = 'https://elasticache.us-west-1.amazonaws.com'
# question = { 'Version': '2011-07-15', 'Action': 'DescribeCacheClusters' }
# endpoint = 'https://rds.us-west-1.amazonaws.com'
# question = { 'Version': '2012-01-15', 'Action': 'DescribeDBEngineVersions' }
# endpoint = 'http://sns.us-west-1.amazonaws.com'
# question = { 'Version': '2010-03-31', 'Action': 'ListTopics' }
# endpoint = 'http://sqs.us-west-1.amazonaws.com'
# question = { 'Version': '2011-10-01', 'Action': 'ListQueues' }
# endpoint = 'http://sdb.us-west-1.amazonaws.com'
# question = { 'Version': '2009-04-15', 'Action': 'ListDomains' }
# endpoint = 'http://s3-us-west-1.amazonaws.com'
# question = { 'Version': '2006-03-01', 'Action': 'ListAllMyBuckets' }
# endpoint = 'https://elasticbeanstalk.us-east-1.amazonaws.com'
# question = { 'Version': '2010-12-01', 'Action': 'ListAvailableSolutionStacks' }
# endpoint = 'https://iam.amazonaws.com'
# question = { 'Version': '2010-05-08', 'Action': 'ListUsers' }
# endpoint = 'http://autoscaling.us-west-1.amazonaws.com'
# question = { 'Version': '2011-01-01', 'Action': 'DescribeTags' }
# endpoint = 'https://importexport.amazonaws.com'
# question = { 'Version': '2010-06-01', 'Action': 'ListJobs' }
# endpoint = 'https://sts.amazonaws.com'
# question = { 'Version': '2011-06-15', 'Action': 'GetSessionToken' }
# endpoint = 'https://iam.amazonaws.com'
# question = { 'Version': '2010-05-08', 'Action': 'GetUser' }
# question = { 'Version': '2010-05-08', 'Action': 'ListUserPolicies', 'UserName': 'test_iam' }
# question = { 'Version': '2010-05-08', 'Action': 'GetUserPolicy', 'UserName': 'test_iam', 'PolicyName': 'ReadOnlyAccess-test_iam-201203291722' }
# endpoint = 'http://elasticloadbalancing.us-west-1.amazonaws.com'
# question = { 'Version': '2011-11-15', 'Action': 'DescribeLoadBalancers' }
|
Python
| 0
|
@@ -261,36 +261,36 @@
'Version': '201
-2-03
+4-02
-01', 'Action':
|
09af9f28079685ce9360381708a7b0529ef77f3f
|
Rename file of generated images
|
main.py
|
main.py
|
import os
import numpy as np
import scipy
import colorlog as log
import logging
from utils.model_monitor import ModelMonitor
from network.pixel_rnn import PixelRNN
from utils.visualization import save_network_graph, dynamic_image, save_grayscale_images_grid
log.basicConfig(level=logging.DEBUG)
def train_model(model, model_monitor, X, y, X_valid, y_valid, batch_size):
minibatch_count = X.shape[0] // batch_size
val_minibatch_count = X_valid.shape[0] // batch_size
best_loss = 100
try:
for e in range(0, 10):
for i in range(0, minibatch_count):
y_next = y[i * batch_size:(i + 1) * batch_size]
X_next = X[i * batch_size:(i + 1) * batch_size]
loss, train_image = model.train_pass(X_next, y_next)
if i % 100 == 0:
log.info("epoch {} minibatch {} loss: {}".format(e, i, loss))
val_losses = list()
for j in range(0, val_minibatch_count):
y_val_next = y_valid[j * batch_size:(j + 1) * batch_size]
x_val_next = X_valid[j * batch_size:(j + 1) * batch_size]
val_loss, _ = model.validation_pass(x_val_next, y_val_next)
val_losses.append(val_loss)
mean_val_loss = np.array(val_losses).mean()
log.info("validation: epoch {}, iteration {}, loss: {}".format(e, i, mean_val_loss))
if mean_val_loss < best_loss:
best_loss = mean_val_loss
model_monitor.save_model()
except KeyboardInterrupt:
log.info("Training was interrupted. Proceeding with image generation.")
def generate_images(model, model_monitor):
"""
Samples a grid of images from the model
:param model: e.g. PixelRNN
:param model_monitor: the monitor to load the model weights with
"""
model_monitor.load_model(model_name="best_params.npz", network=model.network)
images = np.zeros((100, 1, 28, 28), dtype=np.float32)
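# autoregressive sampling: each forward pass conditions on all pixels generated so far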
for row_i in range(0, height):
for col_i in range(0, width):
for chan_i in range(0, input_channels):
new_images = sample(model.test_pass(images))
# copy one generated pixel of one channel, then use it for the next generation
images[:, chan_i, row_i, col_i] = new_images[:, chan_i, row_i, col_i]
save_grayscale_images_grid(images=images, image_size=(28, 28), grid_size=(10, 10),
filepath=os.path.join(os.path.dirname(__file__), "data/generated/images.jpg"))
log.info("Images generated under data/generated :) ")
def test_model(model, model_monitor, X_test):
"""
Dynamically completes the lower half of an image pixel by pixel, showing the generation in a window.
:param model: e.g. PixelRNN
:param model_monitor: model monitor to load the model weights with
:param X_test: an array of images, the lower half of which will be completed
"""
model_monitor.load_model(model_name="best_params.npz", network=model.network)
def image_gen():
for i in range(0, 100):
image = X_test[[i], :, :, :]
image[:, :, height // 2:, :] = 0.5
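# gray out the lower half; it is regenerated pixel by pixel below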
# show the image and keep refreshing it
for row_i in range(height // 2, height):
for col_i in range(0, width):
for chan_i in range(0, input_channels):
next = sample(model.test_pass(image))
image[:, chan_i, row_i, col_i] = next[:, chan_i, row_i, col_i]
img = scipy.misc.toimage(image[0, 0], cmin=0.0, cmax=1.0)
yield img
dynamic_image(init_img=scipy.misc.toimage(X_test[0, 0], cmin=0.0, cmax=1.0), image_generator=image_gen)
if __name__ == "__main__":
# CONSTANTS
batch_size = 16
h = 64
height = width = 28
input_channels = 1
# INSTANTIATE MODEL
pixel_rnn = PixelRNN(batch_size=batch_size, image_shape=(input_channels, height, width), n_hidden=h)
save_network_graph(pixel_rnn.network, os.path.join(os.path.dirname(__file__), "data/network_graph.png"))
model_monitor = ModelMonitor(outputs=pixel_rnn.network, name=pixel_rnn.get_name())
# DATA PREP
from mnist import load_data, sample
data = load_data()
x_train, x_valid, x_test = data['x_train'], data['x_valid'], data['x_test']
y = x_train
X = np.array(x_train, dtype=np.float32)
y_valid = x_valid[:1000]
X_valid = np.array(x_valid[:1000], dtype=np.float32)
y_test = x_test
X_test = np.array(x_test, dtype=np.float32)
# USE MODEL
# train_model(model=pixel_rnn, model_monitor=model_monitor, X=X, y=y, X_valid=X_valid, y_valid=y_valid,
# batch_size=batch_size)
test_model(model=pixel_rnn, model_monitor=model_monitor, X_test=X_test)
# generate_images(model=pixel_rnn, model_monitor=model_monitor)
|
Python
| 0
|
@@ -3,16 +3,73 @@
port os%0A
+%0Aos.environ%5B%22THEANO_FLAGS%22%5D = %22device=gpu0,lib.cnmem=1%22%0A%0A
import n
@@ -2680,22 +2680,25 @@
nerated/
-images
+generated
.jpg%22))%0A
@@ -4913,16 +4913,18 @@
ize)%0A
+ #
test_mo
@@ -4991,18 +4991,16 @@
est)%0A
- #
generat
|
723abaf9bb1ad6d0b8c67e06522bb1d87f3ab82d
|
Fix broken test, handle terminate on the REQUEST
|
test/test_listallobjects_handler.py
|
test/test_listallobjects_handler.py
|
from handler_fixture import StationHandlerTestCase
from groundstation.transfer.request_handlers import handle_listallobjects
from groundstation.transfer.response_handlers import handle_terminate
import groundstation.transfer.response as response
from groundstation.proto.object_list_pb2 import ObjectList
class TestHandlerListAllObjects(StationHandlerTestCase):
def test_handle_listallobjects_returns_stream_for_few_objects(self):
# Make ourselves cached
self.station.station.mark_queried(self.station.origin)
oids = list()
for i in xrange(64):
oids.append(self.station.station.write("test_%i" % (i)))
handle_listallobjects(self.station)
resp = self.station.stream.pop()
self.assertIsInstance(resp, response.Response)
objects = ObjectList()
objects.ParseFromString(resp.payload)
self.assertEqual(len(objects.objectname), len(oids))
for i in objects.objectname:
self.assertIn(i, oids)
def test_follows_up_on_channels(self):
self.station.set_real_terminate(True)
self.station.set_real_id(True)
self.station.set_real_register(True)
handle_listallobjects(self.station)
req1 = self.station.stream.pop(0)
self.assertEqual(req1.verb, "LISTALLOBJECTS")
while self.station.stream:
resp = self.station.stream.pop(0)
if resp.verb == "TERMINATE":
break
self.assertEqual(resp.verb, "DESCRIBEOBJECTS")
self.assertEqual(len(self.station.stream), 0)
resp.stream = self.station.stream
handle_terminate(resp)
req2 = self.station.stream.pop(0)
self.assertEqual(req2.verb, "LISTALLCHANNELS")
class TestHandlerListAllObjectsCached(StationHandlerTestCase):
def test_has_cache(self):
handle_listallobjects(self.station)
req1 = self.station.stream.pop(0)
self.assertEqual(req1.verb, "LISTALLOBJECTS")
while self.station.stream:
resp = self.station.stream.pop()
self.assertEqual(resp.verb, "DESCRIBEOBJECTS")
handle_listallobjects(self.station)
resp = self.station.stream.pop(0)
self.assertIsInstance(resp, response.Response)
|
Python
| 0
|
@@ -1643,18 +1643,18 @@
inate(re
-sp
+q1
)%0A%0A
|
4c5550420b8a9f1bf88f4329952f6e2a161cd20f
|
Fix test on kaos with latest qt5
|
test/test_panels/test_navigation.py
|
test/test_panels/test_navigation.py
|
from pyqode.core.api import TextHelper
from pyqode.qt.QtTest import QTest
def test_toggle_button(editor):
editor.file.open('test/files/example.json')
editor.show()
TextHelper(editor).goto_line(6)
QTest.qWait(500)
panel = editor.panels.get('NavigationPanel')
assert len(panel._widgets) == 4
assert panel._widgets[1].text() == 'window'
panel._widgets[1].toggled.emit(True)
QTest.qWait(500)
assert TextHelper(editor).cursor_position()[0] == 3
|
Python
| 0
|
@@ -344,16 +344,41 @@
%5D.text()
+.replace('&', '').lower()
== 'win
|
07a9f573ad0872c9b8a6520a93cdf78db3336305
|
fix timeout typo
|
main.py
|
main.py
|
#!/usr/bin/python2
from multiprocessing import Process, Queue
import sys
import os
import time
import shlex
import cmd
import logging
from datetime import datetime
import RPi.GPIO as GPIO
from stepper_controller import MotorController
# stepper sequence. motor dependent
# SEQ = [(1,0,0,0),
# (0,1,0,0),
# (1,1,0,0),
# (0,0,1,0),
# (1,0,1,0),
# (0,1,1,0),
# (1,1,1,0),
# (0,0,0,1)]
# for Berg's particular stepper
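# (half-step drive: each tuple is one energize pattern for the four coils)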
SEQ = [(1,0,0,1),
(1,0,0,0),
(1,1,0,0),
(0,1,0,0),
(0,1,1,0),
(0,0,1,0),
(0,0,1,1),
(0,0,0,1)]
# define pins and initialize low
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
XPINS = [13,15,16,18]
YPINS = [37,33,31,29]
ZPINS = [32, 36, 38, 40]
GPIO.setup(XPINS + YPINS + ZPINS,
GPIO.OUT,
initial=GPIO.LOW)
# command interpreter class
class Hello(cmd.Cmd):
""" simple command processor """
# def __init__(self):
# cmd.Cmd.__init__(self)
# printed at the start of the command loop
intro = """
Usage:
List available controllers with 'list'
Select a controller with 'use', then send commands.
? for help
CTRL-D or EOF to exit
"""
# def preloop(self):
# """stuff done *before* command loops starts """
# pass
# def postcmd(self, stop, line):
# """ stuff done after *each* command """
# pass
def do_greet(self, line):
print( 'hello ' + line)
def do_EOF(self, line):
"""Exit program"""
logging.debug("do EOF")
print()
return True # to exit the command interpreter loop
def do_fwd(self, args):
"""Go forward N steps"""
global current
current.send('step ' + args)
print(status_que.get(timout=10))
def do_mov(self, args):
""" move X Y Z steps """
"""
for now, we assume controllers are numbered 0, 1, 2
"""
logging.debug("do mov: " + args)
try:
(x_steps, y_steps, z_steps) = [ i for i in shlex.split(args)]
controls[0].send('step ' + x_steps)
controls[1].send('step ' + y_steps)
controls[2].send('step ' + z_steps)
# wait for returns
print(status_que.get(timeout=10))
print(status_que.get(timeout=10))
print(status_que.get(timeout=10))
return False
except Exception as ex:
logging.debug(ex)
def do_rev(self, args):
"""NOT IMPLEMENTED"""
print( ' NOT IMPL rev ' + args)
def do_list(self, args):
""" list available controllers"""
li = [n.name for n in controls]
for i in range(len(li)):
print(i, li[i])
def do_use(self, arg):
""" Use controller N from list"""
global current
try:
val = int(arg)
if 0 <= val < len(controls):
current = controls[val]
prompt.prompt = ('%s > ' ) % current.name
print('using controller %s' % current.name)
else:
print('bad value for %d' % val)
except ValueError:
print('bad arg for use')
pass
def do_current(self, line):
"""Show name of current controller"""
global current
print('current: ', current.name)
def do_quit(self, line):
"""Send 'quit' to current controller"""
global current
current.send('quit')
def do_file(self, line):
""" Read commands from a file instead of keyboard"""
global current
parsed = shlex.split(line)
#save state
old_use_rawinput = self.use_rawinput
old_prompt = self.prompt
self.use_rawinput = False
self.prompt = ""
try:
name = parsed[0]
print('== executing from: %s' % name)
with open(name, 'rt') as fi:
lines = [l.strip() for l in fi.readlines()]
# for li in lines:
# self.onecmd(li) # execute single command
# stuff contents of file into command loop
self.cmdqueue = lines
except Exception as ex:
print(ex)
finally:
# restore state
self.lastcmd = ""
self.use_rawinput = old_use_rawinput
self.prompt = old_prompt
#print('== done: %s' % name)
#
# main line
#
if __name__ == '__main__':
try:
# set up logging
logging.basicConfig(filename='stepper.log',
level = logging.DEBUG)
logging.info("Start: %s", datetime.now() )
# process id
logging.info('main pid: %d ',os.getpid())
# return messages from controllers
status_que = Queue()
# create controllers and add to our list of controls
# each control has its own msg queue
controls = []
stepx = MotorController( 'stepx', Queue(), status_que, SEQ, XPINS)
stepy = MotorController( 'stepy', Queue(), status_que, SEQ, YPINS)
stepz = MotorController( 'stepz', Queue(), status_que, SEQ, ZPINS)
controls.append(stepx)
controls.append(stepy)
controls.append(stepz)
# log controller pids
for con in controls:
logging.info('%s pid: %d', con.name, con.proc.pid)
current = controls[0] # default to first in list
## send some commands
# stepx.send('step 500')
# stepy.send('step 600')
# stepz.send('step 700')
# start command interpreter
prompt = Hello()
# put current controller name in prompt
prompt.prompt = "%s > " % current.name
prompt.cmdloop()
# time to quit
except KeyboardInterrupt as ex:
print('Caught exception: %s' % ex)
finally:
logging.info('cleanup')
# stop controller sub process
for con in controls:
con.send('quit')
for con in controls:
con.proc.join()
GPIO.cleanup()
logging.info('===== done ====')
|
Python
| 0.000001
|
@@ -1768,16 +1768,17 @@
.get(tim
+e
out=10))
|
6090a569810a3ebcac5600569b70a4239f99bc27
|
Fix NameError in SubmitCommentHandler
|
main.py
|
main.py
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
from os import path
from google.appengine.api import users
from google.appengine.ext import ndb
import jinja2
import webapp2
import models
template_dir = path.join(path.dirname(__file__), 'templates')
jinja_environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(template_dir), autoescape=True)
class IndexHandler(webapp2.RequestHandler):
def get(self):
user = users.get_current_user()
if user:
email = user.email()
auth_url = users.create_logout_url(dest_url='/')
else:
email = None
auth_url = users.create_login_url(dest_url='/')
self.response.write(jinja_environment.get_template('index.html').render(
email=email,
is_admin=users.is_current_user_admin(),
auth_url=auth_url,
posts=models.Post.query().order(-models.Post.posted_at).fetch()))
class ViewPostHandler(webapp2.RequestHandler):
def get(self):
post_id = self.request.get('id')
post_key = ndb.Key(urlsafe=post_id)
post = post_key.get()
if post:
comments = (models.Comment.query(models.Comment.post == post_key)
.order(models.Comment.posted_at).fetch())
if users.get_current_user():
sign_in_url = None
else:
sign_in_url = users.create_login_url(
dest_url=('/view-post?id=' + post_id))
self.response.write(
jinja_environment.get_template('view_post.html').render(
post=post, comments=comments, sign_in_url=sign_in_url))
else:
webapp2.abort(404)
class NewPostHandler(webapp2.RequestHandler):
def get(self):
if users.is_current_user_admin():
self.response.write(
jinja_environment.get_template('new_post.html').render())
else:
webapp2.abort(403)
class SubmitPostHandler(webapp2.RequestHandler):
def post(self):
if users.is_current_user_admin():
title = self.request.get('title')
content = self.request.get('content')
if title and content:
post_key = models.Post(title=title, content=content).put()
return webapp2.redirect('/view-post?id=' + post_key.urlsafe())
else:
webapp2.abort(400)
else:
webapp2.abort(403)
class SubmitCommentHandler(webapp2.RequestHandler):
def post(self):
user = users.get_current_user()
if user:
post_key = ndb.Key(urlsafe=self.request.get('post_id'))
content = self.request.get('content')
if post_key.get() and content:
comment = models.Comment(
post=post_key, author_email=user.email(), content=content)
comment.put()
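# content negotiation: JSON for AJAX clients, a redirect for plain form posts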
if ('application/json' in
self.request.headers.get('Accept', '').lower()):
self.response.write(json.dumps({'email': author.email}))
else:
return webapp2.redirect(
'/view-post?id=' + post_key.urlsafe())
else:
webapp2.abort(400)
else:
webapp2.abort(403)
class ClearCommentsHandler(webapp2.RequestHandler):
def get(self):
if (users.is_current_user_admin() or
self.request.headers.get('X-Appengine-Cron') == 'true'):
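# the X-Appengine-Cron header is set by App Engine itself and cannot be spoofed externally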
for comment_key in models.Comment.query().iter(keys_only=True):
comment_key.delete()
self.response.set_status(204)
else:
webapp2.abort(403)
def handle_400(request, response, exception):
response.set_status(400)
response.write(jinja_environment.get_template('400.html').render())
def handle_403(request, response, exception):
response.set_status(403)
response.write(jinja_environment.get_template('403.html').render())
def handle_404(request, response, exception):
response.set_status(404)
response.write(jinja_environment.get_template('404.html').render())
def handle_500(request, response, exception):
logging.exception(exception)
response.set_status(500)
response.write(jinja_environment.get_template('500.html').render())
production = os.getenv('SERVER_SOFTWARE', '').startswith('Google App Engine/')
app = webapp2.WSGIApplication(
routes=[
('/', IndexHandler),
('/view-post', ViewPostHandler),
('/new-post', NewPostHandler),
('/submit-post', SubmitPostHandler),
('/submit-comment', SubmitCommentHandler),
('/clear-comments', ClearCommentsHandler),
],
debug=(not production))
app.error_handlers[400] = handle_400
app.error_handlers[403] = handle_403
app.error_handlers[404] = handle_404
if production:
app.error_handlers[500] = handle_500
|
Python
| 0.000001
|
@@ -3654,20 +3654,20 @@
l':
-autho
+use
r.email
+()
%7D))%0A
|
6878860d8b8d3377960a8310b6b733a4cbc30959
|
use environment variable
|
main.py
|
main.py
|
import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write("Hello, world")
if __name__ == "__main__":
application = tornado.web.Application([
(r"/", MainHandler),
])
application.listen(80)
tornado.ioloop.IOLoop.current().start()
|
Python
| 0.000008
|
@@ -1,20 +1,30 @@
+import os%0A
import tornado.ioloo
@@ -285,10 +285,26 @@
ten(
-80
+os.environ%5B'PORT'%5D
)%0A
|
4962705b6876592657f51fa6546d75404a460010
|
bump version
|
main.py
|
main.py
|
#!/usr/bin/env python
# coding: utf-8
__version__ = "0.5.1"
import kivy
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.clock import Clock
from kivy.core.window import Window
try :
from jnius import autoclass
from android.runnable import run_on_ui_thread
except ImportError, e :
pass
from time import sleep
import base64
import re
import os
import urllib2
import sys
import threading
import codecs
print "Starting up."
from params import parameters
from mica.mica import go, second_splash
from mica.common import pre_init_localization
cwd = re.compile(".*\/").search(os.path.realpath(__file__)).group(0)
sys.path = [cwd, cwd + "mica/"] + sys.path
WebView = autoclass('android.webkit.WebView')
WebViewClient = autoclass('android.webkit.WebViewClient')
activity = autoclass('org.renpy.android.PythonActivity').mActivity
String = autoclass('java.lang.String')
MLog = autoclass("org.renpy.android.MLog")
CouchBase = autoclass("org.renpy.android.Couch")
MobileInternet = autoclass("org.renpy.android.Internet")
print("Loading mica services")
log = MLog(activity)
mobile_internet = MobileInternet(activity)
log.debug(String("Loading certificate file for couch"))
fh = codecs.open(parameters["cert"], 'r', "utf-8")
cert = fh.read()
fh.close()
log.debug(String("Starting couchbase"))
couch = CouchBase(String(parameters["local_username"]), String(parameters["local_password"]), parameters["local_port"], String(cert), activity)
pre_init_localization(couch.get_language(), log)
port = couch.start(String(parameters["local_database"]))
if port == -1 :
log.error(String("AAAHHHHHH. FAILURE."))
log.debug(String("Trying to start replication"))
parameters["couch"] = couch
parameters["mobileinternet"] = mobile_internet
parameters["duplicate_logger"] = log
class Wv(Widget):
def __init__(self, **kwargs):
log.debug(String("Initializing webview widget"))
super(Wv, self).__init__(**kwargs)
Clock.schedule_once(self.create_webview, 0)
log.debug(String("first clock scheduled"))
self.wu = False
Window.bind(on_keyboard=self.disable_back_button_death)
def disable_back_button_death(self,window,key,*largs) :
if key == 27 :
log.debug(String("back button death is stupid. not doing it."))
return True
log.debug(String("Ignoring other buttons: " + str(key)))
@run_on_ui_thread
def go(self, *args) :
#log.debug(String("polling twisted"))
try:
urllib2.urlopen('http://localhost:10000/serve/favicon.ico')
self.webview.loadUrl('http://localhost:10000/')
log.debug(String("Storing webview for web updates"))
couch.setWebView(self.webview)
log.debug(String("webview stored initialized"))
#self.webview.setInitialScale(180);
return
except urllib2.HTTPError, e:
#log.warn(String(str(e.code)))
pass
except urllib2.URLError, e:
#log.warn(String(str(e.args)))
pass
Clock.schedule_once(self.go, 1)
@run_on_ui_thread
def account(self) :
log.debug(String("Loading account settings."))
self.webview.loadUrl('http://localhost:10000/account')
@run_on_ui_thread
def create_webview(self, *args):
log.debug(String("creating webview"))
self.webview = WebView(activity)
self.webview.clearCache(True);
#self.webview.clearFormData();
#self.webview.clearHistory();
settings = self.webview.getSettings()
settings.setDefaultTextEncodingName("utf-8")
settings.setJavaScriptEnabled(True)
settings.setBuiltInZoomControls(True)
settings.setAllowUniversalAccessFromFileURLs(True)
#settings.setCacheMode(settings.LOAD_NO_CACHE);
log.debug(String("setting webview client"))
self.webview.setWebViewClient(WebViewClient());
#WebView.setWebContentsDebuggingEnabled(True);
log.debug(String("setting content view"))
activity.setContentView(self.webview)
self.webview.loadData(String(second_splash()), "text/html; charset=utf-8", "utf-8");
Clock.schedule_once(self.go, 5)
def background() :
log.debug(String("Entering MICA thread"))
go(parameters)
while True:
log.error(String("Uh oh. Problem in MICA. May need to restart application."))
sleep(1)
class ReaderApp(App):
def open_settings(self):
log.debug(String("Menu button pressed."))
self.mwv.account()
def build(self):
log.debug(String("Starting MICA thread."))
self.t = threading.Thread(target = background)
self.t.daemon = True
self.t.start()
log.debug(String("Started. Returning webview object"))
self.mwv = Wv()
return self.mwv
def on_pause(self):
log.debug(String("MICA is pausing. Don't know what to do about that yet."))
return True
if __name__ == '__main__':
ReaderApp().run()
|
Python
| 0
|
@@ -50,17 +50,17 @@
= %220.5.
-1
+2
%22%0Aimport
|
119893b1eca46cc8889fd2c299e656c659eb1a84
|
Update amazon endpoint in code.
|
main.py
|
main.py
|
#! /usr/bin/env python
from PyMata.pymata import PyMata
import subprocess
import os
import random
import time
import alsaaudio
import wave
import random
from creds import *
import requests
import json
import re
from memcache import Client
# Import SDK packages
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient
import json
myMQTTClient = AWSIoTMQTTClient("AWS_IOT_ARN")
myMQTTClient.configureEndpoint("REST_API_ENDPOINT", 8883)
myMQTTClient.configureCredentials("root-CA.crt", "NUC-Gateway.private.key", "NUC-Gateway.cert.pem")
myMQTTClient.configureOfflinePublishQueueing(-1) # Infinite offline Publish queueing
myMQTTClient.configureDrainingFrequency(2) # Draining: 2 Hz
myMQTTClient.configureConnectDisconnectTimeout(10) # 10 sec
myMQTTClient.configureMQTTOperationTimeout(5) # 5 sec
myMQTTClient.connect()
Button = 8
LED_Status = 3
LED_Record = 4
blue_led = 5;
red_led = 6;
board = PyMata("/dev/ttyACM0", verbose=True)
board.set_pin_mode(LED_Status, board.OUTPUT, board.DIGITAL)
board.set_pin_mode(LED_Record, board.OUTPUT, board.DIGITAL)
board.set_pin_mode(Button, board.INPUT, board.DIGITAL)
def customCallback(client, userdata, message):
print("Received a new message: ")
parsed_json = json.loads(message.payload)
blue_led_state = parsed_json["state"]["desired"]["blue_led"]
red_led_state = parsed_json["state"]["desired"]["red_led"]
print(blue_led_state)
print(red_led_state)
board.digital_write(blue_led, blue_led_state)
board.digital_write(red_led, red_led_state)
#Setup
recorded = False
servers = ["127.0.0.1:11211"]
mc = Client(servers, debug=1)
path = os.path.realpath(__file__).rstrip(os.path.basename(__file__))
def internet_on():
print "Checking Internet Connection"
try:
r =requests.get('https://api.amazon.com/auth/o2/token')
print "Connection OK"
return True
except:
print "Connection Failed"
return False
def gettoken():
token = mc.get("access_token")
refresh = refresh_token
if token:
return token
elif refresh:
payload = {"client_id" : Client_ID, "client_secret" : Client_Secret, "refresh_token" : refresh, "grant_type" : "refresh_token", }
url = "https://api.amazon.com/auth/o2/token"
r = requests.post(url, data = payload)
resp = json.loads(r.text)
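# cache for 3570 seconds, just under the typical one-hour token lifetime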
mc.set("access_token", resp['access_token'], 3570)
return resp['access_token']
else:
return False
def alexa():
print 'alexa function called'
board.digital_write(LED_Status, 1)
url = 'https://access-alexa-na.amazon.com/v1/avs/speechrecognizer/recognize'
headers = {'Authorization' : 'Bearer %s' % gettoken()}
d = {
"messageHeader": {
"deviceContext": [
{
"name": "playbackState",
"namespace": "AudioPlayer",
"payload": {
"streamId": "",
"offsetInMilliseconds": "0",
"playerActivity": "IDLE"
}
}
]
},
"messageBody": {
"profile": "alexa-close-talk",
"locale": "en-us"#,
# "format": "audio/L16; rate=16000; channels=1"
}
}
with open(path+'recording.wav') as inf:
print 'with open recording.wav'
files = [
('file', ('request', json.dumps(d), 'application/json; charset=UTF-8')),
('file', ('audio', inf, 'audio/L16; rate=16000; channels=1'))
]
r = requests.post(url, headers=headers, files=files)
print r.status_code
if r.status_code == 200:
print 'status code 200'
for v in r.headers['content-type'].split(";"):
if re.match('.*boundary.*', v):
print 'if re.match'
boundary = v.split("=")[1]
data = r.content.split(boundary)
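# the AVS reply is multipart; parts of 1 KiB or more are treated as the MP3 audio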
for d in data:
print 'for d in data'
if (len(d) >= 1024):
audio = d.split('\r\n\r\n')[1].rstrip('--')
with open(path+"response.mp3", 'wb') as f:
f.write(audio)
board.digital_write(LED_Record, 0)
os.system('mpg123 -q {}1sec.mp3 {}response.mp3'.format(path, path))
board.digital_write(LED_Status, 0)
else:
print 'else, no code 200'
board.digital_write(LED_Record, 0)
board.digital_write(LED_Status, 0)
for x in range(0, 3):
time.sleep(.2)
board.digital_write(LED_Record, 1)
time.sleep(.2)
board.digital_write(LED_Record, 0)
board.digital_write(LED_Status, 0)
def start():
recording = 0
last = 0
while True:
val = board.digital_read(Button)
if val == 1 and last == 0:
last = 1;
record = subprocess.Popen(['arecord', '-r', '16000', '-f', 'S16_LE', '--period-size', '500', '-c', '1', '-vv', 'recording.wav'])
recording = 1;
board.digital_write(LED_Record, 1)
elif val == 0 and recording == 1:
last = 0;
recording = 0;
record.kill()
board.digital_write(LED_Record, 0)
myMQTTClient.subscribe("$aws/things/NUC-Gateway/shadow/update/accepted", 1, customCallback)
alexa()
if __name__ == "__main__":
board.digital_write(LED_Status, 0)
board.digital_write(LED_Record, 0)
while internet_on() == False:
print "."
token = gettoken()
os.system('mpg123 -q {}1sec.mp3 {}hello.mp3'.format(path, path))
for x in range(0, 3):
time.sleep(.1)
board.digital_write(LED_Status, 1)
time.sleep(.1)
board.digital_write(LED_Status, 0)
start()
|
Python
| 0
|
@@ -359,71 +359,137 @@
nt(%22
-AWS_IOT_ARN%22)%0AmyMQTTClient.configureEndpoint(%22REST_API_ENDPOINT
+arn:aws:iot:us-east-1:194337674115:thing/NUC-Gateway%22)%0AmyMQTTClient.configureEndpoint(%22a1am3uuthfk12b.iot.us-east-1.amazonaws.com
%22, 8
|
944515624ec57f94b6bdb4e9a46988b9604f4c3b
|
add basic command line parser.
|
main.py
|
main.py
|
#!/usr/bin/python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
import sys
import argparse
import re
def main():
print("YMK Goodbye World!!!")
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -104,55 +104,1083 @@
re%0A
-%0A%0Adef main():%0A print(%22YMK Goodbye World!!!%22)
+import logging%0A%0A%0Alg = logging.getLogger(%22DRIVE_MAIN%22)%0Alg.setLevel(logging.DEBUG)%0Ach = logging.StreamHandler()%0Ach.setLevel(logging.DEBUG)%0Aformatter = logging.Formatter('%5B%25(name)s%5D %25(levelname)s - %25(message)s')%0Ach.setFormatter(formatter)%0Alg.addHandler(ch)%0A%0A%0Adrive_commands = %5B'info', 'list'%5D%0A%0A%0Adef command_list():%0A lg.debug(%22YMK command_list!!!%22)%0A parser = argparse.ArgumentParser(%0A description='YMK google drive command line tool -- list')%0A parser.add_argument('list', nargs='+')%0A args = parser.parse_args()%0A if args.list%5B0%5D != 'list':%0A print parser.print_help()%0A if len(args.list) %3E 1 and args.list%5B1%5D == 'help':%0A print parser.print_help()%0A%0A lg.debug(args)%0A%0A%0Adef main():%0A lg.debug(%22YMK Goodbye World!!!%22)%0A parser = argparse.ArgumentParser(%0A description='YMK google drive command line tool')%0A parser.add_argument('command', nargs=1, choices=drive_commands)%0A parser.add_argument('others', nargs='?')%0A args = parser.parse_args()%0A%0A lg.debug(args.command)%0A if args.command%5B0%5D == 'list':%0A command_list()%0A
%0A%0Aif
|
4bcf8ea9572b90782e2f1d6150ec96e28002378f
|
set loglevel to warning
|
main.py
|
main.py
|
"""
The MIT License (MIT)
Copyright (c) 2014 Kord Campbell, StackGeek
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
__author__ = 'Kord Campbell'
__website__ = 'http://www.stackmonkey.com/'
import os,sys
# python paths
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'web/models'))
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'lib'))
# imports
import webapp2
import config
import routes
from web.basehandler import handle_error
# base application
app = webapp2.WSGIApplication(debug = os.environ['SERVER_SOFTWARE'].startswith('Dev'), config=config.webapp2_config)
# error handling
app.error_handlers[403] = handle_error
app.error_handlers[404] = handle_error
# debug output
if not app.debug:
app.error_handlers[500] = handle_error
# add routes
routes.add_routes(app)
|
Python
| 0.000001
|
@@ -1391,16 +1391,31 @@
t routes
+%0Aimport logging
%0A%0Afrom w
@@ -1450,16 +1450,63 @@
_error%0A%0A
+logging.getLogger().setLevel(logging.WARNING)%0A%0A
# base a
|
c126348a70f316c9ef25d70dc87d6b25f69f83af
|
remove routes
|
main.py
|
main.py
|
from flask import Flask
app = Flask(__name__)
app.config['DEBUG'] = True
from util import today
from models import Event
from google.appengine.ext import ndb
import twilio.twiml
@app.route('/')
def hello():
"""Return a friendly HTTP greeting."""
return 'Hello World!'
@app.route('/message', methods=['GET', 'POST'])
def reply():
query = Event.query(Event.date == today())
messages = []
for event in query:
messages.append('%s %s (%s)' %
(event.start, event.summary, event.location))
response = twilio.twiml.Response()
if len(messages) == 0:
response.message('No events today')
else:
response.message(' | '.join(messages))
return str(response)
@app.errorhandler(404)
def page_not_found(e):
"""Return a custom 404 error."""
return 'Sorry, nothing at this URL.', 404
|
Python
| 0.00178
|
@@ -177,107 +177,8 @@
ml%0A%0A
-@app.route('/')%0Adef hello():%0A %22%22%22Return a friendly HTTP greeting.%22%22%22%0A return 'Hello World!'%0A%0A
@app
@@ -649,161 +649,4 @@
se)%0A
- %0A %0A %0A %0A@app.errorhandler(404)%0Adef page_not_found(e):%0A %22%22%22Return a custom 404 error.%22%22%22%0A return 'Sorry, nothing at this URL.', 404%0A
|
60ae3ae54ccc573983cb9c283844eab1b62ba7a7
|
Use multiprocessing instead of threading
|
main.py
|
main.py
|
#!/usr/bin/env python
###############################################################################
# bitcoind-ncurses by Amphibian
# thanks to jgarzik for bitcoinrpc
# wumpus and kylemanna for configuration file parsing
# all the users for their suggestions and testing
# and of course the bitcoin dev team for that bitcoin gizmo, pretty neat stuff
###############################################################################
import threading, Queue, ConfigParser, argparse, signal
import rpc
import interface
import config
def interrupt_signal(signal, frame):
s = {'stop': "Interrupt signal caught"}
interface_queue.put(s)
def debug(rpc_queue):
# coinbase testnet transaction for debugging
#s = {'txid': "cfb8bc436ca1d8b8b2d324a9cb2ef097281d2d8b54ba4239ce447b31b8757df2"}
# tx with 1001 inputs, 1002 outputs
s = {'txid': 'e1dc93e7d1ee2a6a13a9d54183f91a5ae944297724bee53db00a0661badc3005'}
rpc_queue.put(s)
if __name__ == '__main__':
# initialise queues
interface_queue = Queue.Queue()
rpc_queue = Queue.Queue()
# parse commandline arguments
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config",
help="path to config file [bitcoin.conf]",
default="bitcoin.conf")
args = parser.parse_args()
# parse config file
try:
cfg = config.read_file(args.config)
except IOError:
cfg = {}
s = {'stop': "configuration file [" + args.config + "] does not exist or could not be read"}
interface_queue.put(s)
# initialise interrupt signal handler (^C)
signal.signal(signal.SIGINT, interrupt_signal)
# start RPC thread
rpc_thread = threading.Thread(target=rpc.loop, args = (interface_queue, rpc_queue, cfg))
rpc_thread.daemon = True
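# daemon thread: the process can exit even if the RPC loop is blocked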
rpc_thread.start()
#debug(rpc_queue)
# main loop
interface.loop(interface_queue, rpc_queue)
# ensure RPC thread exits cleanly
rpc_thread.join()
|
Python
| 0.000001
|
@@ -438,24 +438,23 @@
ort
-threading, Queue
+multiprocessing
, Co
@@ -1005,37 +1005,47 @@
terface_queue =
-Queue
+multiprocessing
.Queue()%0A rpc
@@ -1053,21 +1053,31 @@
queue =
-Queue
+multiprocessing
.Queue()
@@ -1730,24 +1730,31 @@
d =
-threading.Thread
+multiprocessing.Process
(tar
|
46db44a83d7683c985e0637956674e4e0506b28f
|
support custom figure names
|
main.py
|
main.py
|
#!/usr/bin/env python3
import numpy as np
import torch
import matplotlib
import matplotlib.pyplot as plt
from torch.autograd import Variable
import atexit
class PhnSpkGenerator():
def __init__(self, mu, cov, phn, spk):
self._mu = mu
self._cov = cov
self._phn = phn
self._spk = spk
def generate(self, N):
X = np.random.multivariate_normal(self._mu, self._cov, size=N)
phn = torch.ones(N) * self._phn
spk = torch.ones(N) * self._spk
return torch.from_numpy(X).float(), phn, spk
class Plotter():
def __init__(self):
self._cmap = matplotlib.colors.ListedColormap([
(1, 0, 0),
(0, 1, 0),
(0, 0, 1)
])
def plot(self, X, phn, spk, transform = lambda x:x):
plt.figure()
for i, m in enumerate(['o', '+', 'x']):
mask = (spk.numpy() == i)
spk_set = X.numpy()[mask]
spk_set = Variable(torch.from_numpy(spk_set).float())
spk_set = transform(spk_set).data.numpy()
plt.scatter(spk_set[:,0], spk_set[:,1],
c=phn.numpy()[mask], cmap=self._cmap, marker=m)
plt.show(block=False)
if __name__ == '__main__':
atexit.register(plt.show)
phn_mus = []
phn_mus.append(np.asarray([1,1]))
phn_mus.append(np.asarray([3,-2]))
phn_mus.append(np.asarray([6,4]))
phn_covs = []
phn_covs.append(np.asarray([[1,0], [0,1]]))
phn_covs.append(np.asarray([[1,0], [0,1]]))
phn_covs.append(np.asarray([[1,0], [0,1]]))
spk_mus = []
spk_mus.append(np.asarray([0, 3]))
spk_mus.append(np.asarray([0, 6]))
spk_mus.append(np.asarray([0, 9]))
gens = []
for phn, (phn_mu, phn_cov) in enumerate(zip(phn_mus, phn_covs)):
for spk, spk_mu in enumerate(spk_mus):
gens.append(PhnSpkGenerator(phn_mu+spk_mu, phn_cov, phn, spk))
X = torch.zeros((0, 2))
t_phn = torch.zeros((0,))
t_spk = torch.zeros((0,))
for g in gens:
X_g, phn_g, spk_g = g.generate(100)
X = torch.cat([X, X_g], 0)
t_phn = torch.cat([t_phn, phn_g], 0)
t_spk = torch.cat([t_spk, spk_g], 0)
plotter = Plotter()
plotter.plot(X, t_phn, t_spk)
bn_extractor = torch.nn.Sequential(
torch.nn.Linear(2, 10),
torch.nn.ReLU(),
torch.nn.Linear(10, 10),
torch.nn.ReLU(),
torch.nn.Linear(10, 2),
)
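# plot the 2-D bottleneck features of the (still untrained) extractor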
plotter.plot(X, t_phn, t_spk, bn_extractor)
|
Python
| 0
|
@@ -757,16 +757,28 @@
hn, spk,
+ name=%22fig%22,
transfo
@@ -814,16 +814,20 @@
.figure(
+name
)%0A%0A
@@ -2274,16 +2274,33 @@
n, t_spk
+, name=%22Raw data%22
)%0A%0A b
@@ -2522,16 +2522,46 @@
t_spk,
+name=%22BN features%22, transform=
bn_extra
|
401a2ff9f12837965050b117fcd05a07fb3a8928
|
Implement autoindent
|
main.py
|
main.py
|
#!/usr/bin/env python
import os.path
import sys
import tkinter as tk
import tkinter.filedialog
import tkinter.scrolledtext
VERSION = [0, 0, 0]
class Application(tk.Frame):
def __init__(self, master=None):
tk.Frame.__init__(self, master)
self.pack(expand=1, fill='both')
self.createWidgets()
self.filename = None
self.settitle()
def createWidgets(self):
self.menu = tk.Menu(self)
filemenu = tk.Menu(self.menu, tearoff=0)
filemenu.add_command(label='Save', underline=0, command=self.save,
accelerator='Ctrl+S')
self.bind_all('<Control-s>', self.save)
filemenu.add_command(label='Save As...', underline=5,
command=self.saveas, accelerator='Ctrl+Shift+S')
self.bind_all('<Control-S>', self.saveas)
filemenu.add_separator()
filemenu.add_command(label='Exit', underline=1, command=root.quit)
self.menu.add_cascade(label='File', underline=0, menu=filemenu)
root.config(menu=self.menu)
self.status = tkinter.Label(self, text='', relief='sunken',
anchor='w')
self.status.pack(side='bottom', fill='x')
self.textin = tk.Text(self)
self.textin['height'] = 0
self.textin.bind('<Return>', self.sendtext)
self.textin.pack(side='bottom', fill='x')
self.textout = tkinter.scrolledtext.ScrolledText(self)
self.textout.pack(side='bottom', expand=1, fill='both')
def settitle(self):
if self.filename:
self.master.title(os.path.basename(self.filename))
else:
self.master.title('{} {}.{}.{}'.format(sys.argv[0], *VERSION))
def save(self, event=None):
if self.filename:
self.writeout()
else:
self.saveas(event)
def saveas(self, event=None):
filename = tkinter.filedialog.asksaveasfilename()
if filename:
self.filename = filename
self.settitle()
self.writeout()
def writeout(self):
self.status['text'] = 'Saving...'
with open(self.filename, 'w') as f:
f.write(self.textout.get('1.0', 'end'))
self.status['text'] = 'Saved.'
def sendtext(self, event):
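        # Move the typed line from the input box into the main text area.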
self.textout.insert('end', self.textin.get('1.0', 'end')[:-1])
self.textin.delete('1.0', 'end')
root = tk.Tk()
app = Application(master=root)
app.mainloop()
|
Python
| 0.000002
|
@@ -31,16 +31,26 @@
os.path%0A
+import re%0A
import s
@@ -1470,32 +1470,87 @@
olledText(self)%0A
+ self.textout.bind('%3CReturn%3E', self.autoindent)%0A
self.tex
@@ -2441,13 +2441,8 @@
nd')
-%5B:-1%5D
)%0A
@@ -2480,16 +2480,271 @@
'end')%0A
+ return 'break'%0A%0A def autoindent(self, event):%0A line = self.textout.get('insert linestart', 'insert lineend')%0A indent = re.match('%5E%5B%5Ct %5D*', line).group(0)%0A self.textout.insert('insert', '%5Cn' + indent)%0A return 'break'%0A
%0A%0Aroot =
|
50a025032cfa07a842291637cb4d8240edcb60ea
|
Rename TabConverter to TapConverter
|
main.py
|
main.py
|
import os
import re
from TapConverter import TapConverter
import settings
import sandschreiber
from werkzeug import secure_filename
from flask import Flask, render_template, request, redirect, jsonify
app = Flask(__name__)
app.jinja_env.filters['basename'] = os.path.basename
ss = sandschreiber.AsyncSandschreiber(settings.device, 115200)
def get_gcodes(directory):
gcodes = []
for filename in os.listdir(directory):
filename = os.path.join(directory, filename)
if os.path.isfile(filename) and filename[-5:] == 'gcode':
gcodes.append(filename)
return gcodes
@app.route("/")
def index():
return render_template('index.jinja2',
files=get_gcodes(settings.gcode_directory),
playlist=ss.playlist,
connected=ss.is_connected(),
printing=ss.printing
)
@app.route('/connect', methods=["POST"])
def connect():
try:
ss.connect()
return 'OK'
except:
return 'FAIL', 500
@app.route('/disconnect', methods=["POST"])
def disconnect():
try:
ss.disconnect()
return 'OK'
except:
return 'FAIL', 500
@app.route('/emergencyStop', methods=["POST"])
def emergency_stop():
ss.stop_print()
return 'OK'
@app.route('/upload', methods=["POST"])
def upload_gcode():
upload_file = request.files['file']
filename = secure_filename(upload_file.filename)
if filename.endswith(".tap"):
filename = re.sub(r'tap$', 'gcode', filename)
converter = TabConverter(upload_file.read())
open(os.path.join(settings.gcode_directory, filename), 'w+').write(converter.get_gcode())
else:
upload_file.save(os.path.join(settings.gcode_directory, filename))
return redirect('/', code=301)
@app.route('/print', methods=["POST"])
def start_print():
ss.start_print()
return 'OK'
@app.route('/print', methods=["DELETE"])
def stop_print():
ss.stop_print()
ss.playlist.clear()
return 'OK'
@app.route("/playlist", methods=["GET"])
def playlist_get():
return jsonify(playlist=ss.playlist.as_json())
@app.route('/playlist', methods=["POST"])
def playlist_add():
filename = request.form.get('filename')
if filename:
filenames = [filename]
else:
filenames = request.form.getlist('filename[]')
for filename in filenames:
pl_item = sandschreiber.PlaylistItem(os.path.join(settings.gcode_directory, filename))
ss.playlist.add(pl_item)
return 'OK'
@app.route('/playlist', methods=["DELETE"])
def playlist_remove():
index = request.form.get('index')
if index:
ss.playlist.remove(int(index))
else:
ss.playlist.clear()
return 'OK'
if __name__ == "__main__":
app.run(host=settings.listen, port=settings.port)
|
Python
| 0.000024
|
@@ -1495,17 +1495,17 @@
ter = Ta
-b
+p
Converte
|
34b8285657190fc310cc319e05a1afb3f98bc5e6
|
add hint
|
main.py
|
main.py
|
# Purpose: automatically generate the weekly IRC meeting-minutes email
#
# Input: the bot's closing messages when the meeting ends
# Example:
'''
<zodbot> Minutes: http://meetbot.fedoraproject.org/fedora-zh/2014-01-31/fedora-zh.2014-01-31-13.02.html
<zodbot> Minutes (text): http://meetbot.fedoraproject.org/fedora-zh/2014-01-31/fedora-zh.2014-01-31-13.02.txt
<zodbot> Log: http://meetbot.fedoraproject.org/fedora-zh/2014-01-31/fedora-zh.2014-01-31-13.02.log.html
'''
#
# Output: an .eml file
#
# Program structure:
# 1. Match (read the user input and extract the links)
# 2. Fetch
# 3. Output (in the .eml file format)
#
# Notes:
# 1. All encoding is UTF-8
# 2. Only Python 3 is guaranteed to run this program
# config
TO="chinese@lists.fedoraproject.org"
CC=("meetingminutes@lists.fedoraproject.org", )
SUBJECT="Fedora Chinese Meeting Minutes"
GREETING="""Hi all,
The IRC meeting minutes yesterday are available at the links below. Thanks
everyone for attending the meeting."""
import re
import email
import email.mime.text
import datetime
import sys
import urllib.request
#debug config
ENABLE_TRACE = True
def trace(s):
if ENABLE_TRACE:
print(s)
def get_user_input(gui=False, file='link'):
    '''Read the user input; a GUI may be supported in the future.
    Returns a tuple of three strings without newline characters.
'''
if gui:
print("GUI stub")
else:
fin = open(file, "r")
result = fin.readlines()
fin.close()
trace(result)
return result
def get_url(s):
    '''Turn a string like "<xxxbot>:abc http://server.org/log.html" into
    the tuple ("abc", "http://server.org/log.html")
'''
trace("get_url() from : " + s)
    bot_pattern = re.compile(r'^<\w+bot>')
#answer from http://stackoverflow.com/a/6883094
    link_pattern = re.compile(
        r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
match = re.search(link_pattern, s)
if not match:
sys.exit("Url Match Error!")
url = match.group(0)
trace(url)
s = s.replace(url, "")
match = re.search(bot_pattern, s)
if not match:
sys.exit("Url Match Error!")
bot = match.group(0)
trace("bot name: " + bot)
s = s.replace(bot, "")
s = s.strip()
s = s.rstrip(':')
s = s.strip()
trace("Description: " + s)
return (s, url)
def fetch_data(url):
    '''No multithreading inside this function; it simply fetches the page
    with the corresponding library. Returns a list of decoded strings.
'''
trace("Fetching "+url)
req = urllib.request.urlopen(url)
lines = req.readlines()
strs = [line.decode('utf-8').rstrip('\n') for line in lines]
trace(strs)
return strs
def make_eml(to, cc, subject, message, log):
'''to: string
cc: list/tuple of strings
subject: string (without date)
message: string
log: list of strings
return: a string
'''
msg = email.message.Message()
msg.add_header('To', to)
cc = ','.join(cc)
msg.add_header('Cc', cc)
now = str(datetime.date.today())
trace(now)
subject += " (" + now + ")"
msg.add_header('Subject', subject)
content = message + '\n' + '\n'.join(log)
trace(content)
msg.set_payload(content, "utf-8")
return msg.as_string()
if __name__ == '__main__':
user_input = get_user_input()
#urls: description -> url
urls = dict()
for uinpt in user_input:
url = get_url(uinpt)
urls[ url[0] ] = url[1]
print("Fetching data from server......")
#log = fetch_data(urls['Minutes (text)'])
log=["a", "b","c", "你好", "用fedora的朋友"]
eml = make_eml(TO, CC, SUBJECT, GREETING, log)
file = open('irc_meeting_log.eml', 'w')
file.write(eml)
file.close()
|
Python
| 0.000014
|
@@ -3372,18 +3372,41 @@
file.close()
+%0A print(%22Finished!%22)
%0A%0A
|
2033d494cba2c61e868ded2d5a5e65cc23d4edf6
|
Remove test failing due to lack of TLS support
|
main.py
|
main.py
|
#!/usr/bin/python
from __future__ import print_function
import argparse  # used by get_credentials() below
import httplib2
from dateutil.parser import parse
import re
import hashlib
from time import sleep
from apiclient import discovery, errors
import oauth2client
from oauth2client import client
from oauth2client import tools
import config
def get_credentials():
""" Gets credentials to access gCal API """
store = oauth2client.file.Storage(config.credential_store)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(config.client_secret, 'https://www.googleapis.com/auth/calendar')
flow.user_agent = config.application
parser = argparse.ArgumentParser(parents=[tools.argparser])
flags = parser.parse_args()
credentials = tools.run_flow(flow, store, flags)
print('Storing credentials to ' + config.credential_store)
return credentials
def get_calendar_service():
""" Gets a service object to use to query gCal API """
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
return discovery.build('calendar', 'v3', http=http)
def load_ical(url):
""" Loads an iCal file from a URL and returns an events object
>>> events = load_ical("https://www.houghton.edu/events/?ical=1&tribe_display=month")
>>> len(events) > 50
True
>>> 'summary' in events.itervalues().next()
True
>>> 'start' in events.itervalues().next()
True
>>> 'end' in events.itervalues().next()
True
"""
resp, content = httplib2.Http().request(url)
assert(resp['status'] == '200')
events = {}
for event in re.findall("BEGIN:VEVENT.*?END:VEVENT", content, re.M|re.I|re.DOTALL):
start = re.search("dtstart;TZID=(.*?):(.*)", event, re.I)
end = re.search("dtend;TZID=(.*?):(.*)", event, re.I)
summary = re.search("summary:(.*)", event, re.I).group(1)
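        # Hash start/end/summary into a stable event id so re-running the
        # sync updates existing gCal events instead of duplicating them.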
hash = hashlib.sha256("%s%s%s" % (start.group(2),end.group(2),summary)).hexdigest()
if parse(start.group(2).replace('Z','')) >= parse(config.start_date):
events[hash] = {
'summary': summary,
'start': {
'dateTime': str(parse(start.group(2).replace('Z',''))).replace(' ','T'),
'timeZone': 'America/New_York',
},
'end': {
'dateTime': str(parse(end.group(2).replace('Z',''))).replace(' ','T'),
'timeZone': 'America/New_York',
},
'id': hash
}
return events
def handle_existing_events(service, new_events):
""" Examines existing gCal events and prunes as needed """
if config.erase_all:
print("Clearing calendar...")
service.calendars().clear(calendarId=config.gcal_id).execute()
for event in service.events().list(calendarId=config.gcal_id, maxResults=2500).execute()['items']:
if event['id'] in new_events:
del new_events[event['id']]
elif config.remove_stale:
print("Deleting stale event %s..." % (event['id'][0:8]))
service.events().delete(calendarId=config.gcal_id, eventId=event['id']).execute()
def add_ical_to_gcal(service, events):
""" Adds all events in event list to gCal """
for i, event in enumerate(events):
print("Adding %d/%d %s" % (i+1,len(events),events[event]['summary']))
try:
sleep(.3)
service.events().insert(calendarId=config.gcal_id, body=events[event]).execute()
except errors.HttpError, e:
if e.resp.status == 409:
print("Event already exists. Updating...")
sleep(.3)
service.events().update(calendarId=config.gcal_id, eventId=event, body=events[event]).execute()
print("Event updated.")
else:
raise e
if __name__ == '__main__':
new_events = load_ical(config.ical_url)
service = get_calendar_service()
handle_existing_events(service, new_events)
add_ical_to_gcal(service, new_events)
|
Python
| 0.000001
|
@@ -1186,282 +1186,8 @@
ject
-%0A%0A %3E%3E%3E events = load_ical(%22https://www.houghton.edu/events/?ical=1&tribe_display=month%22)%0A %3E%3E%3E len(events) %3E 50%0A True%0A %3E%3E%3E 'summary' in events.itervalues().next()%0A True%0A %3E%3E%3E 'start' in events.itervalues().next()%0A True%0A %3E%3E%3E 'end' in events.itervalues().next()%0A True%0A
%22%22%22
|
affc8e0f0be765f5adc31113ad535852e01cc75a
|
Fix unique ID Verisure alarm control panel (#51087)
|
homeassistant/components/verisure/alarm_control_panel.py
|
homeassistant/components/verisure/alarm_control_panel.py
|
"""Support for Verisure alarm control panels."""
from __future__ import annotations
import asyncio
from homeassistant.components.alarm_control_panel import (
FORMAT_NUMBER,
AlarmControlPanelEntity,
)
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import ALARM_STATE_TO_HA, CONF_GIID, DOMAIN, LOGGER
from .coordinator import VerisureDataUpdateCoordinator
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up Verisure alarm control panel from a config entry."""
async_add_entities([VerisureAlarm(coordinator=hass.data[DOMAIN][entry.entry_id])])
class VerisureAlarm(CoordinatorEntity, AlarmControlPanelEntity):
"""Representation of a Verisure alarm status."""
coordinator: VerisureDataUpdateCoordinator
_attr_name = "Verisure Alarm"
_attr_supported_features = SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY
_changed_by: str | None = None
@property
def device_info(self) -> DeviceInfo:
"""Return device information about this entity."""
return {
"name": "Verisure Alarm",
"manufacturer": "Verisure",
"model": "VBox",
"identifiers": {(DOMAIN, self.coordinator.entry.data[CONF_GIID])},
}
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY
@property
def code_format(self) -> str:
"""Return one or more digits/characters."""
return FORMAT_NUMBER
@property
def changed_by(self) -> str | None:
"""Return the last change triggered by."""
return self._changed_by
async def _async_set_arm_state(self, state: str, code: str | None = None) -> None:
"""Send set arm state command."""
arm_state = await self.hass.async_add_executor_job(
self.coordinator.verisure.set_arm_state, code, state
)
LOGGER.debug("Verisure set arm state %s", state)
transaction = {}
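        # Poll until Verisure marks the arm-state transaction as completed.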
while "result" not in transaction:
await asyncio.sleep(0.5)
transaction = await self.hass.async_add_executor_job(
self.coordinator.verisure.get_arm_state_transaction,
arm_state["armStateChangeTransactionId"],
)
await self.coordinator.async_refresh()
async def async_alarm_disarm(self, code: str | None = None) -> None:
"""Send disarm command."""
await self._async_set_arm_state("DISARMED", code)
async def async_alarm_arm_home(self, code: str | None = None) -> None:
"""Send arm home command."""
await self._async_set_arm_state("ARMED_HOME", code)
async def async_alarm_arm_away(self, code: str | None = None) -> None:
"""Send arm away command."""
await self._async_set_arm_state("ARMED_AWAY", code)
@callback
def _handle_coordinator_update(self) -> None:
"""Handle updated data from the coordinator."""
self._attr_state = ALARM_STATE_TO_HA.get(
self.coordinator.data["alarm"]["statusType"]
)
self._changed_by = self.coordinator.data["alarm"].get("name")
super()._handle_coordinator_update()
async def async_added_to_hass(self) -> None:
"""When entity is added to hass."""
await super().async_added_to_hass()
self._handle_coordinator_update()
|
Python
| 0
|
@@ -1244,88 +1244,8 @@
rm%22%0A
- _attr_supported_features = SUPPORT_ALARM_ARM_HOME %7C SUPPORT_ALARM_ARM_AWAY%0A%0A
@@ -1776,16 +1776,169 @@
M_AWAY%0A%0A
+ @property%0A def unique_id(self) -%3E str:%0A %22%22%22Return the unique ID for this entity.%22%22%22%0A return self.coordinator.entry.data%5BCONF_GIID%5D%0A%0A
@pro
|
34a3b5c626e077907c46835b1759a818b3fc332a
|
Make 2-legged calls with the help of tweepy, Twitter API lib.
|
uservoice/__init__.py
|
uservoice/__init__.py
|
from Crypto.Cipher import AES
import base64
import hashlib
import urllib
import operator
import array
import simplejson as json
import urllib2
import datetime
import pytz
from tweepy import oauth
def generate_sso_token(subdomain_name, sso_key, user_attributes):
current_time = (datetime.datetime.now(pytz.utc) + datetime.timedelta(minutes=5)).strftime('%Y-%m-%d %H:%M:%S')
user_attributes.setdefault('expires', current_time)
user_json = json.dumps(user_attributes, separators=(',',':'))
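    # The fixed IV string and SHA-1 key derivation appear to mirror
    # UserVoice's Ruby reference implementation for SSO tokens.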
iv = "OpenSSL for Ruby"
block_size = 16
salted = sso_key + subdomain_name
saltedHash = hashlib.sha1(salted).digest()[:16]
json_bytes = array.array('b', user_json[0 : len(user_json)])
iv_bytes = array.array('b', iv[0 : len(iv)])
    # XOR the IV into the first 16 bytes.
for i in range(0, 16):
json_bytes[i] = operator.xor(json_bytes[i], iv_bytes[i])
pad = block_size - len(json_bytes.tostring()) % block_size
data = json_bytes.tostring() + pad * chr(pad)
aes = AES.new(saltedHash, AES.MODE_CBC, iv)
encrypted_bytes = aes.encrypt(data)
return urllib.quote(base64.b64encode(encrypted_bytes))
|
Python
| 0
|
@@ -134,19 +134,17 @@
t urllib
-
%0A
+
import u
@@ -202,18 +202,16 @@
rt oauth
-
%0A%0Adef ge
@@ -711,17 +711,16 @@
_json)%5D)
-
%0A iv_
@@ -766,52 +766,8 @@
%5D)%0A%0A
- # # xor the iv into the first 16 bytes.%0A
@@ -1057,16 +1057,16 @@
(data)%0A%0A
-
retu
@@ -1115,8 +1115,675 @@
_bytes))
+%0A%0Aclass OAuth:%0A def __init__(self, subdomain_name, api_key, api_secret):%0A self.api_url = %22https://%22 + subdomain_name + %22.uservoice.com%22%0A self.consumer = oauth.OAuthConsumer(api_key, api_secret)%0A%0A def request(self, method, path, params=%7B%7D):%0A url = self.api_url + path%0A request = oauth.OAuthRequest.from_consumer_and_token(%0A self.consumer, http_method=method.upper(), http_url=url, parameters=%7B%7D)%0A request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), self.consumer, None)%0A%0A headers = request.to_header()%0A%0A req = urllib2.Request(url, None, headers)%0A%0A return json.load(urllib2.urlopen(req))%0A
|
98f986aaa938f5aa43183042d2a4b0ad58c3f03d
|
remove debugging
|
sbudget/sbudget.py
|
sbudget/sbudget.py
|
import os
import sqlite3
import string
import random
import time
from flask import Flask, request, session, g, redirect, url_for, abort, \
render_template, flash
app = Flask(__name__)
app.config.from_object(__name__)
app.config.update(dict(
DATABASE=os.path.join(app.root_path, 'sbudget.db'),
SECRET_KEY=''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(32)),
USERNAME='admin',
PASSWORD='default'
))
app.config.from_envvar('FLASKR_SETTINGS', silent=True)
def connect_db():
rv = sqlite3.connect(app.config['DATABASE'])
rv.row_factory = sqlite3.Row
return rv
def get_db():
if not hasattr(g, 'sqlite_db'):
g.sqlite_db = connect_db()
return g.sqlite_db
@app.teardown_appcontext
def close_db(error):
if hasattr(g, 'sqlite_db'):
g.sqlite_db.close()
@app.route('/')
def index():
db = get_db()
cur = db.execute('SELECT id, name FROM types')
types = cur.fetchall()
return render_template('index.html', types=types)
@app.route('/addAmount', methods=['POST'])
def addAmount():
db = get_db()
db.execute('INSERT INTO entries (date, monthcode, daycode, type, amount) VALUES (?,?,?,?,?)',
[request.form['date'], request.form['monthcode'], request.form['daycode'], request.form['type'], request.form['amount']])
db.commit()
cur = db.execute('SELECT name FROM types WHERE id = ?', [request.form['type']])
typeName = cur.fetchall()[0][0]
print typeName
flash('{} added to {}'.format(request.form['amount'], typeName))
return redirect(url_for('index'))
@app.route('/report')
def report():
monthcode = time.strftime("%Y%m")
daycode = time.strftime("%d")
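    # monthcode like "201701" scopes the queries below to the current calendar month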
db = get_db()
    cur = db.execute('SELECT * FROM settings')
settings = cur.fetchall()
monthBudget = settings[0]['monthlyBudget']
    cur = db.execute('SELECT SUM(amount) AS total FROM entries WHERE monthcode = ?', [monthcode])
monthSpent = cur.fetchone()['total']
    cur = db.execute('SELECT AVG(amount) AS total FROM entries WHERE monthcode = ?', [monthcode])
dailyAvgSpent = cur.fetchone()['total']
report = {
"monthbudget": monthBudget,
"monthspent": monthSpent,
"monthleft": monthBudget-monthSpent,
"dailyavgspent": dailyAvgSpent
}
return render_template('report.html', report=report)
@app.route('/settings')
def settings():
db = get_db()
    cur = db.execute('SELECT * FROM settings')
settings = cur.fetchall()
return render_template('settings.html', settings=settings[0])
@app.route('/settings/update', methods=['POST'])
def settingsUpdate():
db = get_db()
db.execute('UPDATE settings SET monthlyBudget = ?', [request.form['monthlybudget']])
db.commit()
return redirect(url_for('report'))
|
Python
| 0.000065
|
@@ -1453,27 +1453,8 @@
%5B0%5D%0A
- print typeName%0A
|
3be6ed2f32492d79b639e657cbf5782451b527e7
|
Disable broken upload test
|
tests/frontend/views/upload_test.py
|
tests/frontend/views/upload_test.py
|
import os
from io import BytesIO
import pytest
from skylines.database import db
from skylines.model import User
pytestmark = pytest.mark.usefixtures('db_session', 'files_folder')
HERE = os.path.dirname(__file__)
DATADIR = os.path.join(HERE, '..', '..', 'data')
@pytest.fixture(scope='function')
def bill(app):
bill = User(first_name='bill',
email_address='bill@example.com',
password='pass')
with app.app_context():
db.session.add(bill)
db.session.commit()
return bill
@pytest.fixture(scope='function')
def logged_in_browser(browser, bill):
form = browser.getForm(index=1)
form.getControl(name='email_address').value = bill.email_address
form.getControl(name='password').value = 'pass'
form.submit()
return browser
def test_upload_broken_igc(logged_in_browser):
b = logged_in_browser
b.open('/flights/upload')
# we should be logged in now
assert 'IGC or ZIP file(s)' in b.contents
b.getControl('IGC or ZIP file(s)').add_file(BytesIO('broken'),
'text/plain',
'/tmp/broken.igc')
b.getControl('Upload').click()
assert 'No flight was saved.' in b.contents
def test_upload_single(logged_in_browser, bill):
assert bill.id is not None
b = logged_in_browser
b.open('/flights/upload')
# we should be logged in now
assert 'IGC or ZIP file(s)' in b.contents
f_igc = open(os.path.join(DATADIR, 'simple.igc'))
b.getControl('IGC or ZIP file(s)').add_file(f_igc,
'text/plain',
'/tmp/simple.igc')
b.getControl('Upload').click()
assert 'Your flights have been saved.' in b.contents
|
Python
| 0
|
@@ -1266,16 +1266,83 @@
tents%0A%0A%0A
+@pytest.mark.skip(reason=%22endless loop in airspace analysis code%22)%0A
def test
|
71a1d2b40a03bde4969f0eea5f2c48d4ba7ace1b
|
Fix batch tests on Python 3
|
tests/integration/cli/test_batch.py
|
tests/integration/cli/test_batch.py
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Nicole Thomas <nicole@saltstack.com>`
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Testing Libs
from tests.support.case import ShellCase
class BatchTest(ShellCase):
'''
Integration tests for the salt.cli.batch module
'''
def test_batch_run(self):
'''
Tests executing a simple batch command to help catch regressions
'''
ret = 'Executing run on [u\'sub_minion\']'
cmd = self.run_salt('\'*minion\' test.echo \'batch testing\' -b 50%')
self.assertIn(ret, cmd)
def test_batch_run_number(self):
'''
Tests executing a simple batch command using a number division instead of
a percentage with full batch CLI call.
'''
ret = "Executing run on [u'minion', u'sub_minion']"
cmd = self.run_salt('\'*minion\' test.ping --batch-size 2')
self.assertIn(ret, cmd)
def test_batch_run_grains_targeting(self):
'''
Tests executing a batch command using a percentage divisor as well as grains
targeting.
'''
os_grain = ''
sub_min_ret = "Executing run on [u'sub_minion']"
min_ret = "Executing run on [u'minion']"
for item in self.run_salt('minion grains.get os'):
if item != 'minion':
os_grain = item
os_grain = os_grain.strip()
cmd = self.run_salt('-C \'G@os:{0} and not localhost\' -b 25% test.ping'.format(os_grain))
self.assertIn(sub_min_ret, cmd)
self.assertIn(min_ret, cmd)
def test_batch_exit_code(self):
'''
Test that a failed state returns a non-zero exit code in batch mode
'''
cmd = self.run_salt(' "*minion" state.single test.fail_without_changes name=test_me -b 33%', with_retcode=True)
self.assertEqual(cmd[-1], 2)
|
Python
| 0.000679
|
@@ -514,10 +514,26 @@
on %5B
-u%5C
+%7B0%7D%5D'.format(repr(
'sub
@@ -543,12 +543,11 @@
nion
-%5C'%5D'
+'))
%0A%0A
@@ -878,25 +878,47 @@
run on %5B
-u
+%7B0%7D, %7B1%7D%5D%22.format(repr(
'minion'
, u'sub_
@@ -909,19 +909,24 @@
'minion'
+)
,
-u
+repr(
'sub_min
@@ -921,34 +921,34 @@
epr('sub_minion'
-%5D%22
+))
%0A cmd = s
@@ -1271,17 +1271,34 @@
run on %5B
-u
+%7B0%7D%5D%22.format(repr(
'sub_min
@@ -1301,18 +1301,18 @@
_minion'
-%5D%22
+))
%0A
@@ -1345,17 +1345,34 @@
on %5B
-u
+%7B0%7D%5D%22.format(repr(
'minion'
%5D%22%0A%0A
@@ -1367,18 +1367,18 @@
'minion'
-%5D%22
+))
%0A%0A
|
6f391d4113b55f538cfeed26c36b17846c7b758f
|
fix alt-svc test
|
tests/level4/test_http3_response.py
|
tests/level4/test_http3_response.py
|
import pytest
import os
import socket
import time
import sys
#@pytest.mark.skip
def test_http2 (launch):
serve = './examples/http3.py'
with launch (serve, port = 30371, quic = 30371, ssl = True) as engine:
resp = engine.http2.get ('/hello?num=1')
assert resp.text == 'hello'
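        # The alt-svc header advertises the HTTP/3 (QUIC) endpoint to clients.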
assert 'alt-svc' in resp.headers
assert resp.headers ['alt-svc'] == 'h3-23=":30371"; ma=86400'
resp = engine.http2.get ('/hello?num=2')
assert resp.text == 'hello\nhello'
resp = engine.http2.post ('/hello', {'num': 2})
assert resp.text == 'hello\nhello'
resp = engine.http2.get ('/lb/project/rs4/')
assert 'pip install rs4' in resp.text
resp = engine.http2.post ('/post', {'username': 'a' * 1000000})
assert len (resp.text) == 1000006
def test_http3 (launch):
if sys.version_info.major == 3 and sys.version_info.minor < 6:
return
serve = './examples/http3.py'
with launch (serve, port = 30371, quic = 30371, ssl = True) as engine:
resp = engine.http3.get ('/hello?num=1')
assert resp.text == 'hello'
resp = engine.http3.get ('/hello?num=2')
assert resp.text == 'hello\nhello'
resp = engine.http3.post ('/hello', {'num': 2})
assert resp.text == 'hello\nhello'
resp = engine.http3.post ('/hello', {'num': 2})
assert resp.text == 'hello\nhello'
resp = engine.http3.post ('/hello', {'num': 1})
assert resp.text == 'hello'
resp = engine.http3.get ('/lb/project/rs4/')
assert 'pip install rs4' in resp.text
resp = engine.http3.post ('/post', {'username': 'a' * 1000000})
assert len (resp.text) == 1000006
|
Python
| 0.000002
|
@@ -305,41 +305,108 @@
-assert 'alt-svc' in resp.headers%0A
+if sys.version_info.major %3E 3 or (sys.version_info.major == 3 and sys.version_info.minor %3E= 6):%0A
|
ff24f5eb908739708496bef5a2402baac9b3680e
|
Fix up tests.
|
IPython/core/tests/test_history.py
|
IPython/core/tests/test_history.py
|
# coding: utf-8
"""Tests for the IPython tab-completion machinery.
"""
#-----------------------------------------------------------------------------
# Module imports
#-----------------------------------------------------------------------------
# stdlib
import os
import sys
import unittest
# third party
import nose.tools as nt
# our own packages
from IPython.utils.tempdir import TemporaryDirectory
from IPython.core.history import HistoryManager, extract_hist_ranges
def setUp():
nt.assert_equal(sys.getdefaultencoding(), "ascii")
def test_history():
ip = get_ipython()
with TemporaryDirectory() as tmpdir:
# Make a new :memory: DB.
hist_manager_ori = ip.history_manager
try:
ip.history_manager = HistoryManager(shell=ip, hist_file=':memory:')
hist = ['a=1', 'def f():\n test = 1\n return test', u"b='€Æ¾÷ß'"]
for i, h in enumerate(hist, start=1):
ip.history_manager.store_inputs(i, h)
ip.history_manager.db_log_output = True
# Doesn't match the input, but we'll just check it's stored.
ip.history_manager.output_hist_reprs[3].append("spam")
ip.history_manager.store_output(3)
nt.assert_equal(ip.history_manager.input_hist_raw, [''] + hist)
# Check lines were written to DB
c = ip.history_manager.db.execute("SELECT source_raw FROM history")
nt.assert_equal([x for x, in c], hist)
# New session
ip.history_manager.reset()
newcmds = ["z=5","class X(object):\n pass", "k='p'"]
for i, cmd in enumerate(newcmds, start=1):
ip.history_manager.store_inputs(i, cmd)
gothist = ip.history_manager.get_range(start=1, stop=4)
nt.assert_equal(list(gothist), zip([0,0,0],[1,2,3], newcmds))
# Previous session:
gothist = ip.history_manager.get_range(-1, 1, 4)
nt.assert_equal(list(gothist), zip([1,1,1],[1,2,3], hist))
# Check get_hist_tail
gothist = ip.history_manager.get_tail(4, output=True,
include_latest=True)
expected = [(1, 3, (hist[-1], ["spam"])),
(2, 1, (newcmds[0], None)),
(2, 2, (newcmds[1], None)),
(2, 3, (newcmds[2], None)),]
nt.assert_equal(list(gothist), expected)
gothist = ip.history_manager.get_tail(2)
expected = [(2, 1, newcmds[0]),
(2, 2, newcmds[1])]
nt.assert_equal(list(gothist), expected)
# Check get_hist_search
gothist = ip.history_manager.search("*test*")
nt.assert_equal(list(gothist), [(1,2,hist[1])] )
gothist = ip.history_manager.search("b*", output=True)
nt.assert_equal(list(gothist), [(1,3,(hist[2],["spam"]))] )
# Cross testing: check that magic %save can get previous session.
testfilename = os.path.realpath(os.path.join(tmpdir, "test.py"))
ip.magic_save(testfilename + " ~1/1-3")
testfile = open(testfilename, "r")
nt.assert_equal(testfile.read().decode("utf-8"),
"# coding: utf-8\n" + "\n".join(hist))
# Duplicate line numbers - check that it doesn't crash, and
# gets a new session
ip.history_manager.store_inputs(1, "rogue")
nt.assert_equal(ip.history_manager.session_number, 3)
finally:
# Restore history manager
ip.history_manager = hist_manager_ori
def test_extract_hist_ranges():
instr = "1 2/3 ~4/5-6 ~4/7-~4/9 ~9/2-~7/5"
expected = [(0, 1, 2), # 0 == current session
(2, 3, 4),
(-4, 5, 7),
(-4, 7, 10),
(-9, 2, None), # None == to end
(-8, 1, None),
(-7, 1, 6)]
actual = list(extract_hist_ranges(instr))
nt.assert_equal(actual, expected)
def test_magic_rerun():
"""Simple test for %rerun (no args -> rerun last line)"""
ip = get_ipython()
ip.run_cell("a = 10")
ip.run_cell("a += 1")
nt.assert_equal(ip.user_ns["a"], 11)
ip.run_cell("%rerun")
nt.assert_equal(ip.user_ns["a"], 12)
|
Python
| 0.000002
|
@@ -625,42 +625,8 @@
ir:%0A
- # Make a new :memory: DB.%0A
@@ -659,32 +659,91 @@
history_manager%0A
+ hist_file = os.path.join(tmpdir, 'history.sqlite')%0A
try:%0A
@@ -811,18 +811,17 @@
ile=
-':memory:'
+hist_file
)%0A
@@ -1376,186 +1376,8 @@
-# Check lines were written to DB%0A c = ip.history_manager.db.execute(%22SELECT source_raw FROM history%22)%0A nt.assert_equal(%5Bx for x, in c%5D, hist)%0A
%0A
@@ -3448,16 +3448,64 @@
rogue%22)%0A
+ ip.history_manager.writeout_cache()%0A
|
137e4580bd1472658a40b44b72e946ae80d98fed
|
Add test for list_bundled_profiles
|
IPython/core/tests/test_profile.py
|
IPython/core/tests/test_profile.py
|
"""Tests for profile-related functions.
Currently only the startup-dir functionality is tested, but more tests should
be added for:
* ipython profile create
* ipython profile list
* ipython profile create --parallel
* security dir permissions
Authors
-------
* MinRK
"""
from __future__ import absolute_import
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import shutil
import sys
import tempfile
from unittest import TestCase
import nose.tools as nt
from nose import SkipTest
from IPython.core.profileapp import list_profiles_in, list_bundled_profiles
from IPython.core.profiledir import ProfileDir
from IPython.testing import decorators as dec
from IPython.testing import tools as tt
from IPython.utils import py3compat
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
TMP_TEST_DIR = tempfile.mkdtemp()
HOME_TEST_DIR = os.path.join(TMP_TEST_DIR, "home_test_dir")
IP_TEST_DIR = os.path.join(HOME_TEST_DIR,'.ipython')
#
# Setup/teardown functions/decorators
#
def setup():
"""Setup test environment for the module:
- Adds dummy home dir tree
"""
# Do not mask exceptions here. In particular, catching WindowsError is a
# problem because that exception is only defined on Windows...
os.makedirs(IP_TEST_DIR)
def teardown():
"""Teardown test environment for the module:
- Remove dummy home dir tree
"""
# Note: we remove the parent test dir, which is the root of all test
# subdirs we may have created. Use shutil instead of os.removedirs, so
# that non-empty directories are all recursively removed.
shutil.rmtree(TMP_TEST_DIR)
#-----------------------------------------------------------------------------
# Test functions
#-----------------------------------------------------------------------------
def win32_without_pywin32():
if sys.platform == 'win32':
try:
import pywin32
except ImportError:
return True
return False
class ProfileStartupTest(TestCase):
def setUp(self):
# create profile dir
self.pd = ProfileDir.create_profile_dir_by_name(IP_TEST_DIR, 'test')
self.options = ['--ipython-dir', IP_TEST_DIR, '--profile', 'test']
self.fname = os.path.join(TMP_TEST_DIR, 'test.py')
def tearDown(self):
# We must remove this profile right away so its presence doesn't
# confuse other tests.
shutil.rmtree(self.pd.location)
def init(self, startup_file, startup, test):
# write startup python file
with open(os.path.join(self.pd.startup_dir, startup_file), 'w') as f:
f.write(startup)
# write simple test file, to check that the startup file was run
with open(self.fname, 'w') as f:
f.write(py3compat.doctest_refactor_print(test))
def validate(self, output):
tt.ipexec_validate(self.fname, output, '', options=self.options)
@dec.skipif(win32_without_pywin32(), "Test requires pywin32 on Windows")
def test_startup_py(self):
self.init('00-start.py', 'zzz=123\n',
py3compat.doctest_refactor_print('print zzz\n'))
self.validate('123')
@dec.skipif(win32_without_pywin32(), "Test requires pywin32 on Windows")
def test_startup_ipy(self):
self.init('00-start.ipy', '%profile\n', '')
self.validate('test')
def test_list_profiles_in():
# No need to remove these directories and files, as they will get nuked in
# the module-level teardown.
prof_file = tempfile.NamedTemporaryFile(prefix='profile_', dir=IP_TEST_DIR)
prof_dir = tempfile.mkdtemp(prefix='profile_', dir=IP_TEST_DIR)
# Now, check that the profile listing doesn't get confused by files named
# profile_X
prof_name = os.path.split(prof_dir)[1].split('profile_')[1]
profiles = list_profiles_in(IP_TEST_DIR)
nt.assert_equals(profiles, [prof_name])
#def test_list_bundled_profiles():
|
Python
| 0.000001
|
@@ -4168,17 +4168,16 @@
)%0A %0A%0A
-#
def test
@@ -4201,10 +4201,248 @@
files():
+%0A # This variable will need to be updated when a new profile gets bundled%0A bundled_true = %5Bu'cluster', u'math', u'pysh', u'python3', u'sympy'%5D%0A bundled = sorted(list_bundled_profiles())%0A nt.assert_equals(bundled, bundled_true)
%0A%0A
|
1e9ebf139ae76eddfe8dd01290e41735e7d1011b
|
Rewrite syntax to be Python 3.5+
|
IPython/utils/tests/test_openpy.py
|
IPython/utils/tests/test_openpy.py
|
import io
import os.path
import nose.tools as nt
from IPython.utils import openpy
mydir = os.path.dirname(__file__)
nonascii_path = os.path.join(mydir, '../../core/tests/nonascii.py')
def test_detect_encoding():
with open(nonascii_path, 'rb') as f:
enc, lines = openpy.detect_encoding(f.readline)
nt.assert_equal(enc, 'iso-8859-5')
def test_read_file():
with io.open(nonascii_path, encoding='iso-8859-5') as f:
read_specified_enc = f.read()
read_detected_enc = openpy.read_py_file(nonascii_path, skip_encoding_cookie=False)
nt.assert_equal(read_detected_enc, read_specified_enc)
assert u'coding: iso-8859-5' in read_detected_enc
read_strip_enc_cookie = openpy.read_py_file(nonascii_path, skip_encoding_cookie=True)
assert u'coding: iso-8859-5' not in read_strip_enc_cookie
def test_source_to_unicode():
with io.open(nonascii_path, 'rb') as f:
source_bytes = f.read()
nt.assert_equal(openpy.source_to_unicode(source_bytes, skip_encoding_cookie=False).splitlines(),
source_bytes.decode('iso-8859-5').splitlines())
source_no_cookie = openpy.source_to_unicode(source_bytes, skip_encoding_cookie=True)
nt.assert_not_in(u'coding: iso-8859-5', source_no_cookie)
|
Python
| 0.999892
|
@@ -615,33 +615,32 @@
enc)%0A assert
-u
'coding: iso-885
@@ -771,17 +771,16 @@
assert
-u
'coding:
@@ -1208,17 +1208,16 @@
_not_in(
-u
'coding:
|
a5b111833f3edd050c9d45553d9e21afa9fa1d57
|
fix mysql bug
|
everyclass/__init__.py
|
everyclass/__init__.py
|
import logging
from flask import Flask, g, render_template, send_from_directory, session
from flask_cdn import CDN
from htmlmin import minify
from termcolor import cprint
from raven.contrib.flask import Sentry
from elasticapm.contrib.flask import ElasticAPM
from elasticapm.handlers.logging import LoggingHandler
from everyclass.config import load_config
from everyclass.utils import monkey_patch
from everyclass import db_operations  # needed so db_operations.get_conn() below resolves
from everyclass.db_operations import init_db
config = load_config()
ElasticAPM.request_finished = monkey_patch.ElasticAPM.request_finished(ElasticAPM.request_finished)
def create_app():
app = Flask(__name__, static_folder='static', static_url_path='')
# load config
app.config.from_object(config)
cprint('App created. Running under `{}` config'.format(app.config['CONFIG_NAME']), color='blue')
# CDN
CDN(app)
# Sentry
sentry = Sentry(app)
# Elastic APM
apm = ElasticAPM(app)
    # Initialize the database
init_db(app)
# logging
handler = LoggingHandler(client=apm.client)
handler.setLevel(logging.WARN)
app.logger.addHandler(handler)
    # Import and register blueprints
from everyclass.cal import cal_blueprint
from everyclass.query import query_blueprint
from everyclass.views import main_blueprint as main_blueprint
from everyclass.api import api_v1 as api_blueprint
app.register_blueprint(cal_blueprint)
app.register_blueprint(query_blueprint)
app.register_blueprint(main_blueprint)
app.register_blueprint(api_blueprint, url_prefix='/api/v1')
@app.before_request
def set_user_id():
"""在请求之前设置 session uid,方便 Elastic APM 记录用户请求"""
if not session.get('user_id', None):
            # Generate a unique ID in the database, see https://blog.csdn.net/longjef/article/details/53117354
conn = db_operations.get_conn()
cursor = conn.cursor()
cursor.execute("REPLACE INTO user_id_sequence (stub) VALUES ('a'); SELECT LAST_INSERT_ID();")
session['user_id'] = cursor.fetchall()[0][0]
cursor.close()
@app.teardown_request
def close_db(error):
"""结束时关闭数据库连接"""
if hasattr(g, 'mysql_db'):
g.mysql_db.close()
@app.after_request
def response_minify(response):
"""用 htmlmin 压缩 HTML,减轻带宽压力"""
if app.config['HTML_MINIFY'] and response.content_type == u'text/html; charset=utf-8':
response.set_data(minify(response.get_data(as_text=True)))
return response
@app.template_filter('versioned')
def version_filter(filename):
"""
        Template filter. If STATIC_VERSIONED, return a versioned file name
        like 'style-v1-c012dr.css' instead of 'style-v1.css'.
        :param filename: the file name
        :return: the new file name
"""
if app.config['STATIC_VERSIONED']:
if filename[:4] == 'css/':
new_filename = app.config['STATIC_MANIFEST'][filename[4:]]
return 'css/' + new_filename
elif filename[:3] == 'js/':
new_filename = app.config['STATIC_MANIFEST'][filename[3:]]
return new_filename
else:
return app.config['STATIC_MANIFEST'][filename]
return filename
@app.errorhandler(500)
def internal_server_error(error):
return render_template('500.html',
event_id=g.sentry_event_id,
public_dsn=sentry.client.get_public_dsn('https'))
return app
|
Python
| 0
|
@@ -1927,16 +1927,28 @@
T_ID();%22
+, multi=True
)%0A
|
64ed1185fca6ba60e06d508ac401f68d5be1ce56
|
bring tests up to #442 change
|
tests/python_tests/load_map_test.py
|
tests/python_tests/load_map_test.py
|
#!/usr/bin/env python
from nose.tools import *
from utilities import execution_path
import os, sys, glob, mapnik
def setup():
# All of the paths used are relative, if we run the tests
# from another directory we need to chdir()
os.chdir(execution_path('.'))
# We expect these files to not raise any
# exceptions at all
def assert_loads_successfully(file):
m = mapnik.Map(512, 512)
strict = True
mapnik.load_map(m, file, strict)
# libxml2 is not smart about paths, and clips the last directory off
# of a path if it does not end in a trailing slash
base_path = os.path.dirname(file) + '/'
mapnik.load_map_from_string(m,open(file,'rb').read(),strict,base_path)
# We expect these files to raise a UserWarning
# and fail if there isn't one (or a different type
# of exception)
@raises(UserWarning)
def assert_raises_userwarning(file):
m = mapnik.Map(512, 512)
strict = True
mapnik.load_map(m, file, strict)
def test_broken_files():
broken_files = glob.glob("../data/broken_maps/*.xml")
# Add a filename that doesn't exist
broken_files.append("../data/broken/does_not_exist.xml")
for file in broken_files:
yield assert_raises_userwarning, file
def test_good_files():
good_files = glob.glob("../data/good_maps/*.xml")
for file in good_files:
yield assert_loads_successfully, file
|
Python
| 0
|
@@ -743,27 +743,28 @@
raise a
-UserWarning
+RuntimeError
%0A# and f
@@ -835,19 +835,20 @@
ses(
-UserWarning
+RuntimeError
)%0Ade
@@ -863,27 +863,29 @@
_raises_
-userwarning
+runtime_error
(file):%0A
@@ -1219,19 +1219,21 @@
ses_
-userwarning
+runtime_error
, fi
|
558846fc67ac40d444ed4f64d35bc50e5d2e057b
|
Remove the description as it is optional.
|
billing/gateways/stripe_gateway.py
|
billing/gateways/stripe_gateway.py
|
from billing import Gateway
from billing.utils.credit_card import InvalidCard, Visa, MasterCard, \
AmericanExpress, Discover
import stripe
from django.conf import settings
class StripeGateway(Gateway):
supported_cardtypes = [Visa, MasterCard, AmericanExpress, Discover]
supported_countries = ['US']
default_currency = "USD"
homepage_url = "https://stripe.com/"
display_name = "Stripe"
def __init__(self):
stripe.api_key = settings.STRIPE_API_KEY
self.stripe = stripe
def purchase(self, amount, credit_card, options=None):
if not self.validate_card(credit_card):
raise InvalidCard("Invalid Card")
try:
response = self.stripe.Charge.create(
amount=amount * 100,
currency=self.default_currency.lower(),
card={
'number': credit_card.number,
'exp_month': credit_card.month,
'exp_year': credit_card.year,
'cvc': credit_card.verification_value
},)
except self.stripe.CardError, error:
return {'status': 'FAILURE', 'response': error}
return {'status': 'SUCCESS', 'response': response}
def store(self, credit_card, options=None):
if not self.validate_card(credit_card):
raise InvalidCard("Invalid Card")
customer = self.stripe.Customer.create(
card={
'number': credit_card.number,
'exp_month': credit_card.month,
'exp_year': credit_card.year,
'cvc': credit_card.verification_value
},
description="Storing for future use"
)
return {'status': 'SUCCESS', 'response': customer}
def recurring(self, credit_card, options=None):
if not self.validate_card(credit_card):
raise InvalidCard("Invalid Card")
response = None
try:
plan_id = options['plan_id']
self.stripe.Plan.retrieve(options['plan_id'])
try:
response = self.stripe.Customer.create(
card={
'number': credit_card.number,
'exp_month': credit_card.month,
'exp_year': credit_card.year,
'cvc': credit_card.verification_value
},
plan=plan_id,
description="Thanks for subscribing"
)
return {"status": "SUCCESS", "response": response}
except self.stripe.CardError, error:
return {"status": "FAILURE", "response": error}
except self.stripe.InvalidRequestError, error:
return {"status": "FAILURE", "response": error}
except TypeError:
return {"status": "FAILURE", "response": "please give a plan id"}
def unstore(self, identification, options=None):
try:
customer = self.stripe.Customer.retrieve(identification)
response = customer.delete()
return {"status": "SUCCESS", "response": response}
except self.stripe.InvalidRequestError, error:
return {"status": "FAILURE", "response": error}
def credit(self, identification, money=None, options=None):
try:
charge = self.stripe.Charge.retrieve(identification)
response = charge.refund(amount=money)
return {"status": "SUCCESS", "response": response}
except self.stripe.InvalidRequestError, error:
return {"status": "FAILURE", "error": error}
def authorize(self, money, credit_card, options=None):
if not self.validate_card(credit_card):
raise InvalidCard("Invalid Card")
try:
token = self.stripe.Token.create(
card={
'number': credit_card.number,
'exp_month': credit_card.month,
'exp_year': credit_card.year,
'cvc': credit_card.verification_value
},
amount=money * 100
)
return {'status': "SUCCESS", "response": token}
except self.stripe.InvalidRequestError, error:
return {"status": "FAILURE", "response": error}
def capture(self, money, authorization, options=None):
try:
response = self.stripe.Charge.create(
amount=money * 100,
card=authorization,
currency="usd"
)
return {'status': "SUCCESS", "response": response}
except self.stripe.InvalidRequestError, error:
return {"status": "FAILURE", "response": error}
|
Python
| 0.000003
|
@@ -1071,11 +1071,10 @@
%7D
-,
)%0A
+
@@ -1681,75 +1681,8 @@
%7D
-,%0A description=%22Storing for future use%22%0A
)%0A
@@ -2362,32 +2362,32 @@
%7D,%0A
+
@@ -2406,66 +2406,8 @@
n_id
-,%0A description=%22Thanks for subscribing%22
%0A
|
24b7d3d0904a75e1e26ccfc34834ae495edb8146
|
Fix for skipping django-configurations tests when running on Python < 2.6
|
tests/test_django_configurations.py
|
tests/test_django_configurations.py
|
"""Tests which check the various ways you can set DJANGO_SETTINGS_MODULE
If these tests fail you probably forgot to install django-configurations.
"""
import pytest
pytest.importorskip('configurations')
pytestmark = pytest.mark.skipif("sys.version_info < (2,6) ")
BARE_SETTINGS = '''
from configurations import Settings
class MySettings(Settings):
# At least one database must be configured
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:'
},
}
SECRET_KEY = 'foobar'
'''
def test_dc_env(testdir, monkeypatch):
monkeypatch.setenv('DJANGO_SETTINGS_MODULE', 'tpkg.settings_env')
monkeypatch.setenv('DJANGO_CONFIGURATION', 'MySettings')
pkg = testdir.mkpydir('tpkg')
settings = pkg.join('settings_env.py')
settings.write(BARE_SETTINGS)
testdir.makepyfile("""
import os
def test_settings():
assert os.environ['DJANGO_SETTINGS_MODULE'] == 'tpkg.settings_env'
assert os.environ['DJANGO_CONFIGURATION'] == 'MySettings'
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines(['*1 passed*'])
def test_dc_ini(testdir, monkeypatch):
monkeypatch.setenv('DJANGO_SETTINGS_MODULE', 'DO_NOT_USE')
monkeypatch.setenv('DJANGO_CONFIGURATION', 'DO_NOT_USE')
testdir.makeini("""\
[pytest]
DJANGO_SETTINGS_MODULE = tpkg.settings_ini
DJANGO_CONFIGURATION = MySettings
""")
pkg = testdir.mkpydir('tpkg')
settings = pkg.join('settings_ini.py')
settings.write(BARE_SETTINGS)
testdir.makepyfile("""
import os
def test_ds():
assert os.environ['DJANGO_SETTINGS_MODULE'] == 'tpkg.settings_ini'
assert os.environ['DJANGO_CONFIGURATION'] == 'MySettings'
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines(['*1 passed*'])
def test_dc_option(testdir, monkeypatch):
monkeypatch.setenv('DJANGO_SETTINGS_MODULE', 'DO_NOT_USE_env')
monkeypatch.setenv('DJANGO_CONFIGURATION', 'DO_NOT_USE_env')
testdir.makeini("""\
[pytest]
DJANGO_SETTINGS_MODULE = DO_NOT_USE_ini
DJANGO_CONFIGURATION = DO_NOT_USE_ini
""")
pkg = testdir.mkpydir('tpkg')
settings = pkg.join('settings_opt.py')
settings.write(BARE_SETTINGS)
testdir.makepyfile("""
import os
def test_ds():
assert os.environ['DJANGO_SETTINGS_MODULE'] == 'tpkg.settings_opt'
assert os.environ['DJANGO_CONFIGURATION'] == 'MySettings'
""")
result = testdir.runpytest('--ds=tpkg.settings_opt', '--dc=MySettings')
result.stdout.fnmatch_lines(['*1 passed*'])
|
Python
| 0.000002
|
@@ -156,38 +156,39 @@
ort
-pytest%0A%0A%0A
+sys%0Aimport
pytest
-.
+%0A%0A#
import
-orskip('
+ing
conf
@@ -201,44 +201,60 @@
ions
-')%0Apytestmark = pytest.mark.skipif(%22
+ fails on 2.5, even though it might be installed%0Aif
sys.
@@ -275,12 +275,124 @@
(2,
+
6)
- %22
+:%0A pytest.skip('django-configurations is not supported on Python %3C 2.6')%0A%0Apytest.importorskip('configurations'
)%0A%0A%0A
|
cfb6f65a12c88a4b497e787d03321ba8cfcbda27
|
Fix an error in attribute retrieval
|
biothings/web/settings/__init__.py
|
biothings/web/settings/__init__.py
|
# -*- coding: utf-8 -*-
'''Settings objects used to configure the web API
These settings get passed into the handler.initialize() function,
of each request, and configure the web API endpoint. They are mostly
a container for the `Config module`_, and any other settings that
are the same across all handler types, e.g. the Elasticsearch client.'''
import asyncio
import json
import logging
import os
import socket
import types
from importlib import import_module
import elasticsearch
from elasticsearch import ConnectionSelector
from elasticsearch_async import AsyncElasticsearch
from elasticsearch_async.transport import AsyncTransport
from elasticsearch_dsl.connections import Connections
# Error class
class BiothingConfigError(Exception):
pass
class BiothingWebSettings(object):
'''
A container for the settings that configure the web API.
* Environment variables can override settings of the same names.
* Default values are defined in biothings.web.settings.default.
'''
def __init__(self, config='biothings.web.settings.default'):
'''
:param config: a module that configures this biothing or its fully qualified name.
'''
self._user = config if isinstance(config, types.ModuleType) else import_module(config)
self._default = import_module('biothings.web.settings.default')
# for metadata dev details
if os.path.isdir(os.path.join(self.APP_GIT_REPOSITORY, '.git')):
self._git_repo_path = self.APP_GIT_REPOSITORY
else:
self._git_repo_path = None
self.validate()
def __getattr__(self, name):
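        # Resolution order: environment variable first, then the user config
        # module, then the bundled defaults.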
if hasattr(self._user, name) or hasattr(self._default, name):
# environment variables can override named settings
if name in os.environ:
return os.environ[name]
return getattr(self._user, name, getattr(self._default))
raise AttributeError("No setting named '{}' in configuration file.".format(name))
def generate_app_list(self):
'''
Generates the tornado.web.Application `(regex, handler_class, options) tuples
<http://www.tornadoweb.org/en/stable/web.html#application-configuration>`_ for this project.
'''
handlers = []
for rule in self.APP_LIST:
settings = {"web_settings": self}
if len(rule) == 3:
settings.update(rule[-1])
handlers.append((rule[0], rule[1], settings))
return handlers
def get_git_repo_path(self):
'''
Return the path of the codebase if the specified folder in settings exists or `None`.
'''
return self._git_repo_path
def validate(self):
'''
Validate the settings defined for this web server.
'''
for rule in self.APP_LIST:
if len(rule) == 2:
pass
elif len(rule) == 3:
assert isinstance(rule[-1], dict)
else:
raise BiothingConfigError()
#### COMPATIBILITY METHODS ####
def set_debug_level(self, debug=False):
pass
@property
def git_repo_path(self):
return self._git_repo_path
class KnownLiveSelecter(ConnectionSelector):
"""
Select the first connection all the time
"""
def select(self, connections):
return connections[0]
class BiothingESWebSettings(BiothingWebSettings):
'''
`BiothingWebSettings`_ subclass with functions specific to an elasticsearch backend.
* Use the known live ES connection if more than one is specified.
* Cache source metadata stored under the _meta field in es indices.
'''
ES_VERSION = elasticsearch.__version__[0]
def __init__(self, config='biothings.web.settings.default'):
'''
The ``config`` init parameter specifies a module that configures
this biothing. For more information see `config module`_ documentation.
'''
super(BiothingESWebSettings, self).__init__(config)
# elasticsearch connections
self._connections = Connections()
connection_settings = {
"hosts": self.ES_HOST,
"timeout": self.ES_CLIENT_TIMEOUT,
"max_retries": 1, # maximum number of retries before an exception is propagated
"timeout_cutoff": 1, # number of consecutive failures after which the timeout doesn’t increase
"selector_class": KnownLiveSelecter}
self._connections.create_connection(alias='sync', **connection_settings)
connection_settings.update(transport_class=AsyncTransport)
self._connections.create_connection(alias='async', **connection_settings)
# project metadata under index mappings
self._source_metadata = {}
# populate field notes if exist
try:
inf = open(self.AVAILABLE_FIELDS_NOTES_PATH, 'r')
self._fields_notes = json.load(inf)
inf.close()
except Exception:
self._fields_notes = {}
# initialize payload for standalone tracking batch
self.tracking_payload = []
def validate(self):
'''
Additional ES settings to validate.
'''
super().validate()
assert isinstance(self.ES_INDEX, str)
assert isinstance(self.ES_DOC_TYPE, str)
assert isinstance(self.ES_INDICES, dict)
assert '*' not in self.ES_DOC_TYPE
self.ES_INDICES[self.ES_DOC_TYPE] = self.ES_INDEX
def get_es_client(self):
'''
Return the default blocking elasticsearch client.
The connection is created upon first call.
'''
return self._connections.get_connection('sync')
def get_async_es_client(self):
'''
Return the async elasitcsearch client. API calls return awaitable objects.
The connection is created upon first call.
'''
return self._connections.get_connection('async')
def get_source_metadata(self, biothing_type='doc', latest=True):
'''
Get metadata defined in the ES index.
:param biothing_type: If multiple biothings are defined, specify which here.
:param latest: If set to `false`, return the cached copy. Otherwise retrieve latest.
'''
cached = biothing_type in self._source_metadata
if latest or not cached:
kwargs = {
'index': self.ES_INDICES[biothing_type],
'allow_no_indices': True,
'ignore_unavailable': True,
'local': not latest
}
if self.ES_VERSION < 7:
kwargs['doc_type'] = biothing_type
mappings = self.get_es_client().get_mapping(**kwargs)
metadata = {}
for index in mappings:
if self.ES_VERSION < 7:
_meta = mappings[index]['mappings'][biothing_type].get('_meta', {})
else:
_meta = mappings[index]['mappings'].get('_meta', {})
metadata.update(_meta)
self._source_metadata[biothing_type] = metadata
return self._source_metadata[biothing_type]
def get_field_notes(self):
'''
Return the cached field notes associated with this instance.
'''
return self._fields_notes
##### COMPATIBILITY METHODS #####
@property
def es_client(self):
return self.get_es_client()
@property
def async_es_client(self):
return self.get_async_es_client()
def source_metadata(self):
pass
def doc_url(self, bid):
return os.path.join(self.URL_BASE, self.API_VERSION, self.ES_DOC_TYPE, bid)
def available_fields_notes(self):
return self._fields_notes
|
Python
| 0.000027
|
@@ -1909,16 +1909,22 @@
_default
+, name
))%0A%0A
|
441c5e5a6a96544d3c34ed71ff37d93c3476524f
|
fix syntax issue;
|
biothings_explorer/id_converter.py
|
biothings_explorer/id_converter.py
|
# -*- coding: utf-8 -*-
"""
biothings_explorer.id_converter
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains code that biothings_explorer use to resolve
different identifiers
"""
import time
from .registry import Registry
from .apicall import BioThingsCaller
from .api_output_parser import OutputParser
class IDConverter():
def __init__(self, registry=None):
if not registry:
self.registry = Registry()
else:
self.registry = registry
self.caller = BioThingsCaller(batch_mode=True)
self.semantic_type_api_mapping = {'Gene': 'mygene.info',
'Variant': 'myvariant.info',
'ChemicalSubstance': 'mychem.info',
'DiseaseOrPhenotypicFeature': "mydisease.info",
"AnatomicalEntity": "semmedanatomy",
"PhenotypicFeature": "semmedphenotype",
"Pathway": "pathway",
"MolecularActivity": "mf",
"CellularComponent": "cc",
"BiologicalProcess": "bp",
}
def fetch_schema_mapping_file(self, api):
"""Fetch schema mapping file from the registry"""
return self.registry.registry[api]['mapping']
def subset_mapping_file(self, mapping_file):
return {k:v for (k,v) in mapping_file.items() if k in (self.registry.mp.id_list + ["bts:name"])}
def get_output_fields(self, mapping_file):
fields = []
for k, v in mapping_file.items():
if isinstance(v, list):
fields += v
elif isinstance(v, str):
fields.append(v)
return ','.join(fields)
def get_input_fields(self, mapping_file, _type):
input_fields = mapping_file.get(_type)
if isinstance(input_fields, list):
return input_fields[0]
else:
return input_fields
def convert_ids(self, inputs):
results = {}
api_call_inputs = []
mapping_files = []
apis = []
types = []
for _input in inputs:
ids, _type, semantic_type = _input
# convert id to list
if isinstance(ids, str):
ids = [ids]
# make sure all ids in id list is str
for _id in ids:
if ' ' in str(_id):
results[_type[4:] + ':' + str(_id)] = {_type: [str(_id)]}
if isinstance(ids, list):
ids = [str(i) for i in ids if ' ' not in str(i)]
#if _type == 'bts:efo':
# ids = [i.split(':')[-1] for i in ids]
api = self.semantic_type_api_mapping.get(semantic_type)
# if id can not be converted, the equivalent id is itself
if not api:
if _type.startswith("bts:"):
_type = _type[4:]
for _id in ids:
results[_type + ':' + _id] = {'bts:' + _type: [_id]}
else:
mapping_file = self.fetch_schema_mapping_file(api)
mapping_file = self.subset_mapping_file(mapping_file)
if self.get_input_fields(mapping_file, _type):
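                    # BioThings batch endpoints accept at most 1000 ids per
                    # request, so large id lists are split into 1000-id chunks.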
if type(ids) == list and len(ids) > 1000:
for i in range(0, len(ids), 1000):
api_call_inputs.append({"api": api,
"input": self.get_input_fields(mapping_file, _type),
"output": self.get_output_fields(mapping_file),
"values": ','.join(ids[i:i+1000]),
"batch_mode": True
})
types.append(_type)
mapping_files.append(mapping_file)
apis.append(api)
else:
api_call_inputs.append({"api": api,
"input": self.get_input_fields(mapping_file, _type),
"output": self.get_output_fields(mapping_file),
"values": ','.join(ids),
"batch_mode": True
})
types.append(_type)
mapping_files.append(mapping_file)
apis.append(api)
else:
if _type.startswith("bts:"):
_type = _type[4:]
for _id in ids:
results[_type + ':' + _id] = {'bts:' + _type: [_id]}
# make API calls asynchronously and gather all outputs
responses = self.caller.call_apis(api_call_inputs, size=10)
# loop through outputs
for _res, _map, _api, _type in zip(responses, mapping_files,
apis, types):
# restructure API output based on mapping file
new_res = OutputParser(_res, _map, True, _api).parse()
# remove "@context" and "@type" from result
for k, v in new_res.items():
if _type == 'bts:efo' or _type == 'efo':
k = 'EFO:' + k
if '@context' in v:
v.pop("@context")
if '@type' in v:
v.pop("@type")
if _type.startswith("bts:"):
_type = _type[4:]
# remove duplicates
for m, n in v.items():
if n and type(n) == list:
v[m] = list(set(n))
# after removing @context and @type, check if the dict is empty
if v:
results[_type + ':' + k] = v
# if the dict is empty, just return itself as its value
else:
results[_type + ':' + k] = {'bts:' + _type: [k]}
return results
|
Python
| 0
|
@@ -182,20 +182,8 @@
%22%22%22%0A
-import time%0A
from
@@ -1403,16 +1403,109 @@
registry
+.%0A %0A Parameters%0A ----------%0A api (str) : the name of API%0A
%22%22%22%0A
@@ -1784,27 +1784,24 @@
%0A for
- k,
v in mappin
@@ -1807,20 +1807,21 @@
ng_file.
-item
+value
s():%0A
@@ -3514,25 +3514,29 @@
if
-typ
+isinstanc
e(ids
-) ==
+,
list
+)
and
@@ -6016,23 +6016,27 @@
and
-type(n) ==
+isinstance(n,
list
+)
:%0A
|
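Decoded from the %-escapes, the syntax fix in this record swaps exact-type comparisons for isinstance checks and iterates mapping values directly where the key was unused. A minimal, runnable sketch of both idioms (illustrative values, not the record's data):

# Idioms introduced by this diff, shown standalone.
mapping_file = {"bts:entrez": ["entrezgene"], "bts:name": "name"}

# `for v in mapping_file.values()` replaces `for k, v in mapping_file.items()`.
fields = []
for v in mapping_file.values():
    if isinstance(v, list):
        fields += v
    elif isinstance(v, str):
        fields.append(v)

# isinstance also accepts subclasses, unlike the exact-type comparison it replaces.
class IdList(list):
    pass

assert isinstance(IdList(["1017"]), list)
assert not (type(IdList(["1017"])) == list)
print(",".join(fields))  # entrezgene,name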
cf2a9f0918bc56a9015c745108f6de5ae8c60773
|
Add status
|
my-ACG/update-episodes/anime1_me.py
|
my-ACG/update-episodes/anime1_me.py
|
# -*- coding: utf-8 -*-
import argparse
import importlib
import os
import sys
os.environ['PYWIKIBOT_DIR'] = os.path.dirname(os.path.realpath(__file__))
import pywikibot
sys.path.append('..')
animeSite = importlib.import_module('util.anime1_me', 'Anime1Me').Anime1Me()
site = pywikibot.Site()
site.login()
datasite = site.data_repository()
def updateEpisodes(title):
myitem = pywikibot.ItemPage(datasite, title)
print(title, myitem.get()['labels']['zh-tw'])
claims = myitem.get()['claims']
if 'P38' in claims:
claim = claims['P38'][0]
url = claim.getTarget()
data = animeSite.getData(url)
new_episodes = data['episodes']
print('\t url', url)
print('\t new_episodes', new_episodes)
if 'P27' in claims:
episodesValue = claims['P27'][0].getTarget()
old_episodes = episodesValue.amount
print('\t old_episodes', old_episodes)
if new_episodes > old_episodes:
episodesValue.amount = new_episodes
print('\t Update episodes from {} to {}'.format(old_episodes, new_episodes))
claims['P27'][0].changeTarget(episodesValue, summary='更新總集數')
else:
print('\t Not update')
if 'P31' in claims and claims['P31'][0].getTarget().id == 'Q57':
print('\t Update status to playing')
statusValue = pywikibot.ItemPage(datasite, 'Q56')
claims['P31'][0].changeTarget(statusValue, summary='更新播放狀態')
else:
new_claim = pywikibot.page.Claim(datasite, 'P27')
new_claim.setTarget(pywikibot.WbQuantity(new_episodes, site=datasite))
print('\t Add new episodes {}'.format(new_episodes))
myitem.addClaim(new_claim, summary='新增總集數')
if 'P31' in claims:
if data['end']:
print('\t Update status to end')
statusValue = pywikibot.ItemPage(datasite, 'Q58')
claims['P31'][0].changeTarget(statusValue, summary='更新播放狀態')
else:
print('\t Not anime1')
def main():
moegirlitem = pywikibot.ItemPage(datasite, 'Q56')
for backlink in moegirlitem.backlinks(namespaces=[120]):
updateEpisodes(backlink.title())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('title', nargs='?')
args = parser.parse_args()
if args.title is None:
main()
else:
updateEpisodes(args.title)
|
Python
| 0.000001
|
@@ -1258,282 +1258,8 @@
te')
-%0A%0A if 'P31' in claims and claims%5B'P31'%5D%5B0%5D.getTarget().id == 'Q57':%0A print('%5Ct Update status to playing')%0A statusValue = pywikibot.ItemPage(datasite, 'Q56')%0A claims%5B'P31'%5D%5B0%5D.changeTarget(statusValue, summary='%E6%9B%B4%E6%96%B0%E6%92%AD%E6%94%BE%E7%8B%80%E6%85%8B')
%0A
@@ -1579,16 +1579,278 @@
if
+claims%5B'P31'%5D%5B0%5D.getTarget().id == 'Q57':%0A print('%5Ct Update status to playing')%0A statusValue = pywikibot.ItemPage(datasite, 'Q56') # %E6%94%BE%E9%80%81%E4%B8%AD%0A claims%5B'P31'%5D%5B0%5D.changeTarget(statusValue, summary='%E6%9B%B4%E6%96%B0%E6%92%AD%E6%94%BE%E7%8B%80%E6%85%8B')%0A elif
data%5B'en
@@ -1968,16 +1968,23 @@
, 'Q58')
+ # %E5%B7%B2%E5%AE%8C%E7%B5%90
%0A
@@ -2049,24 +2049,352 @@
y='%E6%9B%B4%E6%96%B0%E6%92%AD%E6%94%BE%E7%8B%80%E6%85%8B')%0A
+ else:%0A itemid = 'Q56'%0A if data%5B'end'%5D:%0A itemid = 'Q58'%0A new_claim = pywikibot.page.Claim(datasite, 'P31')%0A new_claim.setTarget(pywikibot.ItemPage(datasite, itemid))%0A print('%5Ct Add new status')%0A myitem.addClaim(new_claim, summary='%E6%96%B0%E5%A2%9E%E6%92%AD%E6%94%BE%E7%8B%80%E6%85%8B')%0A
else:%0A
|
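Decoded, this diff moves the "now airing" update inside the existing P31 branch, adds an elif for finished shows, and creates the status claim when it is missing. A standalone sketch of the resulting control flow, with plain dicts standing in for pywikibot claims (Q56 = airing and Q58 = finished per the diff's comments; treating Q57 as "not yet aired" is an assumption):

def update_status(claims, data):
    # claims: {'P31': [{'target': item_id}]}; data: {'end': bool}
    if 'P31' in claims:
        if claims['P31'][0]['target'] == 'Q57':
            claims['P31'][0]['target'] = 'Q56'   # update status to airing
        elif data['end']:
            claims['P31'][0]['target'] = 'Q58'   # update status to finished
    else:
        # new in this commit: add the status claim when none exists yet
        claims['P31'] = [{'target': 'Q58' if data['end'] else 'Q56'}]
    return claims

print(update_status({'P31': [{'target': 'Q57'}]}, {'end': False}))  # -> Q56
print(update_status({}, {'end': True}))                             # -> new Q58 claim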
6334521dc2ed7023887f0cd3b7f3f7d005070dd4
|
version 0.3.0
|
scrape/__init__.py
|
scrape/__init__.py
|
__version__ = '0.2.10'
|
Python
| 0.000002
|
@@ -14,10 +14,9 @@
'0.
-2.1
+3.
0'%0A
|
b7aa789a1b49332d19d37d82430b48ed9dca8ddb
|
Include AsOf data item in data dict as well as attribute
|
v1pysdk/base_asset.py
|
v1pysdk/base_asset.py
|
from pprint import pformat as pf
from query import V1Query
class BaseAsset(object):
"""Provides common methods for the dynamically derived asset type classes
built by V1Meta.asset_class"""
@classmethod
def query(Class, where=None, sel=None):
'Takes a V1 Data query string and returns an iterable of all matching items'
return V1Query(Class, sel, where)
@classmethod
def select(Class, *selectlist):
return V1Query(Class).select(*selectlist)
@classmethod
def where(Class, **wherekw):
return V1Query(Class).where(**wherekw)
@classmethod
def filter(Class, filterexpr):
return V1Query(Class).filter(filterexpr)
@classmethod
def asof(Class, *asofs):
return V1Query(Class).asof(*asofs)
@classmethod
def from_query_select(Class, xml, asof=None):
"Find or instantiate an object and fill it with data that just came back from query"
idref = xml.get('id')
data = Class._v1_v1meta.unpack_asset(xml)
instance = Class._v1_v1meta.asset_from_oid(idref)
instance.AsOf = asof
return instance.with_data(data)
@classmethod
def create(Class, **newdata):
"create new asset on server and return created asset proxy instance"
return Class._v1_v1meta.create_asset(Class._v1_asset_type_name, newdata)
class IterableType(type):
def __iter__(Class):
for instance in Class.query():
instance.needs_refresh = True
yield instance
"The type that's instantiated to make THIS class must have an __iter__, "
"so we provide a metaclass (a thing that provides a class when instantiated) "
"that knows how to be iterated over, so we can say list(v1.Story)"
__metaclass__ = IterableType
def __new__(Class, oid):
"Tries to get an instance out of the cache first, otherwise creates one"
cache_key = (Class._v1_asset_type_name, int(oid))
cache = Class._v1_v1meta.global_cache
if cache.has_key(cache_key):
self = cache[cache_key]
else:
self = object.__new__(Class)
self._v1_oid = oid
self._v1_new_data = {}
self._v1_current_data = {}
self._v1_needs_refresh = True
cache[cache_key] = self
return self
@property
def intid(self):
return self._v1_oid
@property
def data(self):
return self._v1_current_data
def __getitem__(self, key):
return self._v1_current_data[key]
@property
def idref(self):
return self._v1_asset_type_name + ':' + str(self._v1_oid)
@property
def reprref(self):
return "{0}({1})".format(self._v1_asset_type_name, self._v1_oid)
@property
def url(self):
return self._v1_v1meta.server.build_url('/assetdetail.v1', query={'oid':self.idref})
class ReprDummy:
def __init__(self, value):
self.value = value
def __repr__(self):
return self.value.reprref
def repr_dummy(self, v):
if isinstance(v, list):
return [self.ReprDummy(item) if isinstance(item, BaseAsset) else item
for item in v]
elif isinstance(v, BaseAsset):
return self.ReprDummy(v)
else:
return v
def repr_shallow(self, d):
# patch up the dict that pformat sees to avoid repr loops
return pf( dict(
(k, self.repr_dummy(v))
for (k,v)
in d.items()
if v
)
)
def __repr__(self):
out = self.reprref
if self._v1_current_data:
out += '.with_data({0})'.format(self.repr_shallow(self._v1_current_data))
if self._v1_new_data:
out += '.pending({0})'.format(self.repr_shallow(self._v1_new_data))
return out
def _v1_getattr(self, attr):
"Intercept access to missing attribute names. "
"first return uncommitted data, then refresh if needed, then get single attr, else fail"
if self._v1_new_data.has_key(attr):
value = self._v1_new_data[attr]
else:
if self._v1_needs_refresh:
self._v1_refresh()
if attr not in self._v1_current_data.keys():
self._v1_current_data[attr] = self._v1_get_single_attr(attr)
value = self._v1_current_data[attr]
return value
def _v1_setattr(self, attr, value):
'Stores a new value for later commit'
if attr.startswith('_v1_'):
object.__setattr__(self, attr, value)
else:
self._v1_new_data[attr] = value
self._v1_v1meta.add_to_dirty_list(self)
self._v1_needs_commit = True
def set(self, **kw):
self.pending(kw)
return self
def with_data(self, newdata):
"bulk-set instance data"
self._v1_current_data.update(dict(newdata))
self._v1_needs_refresh = False
return self
def pending(self, newdata):
"bulk-set data to commit"
self._v1_new_data.update(dict(newdata))
self._v1_v1meta.add_to_dirty_list(self)
self._v1_needs_commit = True
def _v1_commit(self):
'Commits the object to the server and invalidates its sync state'
if self._v1_needs_commit:
self._v1_v1meta.update_asset(self._v1_asset_type_name, self._v1_oid, self._v1_new_data)
self._v1_needs_commit = False
self._v1_new_data = {}
self._v1_current_data = {}
self._v1_needs_refresh = True
def _v1_refresh(self):
'Syncs the objects from current server data'
self._v1_current_data = self._v1_v1meta.read_asset(self._v1_asset_type_name, self._v1_oid)
self._v1_needs_refresh = False
def _v1_get_single_attr(self, attr):
return self._v1_v1meta.get_attr(self._v1_asset_type_name, self._v1_oid, attr)
def _v1_execute_operation(self, opname):
result = self._v1_v1meta.execute_operation(self._v1_asset_type_name, self._v1_oid, opname)
self._v1_needs_refresh = True
return result
|
Python
| 0
|
@@ -1057,16 +1057,40 @@
= asof%0A
+ data%5B'AsOf'%5D = asof%0A
retu
|
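The one-line diff stores AsOf in the unpacked data dict in addition to the instance attribute, so dict-style access (asset['AsOf']) sees it as well. A minimal stand-in for the V1Meta machinery:

class Asset(object):
    pass

def from_query_select(data, asof=None):
    instance = Asset()
    instance.AsOf = asof     # existing behaviour: attribute
    data['AsOf'] = asof      # added by this commit: data dict entry
    return instance, data

inst, data = from_query_select({'Name': 'Story A'}, asof='2017-06-01')
assert inst.AsOf == data['AsOf'] == '2017-06-01'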
c7e393d665cc89ebd653fbf3690a86a08b72d74a
|
Parse the correct voting date into a datetime.date
|
scraper/votings.py
|
scraper/votings.py
|
import logging
import requests
import lxml.html
logger = logging.getLogger(__name__)
TWEEDEKAMER_URL = 'https://www.tweedekamer.nl'
SEARCH_URL = 'https://www.tweedekamer.nl/zoeken'
class Vote(object):
def __init__(self, vote_table_row):
self.vote_table_row = vote_table_row
self.details = ''
self.decision = ''
self.number_of_seats = 0
self.party_name = ''
self.create()
def create(self):
ncol = 0
for column in self.vote_table_row.iter():
if column.tag == 'td':
ncol += 1
if ncol == 1 and column.tag == 'a':
self.party_name = column.text
elif ncol == 2:
self.number_of_seats = int(column.text)
elif ncol == 3 and column.tag == 'img':
self.decision = 'FOR'
elif ncol == 4 and column.tag == 'img':
self.decision = 'AGAINST'
elif ncol == 5 and column.tag == 'h4':
self.details = column.text
def __str__(self):
return 'Vote: ' + self.party_name + ' (' + str(self.number_of_seats) + '): ' + self.decision
class VotingResult(object):
def __init__(self, result_tree):
self.result_tree = result_tree
self.votes = self.create_votes_from_table()
def get_property_elements(self):
return self.result_tree.xpath('div[@class="search-result-properties"]/p')
def get_table_rows(self):
votes_table = self.get_votes_table()
if votes_table is not None:
return votes_table.xpath('tbody/tr')
else:
return []
def create_votes_from_table(self):
table_rows = self.get_table_rows()
votes = []
for row in table_rows:
vote = Vote(row)
votes.append(vote)
return votes
def get_votes_table(self):
votes_tables = self.result_tree.xpath('div[@class="vote-result"]/table')
if len(votes_tables):
return self.result_tree.xpath('div[@class="vote-result"]/table')[0]
else:
print('WARNING: no votes table found')
return None
def get_document_id(self):
return self.get_property_elements()[0].text
def get_date(self):
return self.get_property_elements()[1].text
def get_result(self):
result_content_elements = self.result_tree.xpath('div[@class="search-result-content"]/p[@class="result"]/span')
return result_content_elements[0].text.replace('.', '')
def __str__(self):
return 'Voting for doc ' + self.get_document_id() + ', result: ' + self.get_result()
def print_votes(self):
for vote in self.votes:
print(vote)
def get_voting_pages_for_dossier(dossier_nr):
""" searches for votings within a dossier, returns a list of urls to pages with votings """
params = {
'qry': dossier_nr,
'fld_prl_kamerstuk': 'Stemmingsuitslagen',
'Type': 'Kamerstukken',
'clusterName': 'Stemmingsuitslagen',
}
page = requests.get(SEARCH_URL, params)
tree = lxml.html.fromstring(page.content)
elements = tree.xpath('//div[@class="search-result-content"]/h3/a')
voting_urls = []
for element in elements:
voting_urls.append(TWEEDEKAMER_URL + element.get('href'))
return voting_urls
def get_votings_for_page(votings_page_url):
"""
get voting results from a votings page
:param votings_page_url: the url of the votings page, example: https://www.tweedekamer.nl/kamerstukken/stemmingsuitslagen/detail?id=2016P10154
:return: a list of VotingResult
"""
page = requests.get(votings_page_url)
tree = lxml.html.fromstring(page.content)
search_results = tree.xpath('//ul[@class="search-result-list reset"]/li')
votings = []
for search_result in search_results:
result = VotingResult(search_result)
votings.append(result)
return votings
|
Python
| 0.999999
|
@@ -41,16 +41,34 @@
xml.html
+%0Aimport dateparser
%0A%0Alogger
@@ -1235,16 +1235,22 @@
ult_tree
+, date
):%0A
@@ -1283,16 +1283,41 @@
lt_tree%0A
+ self.date = date%0A
@@ -2292,85 +2292,8 @@
xt%0A%0A
- def get_date(self):%0A return self.get_property_elements()%5B1%5D.text%0A%0A
@@ -2614,16 +2614,52 @@
result()
+ + ', date: ' + str(self.get_date())
%0A%0A de
@@ -2755,37 +2755,278 @@
ting
-_pages_for_dossier(dossier_nr
+s_for_dossier(dossier_id):%0A %22%22%22 get votings for a given dossier %22%22%22%0A urls = get_voting_pages_for_dossier(dossier_id)%0A results = %5B%5D%0A for url in urls:%0A results += get_votings_for_page(url)%0A return results%0A%0A%0Adef get_voting_pages_for_dossier(dossier_id
):%0A
@@ -3162,10 +3162,10 @@
ier_
-nr
+id
,%0A
@@ -3665,23 +3665,16 @@
t voting
- result
s from a
@@ -3674,16 +3674,22 @@
from a
+given
votings
@@ -3968,24 +3968,192 @@
ge.content)%0A
+ date = tree.xpath('//p%5B@class=%22vote-info%22%5D/span%5B@class=%22date%22%5D')%5B0%5D.text%0A date = dateparser.parse(date).date() # dateparser needed because of Dutch month names%0A
search_r
@@ -4316,24 +4316,30 @@
earch_result
+, date
)%0A vo
|
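Decoded, the diff parses the voting date out of the page (dateparser is used because of the Dutch month names, per the new comment in the hunk) and threads it through the VotingResult constructor. A runnable sketch of just the date extraction, assuming dateparser and lxml are installed; the XPath is taken from the hunk:

import dateparser
import lxml.html

def parse_voting_date(page_content):
    tree = lxml.html.fromstring(page_content)
    raw = tree.xpath('//p[@class="vote-info"]/span[@class="date"]')[0].text
    return dateparser.parse(raw).date()   # handles Dutch, e.g. "juni"

html = '<p class="vote-info"><span class="date">14 juni 2016</span></p>'
print(parse_voting_date(html))  # 2016-06-14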
86113d1b53827a2d0c106734c5fd04e9ad935529
|
Disable the layout optimizer for Grappler due to a conv2d filter error
|
scripts/convert.py
|
scripts/convert.py
|
"""Run Grappler optimizers in the standalone mode.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import sys
import os
from absl import flags
from tensorflow.python.tools import freeze_graph
from tensorflow.core.protobuf import device_properties_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.grappler import cluster as gcluster
from tensorflow.python.grappler import tf_optimizer
flags.DEFINE_string('saved_model_dir', '', 'The saved model directory.')
flags.DEFINE_string('output_node_names', '',
'The names of the output nodes, comma separated.')
flags.DEFINE_string('output_graph', '', 'The name of the output graph file')
flags.DEFINE_string('input_checkpoint', '',
'TensorFlow variables file to load.')
flags.DEFINE_boolean("input_binary", True,
"Whether the input files are in binary format.")
flags.DEFINE_string('input_saver', '', 'TensorFlow saver file to load.')
flags.DEFINE_string('input_graph', '', 'TensorFlow GraphDef file to load.')
flags.DEFINE_string(
'saved_model_tags', 'serve',
'Tags of the MetaGraphDef to load, in comma separated string format.'
)
FLAGS = flags.FLAGS
def get_cluster():
named_device = device_properties_pb2.NamedDevice()
named_device.name = '/GPU:0'
named_device.properties.type = 'GPU'
named_device.properties.environment['architecture'] = '4'
cluster = gcluster.Cluster(devices=[named_device])
return cluster
def load_graph(graph_filename):
"""Loads GraphDef. Returns Python Graph object."""
with tf.gfile.Open(graph_filename, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph:
# Set name to empty to avoid using the default name 'import'.
tf.import_graph_def(graph_def, name='')
for node in FLAGS.output_node_names.split(','):
graph.add_to_collection('train_op',graph.get_operation_by_name(node.strip()))
return graph
def optimize_graph(graph):
"""Takes a Python Graph object and optimizes the graph."""
rewriter_config = rewriter_config_pb2.RewriterConfig()
rewriter_config.optimizers[:] = ['pruning', 'constfold', 'arithmetic',
'dependency', 'layout', 'pruning',
'constfold', 'arithmetic','dependency']
meta_graph = tf.train.export_meta_graph(
graph_def=graph.as_graph_def(), graph=graph)
optimized_graph = tf_optimizer.OptimizeGraph(
rewriter_config, meta_graph, cluster=get_cluster())
if FLAGS.output_graph:
head, tail = os.path.split(FLAGS.output_graph)
tf.train.write_graph(
optimized_graph, head, tail, as_text=False)
def main(_):
# Freeze the graph
freeze_graph.freeze_graph(FLAGS.input_graph, FLAGS.input_saver,
FLAGS.input_binary, FLAGS.input_checkpoint,
FLAGS.output_node_names,
'', '',
FLAGS.output_graph + '.frozen', True, '',
saved_model_tags=FLAGS.saved_model_tags,
input_saved_model_dir=FLAGS.saved_model_dir)
graph = load_graph(FLAGS.output_graph + '.frozen')
optimize_graph(graph)
if __name__ == '__main__':
FLAGS(sys.argv)
tf.app.run(main)
|
Python
| 0
|
@@ -2334,18 +2334,8 @@
cy',
- 'layout',
'pr
|
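The diff only drops 'layout' from the Grappler pass list (the commit message blames a conv2d filter error on layout optimization). The resulting configuration, reconstructed from the hunk and using the same TensorFlow 1.x imports as the record:

from tensorflow.core.protobuf import rewriter_config_pb2

rewriter_config = rewriter_config_pb2.RewriterConfig()
# 'layout' removed; the duplicated pruning/constfold/arithmetic/dependency
# passes are unchanged from the original file.
rewriter_config.optimizers[:] = ['pruning', 'constfold', 'arithmetic',
                                 'dependency', 'pruning',
                                 'constfold', 'arithmetic', 'dependency']
print(list(rewriter_config.optimizers))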
ad14d77a137c924357bca51d39b91b4d502d2ce6
|
Improve pylint score
|
scripts/extract.py
|
scripts/extract.py
|
"""
## CODE OWNERS: Kyle Baird, Shea Parkes
### OWNERS ATTEST TO THE FOLLOWING:
* The `master` branch will meet Milliman QRM standards at all times.
* Deliveries will only be made from code in the `master` branch.
* Review/Collaboration notes will be captured in Pull Requests.
### OBJECTIVE:
Extract data from the EHR to feed the analytics
### DEVELOPER NOTES:
<none>
"""
import csv
from pathlib import Path
import prm_fhir.extractors
PATH_DATA = Path(prm_fhir.extractors.__file__).parents[2] / "data"
#==============================================================================
# LIBRARIES, LOCATIONS, LITERALS, ETC. GO ABOVE HERE
#==============================================================================
if __name__ == "__main__":
URLS = [
"https://open-ic.epic.com/FHIR/api/FHIR/DSTU2",
#"http://134.68.33.32/fhir/",
]
SEARCH_STRUCTS = [
{"family": "Argonaut", "given": "*"},
{"family": "Ragsdale", "given": "*"},
]
PATH_PATIENTS = PATH_DATA / "patients.csv"
with PATH_PATIENTS.open("w", newline="") as patients:
fieldnames = prm_fhir.extractors.extract_patients.fieldnames
writer = csv.DictWriter(
patients,
fieldnames=fieldnames,
)
writer.writeheader()
for url in URLS:
for search_struct in SEARCH_STRUCTS:
writer.writerows(prm_fhir.extractors.extract_patients(url, search_struct))
|
Python
| 0
|
@@ -1109,26 +1109,26 @@
-fieldnames
+FIELDNAMES
= prm_f
@@ -1178,22 +1178,22 @@
-writer
+WRITER
= csv.D
@@ -1248,26 +1248,26 @@
ldnames=
-fieldnames
+FIELDNAMES
,%0A
@@ -1278,30 +1278,30 @@
)%0A
-writer
+WRITER
.writeheader
@@ -1393,22 +1393,22 @@
-writer
+WRITER
.writero
@@ -1410,16 +1410,37 @@
iterows(
+%0A
prm_fhir
@@ -1487,10 +1487,31 @@
_struct)
+%0A
)%0A
|
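The diff renames the module-level bindings in the __main__ block to UPPER_CASE, which pylint accepts as constants, and reflows one call across lines. A self-contained illustration of the renamed writer setup (the in-memory buffer is a stand-in for patients.csv and is not in the record):

import csv
import io

if __name__ == "__main__":
    FIELDNAMES = ["family", "given"]                        # was: fieldnames
    BUFFER = io.StringIO()                                  # stand-in for the CSV file
    WRITER = csv.DictWriter(BUFFER, fieldnames=FIELDNAMES)  # was: writer
    WRITER.writeheader()
    WRITER.writerows([{"family": "Argonaut", "given": "*"}])
    print(BUFFER.getvalue())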
8861cc5e94538d1685dc4ddaae0dc8d436e2e7fc
|
Add a debug statement for cleaning
|
scripts/scraper.py
|
scripts/scraper.py
|
# -*- coding:Utf-8 -*-
import re
from os.path import exists
from urllib import urlopen, quote
from BeautifulSoup import BeautifulSoup
from deputies.models import Deputy, Party, CommissionMembership, Document, Question, Analysis
LACHAMBRE_PREFIX="http://www.lachambre.be/kvvcr/"
def lame_url(url):
# convert super lame urls of lachambre.be into something uzable
return quote(url.encode("iso-8859-1"), safe="%/:=&?~#+!$,;'@()*[]")
def get_or_create(klass, _id=None, **kwargs):
if _id is None:
object = klass.objects.filter(**kwargs)
else:
object = klass.objects.filter(**{_id : kwargs[_id]})
if object:
return object[0]
else:
print "add new", klass.__name__, kwargs
return klass.objects.create(**kwargs)
def read_or_dl(url, name, reset=False):
print "parsing", url
if not reset and exists('dump/%s' % name):
text = open('dump/%s' % name).read()
else:
text = urlopen(url).read()
open('dump/%s' % name, "w").write(text)
return BeautifulSoup(text)
def clean():
map(lambda x: x.objects.all().delete(), (Deputy, Party, CommissionMembership, Document, Question, Analysis))
def deputies_list():
soup = read_or_dl("http://www.lachambre.be/kvvcr/showpage.cfm?section=/depute&language=fr&rightmenu=right_depute&cfm=/site/wwwcfm/depute/cvlist.cfm", "deputies")
for dep in soup('table')[4]('tr'):
items = dep('td')
full_name = re.sub(' +', ' ', items[0].a.text)
url = items[0].a['href']
party = get_or_create(Party, name=items[1].a.text, url=dict(items[1].a.attrs)['href'])
email = items[2].a.text
website = items[3].a['href'] if items[3].a else None
# yes, one deputy's key contains an O instead of a 0, I'm not joking
lachambre_id = re.search('key=([0-9O]+)', url).groups()[0]
Deputy.objects.create(full_name=full_name,
party=party,
url=url,
websites=[website] if website else [],
lachambre_id=lachambre_id,
emails=[email])
print 'adding new deputy', lachambre_id, full_name, party, email, website if website else ''
def each_deputies():
for deputy in Deputy.objects.all():
print "parsing", deputy.full_name, deputy.url
soup = read_or_dl(LACHAMBRE_PREFIX + deputy.url, deputy.full_name)
deputy.language = soup.i.parent.text.split(":")[1]
deputy.cv = re.sub(' +', ' ', soup('table')[5].p.text)
# here we will walk in a list of h4 .. h5 .. div+ .. h5 .. div+
# look at the bottom of each deputies' page
membership = soup.find('td', rowspan="1")
item = membership.h4
role = None
while item.nextSibling:
if hasattr(item, 'tag'):
if item.name == 'h5':
role = item.text[6:-1]
elif item.name == 'div':
deputy.commissions.append(CommissionMembership.objects.create(name=item.a.text, role=role, url=item.a['href']))
print "add commission", role, item.a.text
item = item.nextSibling
deputy_documents(soup, deputy)
deputy.save()
def get_deputy_documents(url, deputy, role, type=None):
print "working on %s %sdocuments" % (role, type + " " if type else '') #, LACHAMBRE_PREFIX + lame_url(urls[index])
soupsoup = read_or_dl(LACHAMBRE_PREFIX + lame_url(url), '%s %s %s' % (deputy.full_name, type if type else '', role))
setattr(deputy, "documents_%s%s_url" % (role, type + "_" if type else ''), url)
setattr(deputy, "documents_%s%s_list" % (role, type + "_" if type else ''), [])
for i in soupsoup('table')[3]('tr', valign="top"):
print "add", type if type else '', role, i.tr('td')[1].text
getattr(deputy, "documents_%s%s_list" % (role, type + "_" if type else '')).append(Document.objects.create(url=i.a['href'], type=type))
def get_deputy_questions(url, deputy, type):
soupsoup = read_or_dl(LACHAMBRE_PREFIX + lame_url(url), '%s %s' % (deputy.full_name, type))
setattr(deputy, "questions_%s_url" % type, url)
setattr(deputy, "questions_%s_list" % type, [])
for i in soupsoup('table')[3]('tr', valign="top"):
print "add", type, i.tr('td')[1].text.strip()
getattr(deputy, "questions_%s_list" % type).append(Question.objects.create(url=i.a['href'], type=type))
def get_deputy_analysis(url, deputy, type):
soupsoup = read_or_dl(LACHAMBRE_PREFIX + lame_url(url), '%s %s' % (deputy.full_name, type))
setattr(deputy, "analysis_%s_url" % type, url)
setattr(deputy, "analysis_%s_list" % type, [])
for i in soupsoup('table')[3]('tr', valign="top"):
print "add", type, i.tr('td')[1].text.strip()
getattr(deputy, "analysis_%s_list" % type).append(Analysis.objects.create(url=i.a['href'], type=type))
def deputy_documents(soup, deputy):
# here we are in the grey black box
urls = map(lambda x: x['href'], soup('div', **{'class': 'linklist_1'})[1]('a'))
get_deputy_documents(urls[0], deputy, "author", "principal")
get_deputy_documents(urls[1], deputy, "signator", "principal")
get_deputy_documents(urls[2], deputy, "author", "next")
get_deputy_documents(urls[3], deputy, "signator", "next")
get_deputy_documents(urls[4], deputy, "rapporter")
get_deputy_questions(urls[5], deputy, "written")
# no one seems to do any interpellations nor motions or maybe the website is just broken
get_deputy_questions(urls[8], deputy, "oral_plenary")
get_deputy_questions(urls[9], deputy, "oral_commission")
get_deputy_analysis(urls[10], deputy, "legislatif_work")
get_deputy_analysis(urls[11], deputy, "parlimentary_control")
get_deputy_analysis(urls[12], deputy, "divers")
def deputies():
clean()
deputies_list()
each_deputies()
def run():
deputies()
|
Python
| 0
|
@@ -1055,24 +1055,48 @@
ef clean():%0A
+ print %22cleaning db%22%0A
map(lamb
|
f276d6fdb412b8ad93de8ba6d921d29a57710077
|
Update usage message
|
server/messages.py
|
server/messages.py
|
'''Endpoints messages.'''
from protorpc import messages
class Status(messages.Enum):
OK = 1
MISSING_DATA = 2
EXISTS = 3
BAD_DATA = 4
ERROR = 5
NO_DEVICE = 6
class DataMessage(messages.Message):
device_id = messages.StringField(1)
status = messages.EnumField(Status, 2)
class StatusResponse(messages.Message):
status = messages.EnumField(Status, 1)
class ScheduledWater(messages.Message):
'''Request to add to watering schedule'''
valve = messages.IntegerField(1)
start_time = messages.IntegerField(2)
duration_seconds = messages.IntegerField(3)
status = messages.EnumField(Status, 4)
class ScheduleResponse(messages.Message):
status = messages.EnumField(Status, 1)
schedule = messages.MessageField(ScheduledWater, 2, repeated=True)
class UsageResponse(messages.Message):
usage = messages.StringField(1)
class SetupRequest(messages.Message):
device_id = messages.StringField(1)
lat = messages.FloatField(2)
lng = messages.FloatField(3)
class Valve(messages.Message):
number = messages.IntegerField(1)
name = messages.StringField(2, required=False)
device_id = messages.StringField(3)
status = messages.EnumField(Status, 4)
start_time = messages.IntegerField(5)
duration_seconds = messages.IntegerField(6)
crop_id = messages.IntegerField(7)
class ValveDataResponse(messages.Message):
valves = messages.MessageField(Valve, 1, repeated=True)
status = messages.EnumField(Status, 2)
class ScheduleAdd(messages.Message):
device_id = messages.StringField(1)
valve = messages.IntegerField(2)
seconds_per_day = messages.IntegerField(3)
crop_id = messages.IntegerField(4)
start_time = messages.IntegerField(5)
|
Python
| 0
|
@@ -862,31 +862,220 @@
essages.
-StringField(1)%0A
+IntegerField(1, repeated=True)%0A datapoint_num = messages.IntegerField(2)%0A datapoint_freq = messages.EnumField()%0A class Frequency(messages.Enum):%0A DAY = 1%0A WEEK = 2%0A MONTH = 3
%0A%0Aclass
|
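Decoded, the diff turns UsageResponse into repeated integer datapoints with a nested Frequency enum. As committed, messages.EnumField() is called with no arguments, which protorpc rejects; the reconstruction below supplies a hypothetical enum type and field number so the class is importable:

from protorpc import messages

class UsageResponse(messages.Message):
    class Frequency(messages.Enum):
        DAY = 1
        WEEK = 2
        MONTH = 3

    usage = messages.IntegerField(1, repeated=True)
    datapoint_num = messages.IntegerField(2)
    # Hypothetical completion; the diff literally has messages.EnumField().
    datapoint_freq = messages.EnumField(Frequency, 3)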
d79fa2bee44bb0ee2bfebd8d9353f6b467702a59
|
Fix missing key error for LVAN
|
vat_ladder/src/val.py
|
vat_ladder/src/val.py
|
"""Virtual Adversarial Ladder"""
import tensorflow as tf
from src.utils import count_trainable_params, preprocess, get_batch_ops
from src.vat import Adversary
from src.ladder import Ladder, Encoder, Decoder
class LadderWithVAN(Ladder):
def get_corrupted_encoder(self, inputs, bn, train_flag, params,
start_layer=0, update_batch_stats=False,
scope='enc', reuse=True):
return VANEncoder(
inputs, bn, train_flag, params, self.clean.logits,
this_encoder_noise=params.corrupt_sd,
start_layer=start_layer, update_batch_stats=update_batch_stats,
scope=scope, reuse=reuse)
class VANEncoder(Encoder):
def __init__(
self, inputs, bn, is_training, params, clean_logits,
this_encoder_noise=0.0, start_layer=0, update_batch_stats=True,
scope='enc', reuse=None):
self.params = params
self.clean_logits = clean_logits
super(VANEncoder, self).__init__(
inputs, bn, is_training, params,
this_encoder_noise=this_encoder_noise,
start_layer=start_layer,
update_batch_stats=update_batch_stats,
scope=scope, reuse=reuse
)
def get_vadv_noise(self, inputs, l_out):
join, split_lu, labeled, unlabeled = get_batch_ops(self.batch_size)
adv = Adversary(
bn=self.bn,
params=self.params,
layer_eps=self.params.epsilon[l_out],
start_layer=l_out
)
x = unlabeled(inputs)
logit = unlabeled(self.clean_logits)
ul_noise = adv.generate_virtual_adversarial_perturbation(
x=x, logit=logit, is_training=self.is_training)
return join(tf.zeros(tf.shape(labeled(inputs))), ul_noise)
def print_progress(self, l_out):
el = self.encoder_layers
print("Layer {}: {} -> {}, epsilon {}".format(l_out, el[l_out - 1], el[l_out],
self.params.epsilon[l_out - 1]))
def generate_noise(self, inputs, l_out):
print("Generating noise for layer", l_out)
if self.noise_sd > 0.0:
noise = tf.random_normal(tf.shape(inputs)) * self.noise_sd
if self.params.model == "n" and l_out==0:
noise += self.get_vadv_noise(inputs, l_out)
elif self.params.model == "nlw":
noise += self.get_vadv_noise(inputs, l_out)
else:
noise = tf.zeros(tf.shape(inputs))
return inputs + noise
def get_vat_cost(ladder, train_flag, params):
unlabeled = lambda x: x[params.batch_size:] if x is not None else x
def get_layer_vat_cost(l):
adv = Adversary(bn=ladder.bn,
params=params,
layer_eps=params.epsilon[l],
start_layer=l)
# VAT on unlabeled only
return (
adv.virtual_adversarial_loss(
x=ladder.corr.unlabeled.z[l],
logit=unlabeled(ladder.corr.logits), # should this be clean?
is_training=train_flag)
)
if params.model == "clw":
vat_costs = []
for l in range(ladder.num_layers):
vat_costs.append(get_layer_vat_cost(l))
vat_cost = tf.add_n(vat_costs)
elif params.model == "c":
vat_cost = get_layer_vat_cost(0)
else:
vat_cost = 0.0
return vat_cost
def build_graph(params):
model = params.model
# -----------------------------
# Placeholder setup
inputs_placeholder = tf.placeholder(
tf.float32, shape=(None, params.encoder_layers[0]))
inputs = preprocess(inputs_placeholder, params)
outputs = tf.placeholder(tf.float32)
train_flag = tf.placeholder(tf.bool)
if model == "c" or model == "clw":
ladder = Ladder(inputs, outputs, train_flag, params)
vat_cost = get_vat_cost(ladder, train_flag, params)
elif model == "n" or model == "nlw":
ladder = LadderWithVAN(inputs, outputs, train_flag, params)
vat_cost = 0.0
else:
ladder = Ladder(inputs, outputs, train_flag, params)
vat_cost = 0.0
# -----------------------------
# Loss, accuracy and training steps
loss = ladder.cost + ladder.u_cost + vat_cost
accuracy = tf.reduce_mean(
tf.cast(
tf.equal(ladder.predict, tf.argmax(outputs, 1)),
"float")) * tf.constant(100.0)
learning_rate = tf.Variable(params.initial_learning_rate, trainable=False)
beta1 = tf.Variable(params.beta1, trainable=False)
train_step = tf.train.AdamOptimizer(learning_rate,
beta1=beta1).minimize(loss)
# add the updates of batch normalization statistics to train_step
bn_updates = tf.group(*ladder.bn.bn_assigns)
with tf.control_dependencies([train_step]):
train_step = tf.group(bn_updates)
saver = tf.train.Saver(keep_checkpoint_every_n_hours=0.5,
max_to_keep=5)
# Graph
g = dict()
g['images'] = inputs_placeholder
g['labels'] = outputs
g['train_flag'] = train_flag
g['ladder'] = ladder
g['saver'] = saver
g['train_step'] = train_step
g['lr'] = learning_rate
g['beta1'] = beta1
# Metrics
m = dict()
m['loss'] = loss
m['cost'] = ladder.cost
m['uc'] = ladder.u_cost
m['vc'] = vat_cost
m['acc'] = accuracy
trainable_params = count_trainable_params()
return g, m, trainable_params
|
Python
| 0.000061
|
@@ -2017,16 +2017,26 @@
out - 1%5D
+.get(None)
))%0A%0A
|
917dde63ece9e552427487c7639be64e1b113d3d
|
Update zibra download fields.
|
vdb/zibra_download.py
|
vdb/zibra_download.py
|
import os, re, time, datetime, csv, sys
import rethinkdb as r
from Bio import SeqIO
from download import download
from download import parser
class zibra_download(download):
def __init__(self, **kwargs):
download.__init__(self, **kwargs)
self.virus_specific_fasta_fields = []
if __name__=="__main__":
args = parser.parse_args()
fasta_fields = ['strain', 'amplicon_concentration', 'citation', 'ct', 'country', 'date', 'division', 'location',
'onset_date', 'patient_age', 'patient_sex', 'public', 'region', 'rt_positive', 'timestamp', 'virus']
setattr(args, 'fasta_fields', fasta_fields)
connVDB = zibra_download(**args.__dict__)
connVDB.download(**args.__dict__)
|
Python
| 0
|
@@ -409,32 +409,9 @@
, 'c
-itation', 'ct', 'country
+t
', '
@@ -440,16 +440,32 @@
cation',
+ 'microcephaly',
%0A
@@ -526,61 +526,34 @@
', '
-public', 'region', 'rt_positive', 'timestamp', 'virus
+rt_positive', 'sample_type
'%5D%0A
|
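For readability, applying the three hunks to the original list yields:

# Decoded result of the diff (no escapes), for reference:
fasta_fields = ['strain', 'amplicon_concentration', 'ct', 'date', 'division',
                'location', 'microcephaly', 'onset_date', 'patient_age',
                'patient_sex', 'rt_positive', 'sample_type']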
bf90f726da9954edb69f4c0cb29206ff82444d63
|
Add custom admin classes
|
src/recipi/food/admin.py
|
src/recipi/food/admin.py
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from recipi.food.models import (
FoodGroup, Food, Language, LanguageDescription, Nutrient,
Weight, Footnote)
admin.site.register(FoodGroup)
admin.site.register(Food)
admin.site.register(Language)
admin.site.register(LanguageDescription)
admin.site.register(Nutrient)
admin.site.register(Weight)
admin.site.register(Footnote)
|
Python
| 0
|
@@ -170,16 +170,372 @@
note)%0A%0A%0A
+class FoodGroupAdmin(admin.ModelAdmin):%0A pass%0A%0A%0Aclass FoodAdmin(admin.ModelAdmin):%0A pass%0A%0A%0Aclass LanguageAdmin(admin.ModelAdmin):%0A pass%0A%0A%0Aclass LanguageDescriptionAdmin(admin.ModelAdmin):%0A pass%0A%0A%0Aclass NutrientAdmin(admin.ModelAdmin):%0A pass%0A%0A%0Aclass WeightAdmin(admin.ModelAdmin):%0A pass%0A%0A%0Aclass FootnoteAdmin(admin.ModelAdmin):%0A pass%0A%0A%0A
admin.si
@@ -555,16 +555,32 @@
oodGroup
+, FoodGroupAdmin
)%0Aadmin.
@@ -597,16 +597,27 @@
ter(Food
+, FoodAdmin
)%0Aadmin.
@@ -638,16 +638,31 @@
Language
+, LanguageAdmin
)%0Aadmin.
@@ -694,16 +694,42 @@
cription
+, LanguageDescriptionAdmin
)%0Aadmin.
@@ -750,16 +750,31 @@
Nutrient
+, NutrientAdmin
)%0Aadmin.
@@ -793,16 +793,29 @@
r(Weight
+, WeightAdmin
)%0Aadmin.
@@ -828,18 +828,33 @@
egister(Footnote
+, FootnoteAdmin
)%0A
|
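Decoded, the diff adds an empty ModelAdmin subclass per model and passes it to each register call, leaving a placeholder for per-model customization. The patched module reconstructs to:

# -*- coding: utf-8 -*-
from django.contrib import admin

from recipi.food.models import (
    FoodGroup, Food, Language, LanguageDescription, Nutrient,
    Weight, Footnote)


class FoodGroupAdmin(admin.ModelAdmin):
    pass


class FoodAdmin(admin.ModelAdmin):
    pass


class LanguageAdmin(admin.ModelAdmin):
    pass


class LanguageDescriptionAdmin(admin.ModelAdmin):
    pass


class NutrientAdmin(admin.ModelAdmin):
    pass


class WeightAdmin(admin.ModelAdmin):
    pass


class FootnoteAdmin(admin.ModelAdmin):
    pass


admin.site.register(FoodGroup, FoodGroupAdmin)
admin.site.register(Food, FoodAdmin)
admin.site.register(Language, LanguageAdmin)
admin.site.register(LanguageDescription, LanguageDescriptionAdmin)
admin.site.register(Nutrient, NutrientAdmin)
admin.site.register(Weight, WeightAdmin)
admin.site.register(Footnote, FootnoteAdmin)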
cf7f5dc359bb49743750c9ace6c317092b275653
|
Remove the use of refine_results because it was changed to a private method
|
mmrp.py
|
mmrp.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import logging.config
import json
LOGGING_CONF_FILE = 'logging.json'
DEFAULT_LOGGING_LVL = logging.INFO
path = LOGGING_CONF_FILE
value = os.getenv('LOG_CFG', None)
if value:
path = value
if os.path.exists(path):
with open(path, 'rt') as f:
config = json.load(f)
logging.config.dictConfig(config)
else:
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
from flask import Flask
PROJECT_DIR, PROJECT_MODULE_NAME = os.path.split(
os.path.dirname(os.path.realpath(__file__))
)
FLASK_JSONRPC_PROJECT_DIR = os.path.join(PROJECT_DIR, os.pardir)
if os.path.exists(FLASK_JSONRPC_PROJECT_DIR) \
and FLASK_JSONRPC_PROJECT_DIR not in sys.path:
sys.path.append(FLASK_JSONRPC_PROJECT_DIR)
from flask_cors import CORS
from flask_jsonrpc import JSONRPC
from pymmrouting.routeplanner import MultimodalRoutePlanner
from pymmrouting.inferenceengine import RoutingPlanInferer
app = Flask(__name__)
cors = CORS(app)
jsonrpc = JSONRPC(app, '/api', enable_web_browsable_api=True)
@jsonrpc.method('mmrp.index')
def index():
return u'Welcome using Multimodal Route Planner (mmrp) JSON-RPC API'
@jsonrpc.method('mmrp.echo')
def echo(input):
logger.debug("input value: %s", input)
return u'Receive {0}'.format(input)
@jsonrpc.method('mmrp.findMultimodalPaths')
def find_multimodal_paths(options):
inferer = RoutingPlanInferer()
inferer.load_routing_options(options)
plans = inferer.generate_routing_plan()
planner = MultimodalRoutePlanner()
rough_results = planner.batch_find_path(plans)
results = planner.refine_results(rough_results)
return results
@jsonrpc.method('mmrp.fails')
def fails(string):
raise ValueError
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True)
|
Python
| 0.000001
|
@@ -1664,22 +1664,16 @@
r()%0A
-rough_
results
@@ -1709,60 +1709,8 @@
ns)%0A
- results = planner.refine_results(rough_results)%0A
|
e701286478d0c460d0a8a2e2fd5b73bf124a90ec
|
Make a better error message when removing reservations that do not exist
|
opennsa/backends/common/calendar.py
|
opennsa/backends/common/calendar.py
|
"""
Backend reservation calendar.
Inteded usage is for NRM backend which does not have their own reservation calendar.
Right now it is very minimal, but should be enough for basic service.
Author: Henrik Thostrup Jensen <htj@nordu.net>
Copyright: NORDUnet (2011)
"""
import datetime
from opennsa import error
class ReservationCalendar:
def __init__(self):
self.reservations = [] # [ ( resource, start_time, end_time ) ]
def addReservation(self, resource, start_time, end_time):
# does no checking, assuming checkReservation has been called
reservation = (resource, start_time, end_time)
self.reservations.append(reservation)
def removeReservation(self, resource, start_time, end_time):
reservation = (resource, start_time, end_time)
self.reservations.remove(reservation)
def checkReservation(self, resource, start_time, end_time):
# check types
if not type(start_time) is datetime.datetime and type(end_time) is datetime.datetime:
raise ValueError('Reservation start and end types must be datetime types')
# sanity checks
if start_time > end_time:
raise error.PayloadError('Invalid request: Reverse duration (end time before start time)')
now = datetime.datetime.utcnow()
if start_time < now:
delta = now - start_time
stamp = str(start_time).rsplit('.')[0]
raise error.PayloadError('Invalid request: Start time in the past (Startime: %s Delta: %s)' % (stamp, str(delta)))
if start_time > datetime.datetime(2025, 1, 1):
raise error.PayloadError('Invalid request: Start time after year 2025')
for (c_resource, c_start_time, c_end_time) in self.reservations:
if resource == c_resource:
if self._resourceOverlap(c_start_time, c_end_time, start_time, end_time):
raise error.STPUnavailableError('Resource %s not available in specified time span' % resource)
# all good
# resource temporal availability (overlap check)
def _resourceOverlap(self, res1_start_time, res1_end_time, res2_start_time, res2_end_time):
assert res1_start_time < res1_end_time, 'Refusing to detect overlap for backwards reservation (1)'
assert res2_start_time < res2_end_time, 'Refusing to detect overlap for backwards reservation (2)'
if res2_end_time < res1_start_time:
return False # res2 ends before res1 starts so it is ok
if res2_start_time > res1_end_time:
return False # res2 starts after res1 ends so it is ok
# resources overlap in time
return True
|
Python
| 0.000002
|
@@ -783,32 +783,49 @@
time, end_time)%0A
+ try:%0A
self.res
@@ -846,32 +846,182 @@
ve(reservation)%0A
+ except ValueError:%0A raise ValueError('Reservation (%25s, %25s, %25s) does not exists. Cannot remove' %25 (resource, start_time, end_time))%0A
%0A%0A def checkR
|
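The hunk wraps list.remove in a try/except so removing a reservation that is not present raises a descriptive error instead of a bare ValueError. A standalone version of the patched method (the message text, including its grammar, is verbatim from the diff):

class ReservationCalendar:

    def __init__(self):
        self.reservations = []  # [ (resource, start_time, end_time) ]

    def addReservation(self, resource, start_time, end_time):
        self.reservations.append((resource, start_time, end_time))

    def removeReservation(self, resource, start_time, end_time):
        reservation = (resource, start_time, end_time)
        try:
            self.reservations.remove(reservation)
        except ValueError:
            # message text as committed
            raise ValueError('Reservation (%s, %s, %s) does not exists. Cannot remove' % (resource, start_time, end_time))

cal = ReservationCalendar()
cal.addReservation('port-A', 1, 2)
cal.removeReservation('port-A', 1, 2)
try:
    cal.removeReservation('port-B', 1, 2)
except ValueError as e:
    print(e)  # Reservation (port-B, 1, 2) does not exists. Cannot remove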
8e6662a4aaf654ddf18c1c4e733c58db5b9b5579
|
Add caching to the opps menu list via the context processor
|
opps/channels/context_processors.py
|
opps/channels/context_processors.py
|
# -*- coding: utf-8 -*-
from django.utils import timezone
from django.conf import settings
from django.contrib.sites.models import get_current_site
from .models import Channel
def channel_context(request):
""" Channel context processors
"""
site = get_current_site(request)
opps_menu = Channel.objects.filter(site=site,
date_available__lte=timezone.now(),
published=True,
show_in_menu=True).order_by('order')
return {'opps_menu': opps_menu,
'opps_channel_conf_all': settings.OPPS_CHANNEL_CONF,
'site': site}
|
Python
| 0
|
@@ -140,16 +140,52 @@
ent_site
+%0Afrom django.core.cache import cache
%0A%0Afrom .
@@ -338,57 +338,121 @@
u =
-Channel.objects.filter(site=site,%0A
+cache.get('opps_menu')%0A if not opps_menu:%0A opps_menu = %5Bchannel for channel in Channel.objects.filter(%0A
@@ -451,32 +451,43 @@
er(%0A
+site=site,%0A
date
@@ -522,35 +522,8 @@
(),%0A
-
@@ -550,35 +550,8 @@
ue,%0A
-
@@ -577,16 +577,27 @@
u=True).
+distinct().
order_by
@@ -605,16 +605,87 @@
'order')
+%5D%0A cache.set('opps_menu', opps_menu, settings.OPPS_CACHE_EXPIRE)
%0A%0A re
|
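Reconstructed from the hunks, the context processor now checks the cache first, materializes the queryset into a list (with distinct() added), and caches it for OPPS_CACHE_EXPIRE seconds. This only runs inside an opps Django project:

from django.utils import timezone
from django.conf import settings
from django.contrib.sites.models import get_current_site
from django.core.cache import cache

from .models import Channel

def channel_context(request):
    """ Channel context processors
    """
    site = get_current_site(request)
    opps_menu = cache.get('opps_menu')
    if not opps_menu:
        opps_menu = [channel for channel in Channel.objects.filter(
            site=site,
            date_available__lte=timezone.now(),
            published=True,
            show_in_menu=True).distinct().order_by('order')]
        cache.set('opps_menu', opps_menu, settings.OPPS_CACHE_EXPIRE)

    return {'opps_menu': opps_menu,
            'opps_channel_conf_all': settings.OPPS_CHANNEL_CONF,
            'site': site}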
eeef09628a877e769dd1f55e627d3047bedf7f3d
|
Fix edit of news.
|
news.py
|
news.py
|
# -*- coding: UTF-8 -*-
# Copyright (C) 2008 Matthieu France <matthieu.france@itaapy.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Import from the Standard Library
from datetime import date, datetime
# Import from itools
from itools.datatypes import Date, DateTime, String, Unicode
from itools.handlers import checkid
from itools.gettext import MSG
from itools.stl import stl
from itools.web import STLView, STLForm
from itools.xapian import KeywordField
from itools.xml import XMLError, XMLParser
# Import from ikaaro
from ikaaro.folder import Folder
from ikaaro.forms import DateWidget, RTEWidget
from ikaaro.html import WebPage
from ikaaro.messages import *
from ikaaro.registry import register_resource_class
from ikaaro.views import NewInstanceForm
###########################################################################
# Views
###########################################################################
rte = RTEWidget('html', rte_template='/ui/hforge/rte.xml')
class NewsNewInstance(NewInstanceForm):
access = 'is_allowed_to_add'
template = '/ui/hforge/News_edit.xml'
schema = {
'title': Unicode(mandatory=True),
'html': String,
'date': Date,
}
def get_namespace(self, resource, context):
root = context.root
# Build the namespace
default = date.today().isoformat()
release_date = context.get_form_value('date', Date, default=default)
return {
'action': ';new_resource?type=%s' % News.class_id,
'submit': MSG(u'Add'),
'title': context.get_form_value('title', Unicode),
'html': rte.to_html(String, None),
'date': DateWidget('date').to_html(Date, release_date),
'class_title': News.class_title.gettext(),
'timestamp': DateTime.encode(datetime.now()),
}
def action(self, resource, context, form):
title = form['title']
html = form['html']
release_date = form['date']
name = checkid(title)
if name is None:
context.message = MSG_BAD_NAME
return
# Check the name is free
if resource.has_resource(name):
context.message = MSG_NAME_CLASH
return
# Make Object
language = resource.get_content_language(context)
object = News.make_resource(News, resource, name, body=html,
language=language)
metadata = object.metadata
metadata.set_property('title', title, language=language)
metadata.set_property('date', release_date)
goto = './%s/' % name
return context.come_back(MSG_NEW_RESOURCE, goto=goto)
class NewsView(STLView):
access = 'is_allowed_to_view'
title = MSG(u'View')
template = '/ui/hforge/News_view.xml'
def get_namespace(self, resource, context):
language = resource.get_content_language(context)
return {
'title': resource.get_property('title', language=language),
'html': resource.handler.events,
'date': resource.get_property('date'),
}
class NewsEdit(STLForm):
access = 'is_allowed_to_edit'
title = MSG(u'Edit')
template = '/ui/hforge/News_edit.xml'
schema = {
'timestamp': DateTime,
'title': Unicode(mandatory=True),
'date': Date,
'html': String,
}
def get_namespace(self, resource, context):
language = resource.get_content_language(context)
widget = DateWidget('date')
release_date = resource.get_property('date')
return {
'action': ';edit',
'submit': MSG(u'Change'),
'title': resource.get_property('title', language=language),
'date': widget.to_html(Date, release_date),
'html': rte.to_html(String, resource.handler.events),
'class_title': resource.class_title,
'timestamp': DateTime.encode(datetime.now()),
}
def action(self, resource, context, form):
# Check the timestamp
timestamp = form['timestamp']
if timestamp is None:
context.message = MSG_EDIT_CONFLICT
return
document = resource.get_html_document()
if document.timestamp is not None and timestamp < document.timestamp:
context.message = MSG_EDIT_CONFLICT
return
# Check the html is good
html = form['html']
try:
html = list(XMLParser(html))
except XMLError:
context.message = MSG(u'Invalid HTML code.')
return
# Title
title = form['title']
language = resource.get_content_language(context)
resource.set_property('title', title, language=language)
# Date
release_date = form['date']
resource.set_property('date', release_date)
# Body
resource.handler.events = html
# Ok
context.message = MSG_CHANGES_SAVED
###########################################################################
# Resource
###########################################################################
class News(WebPage):
class_id = 'news'
class_title = MSG(u'News')
class_description = MSG(u'Create and publish News')
class_views = ['view', 'edit', 'state_form', 'history_form']
@classmethod
def get_metadata_schema(cls):
schema = WebPage.get_metadata_schema()
schema['date'] = Date
return schema
def get_catalog_fields(self):
base_fields = WebPage.get_catalog_fields(self)
field = KeywordField('date', is_stored=True)
base_fields.append(field)
return base_fields
def get_catalog_values(self):
indexes = WebPage.get_catalog_values(self)
indexes['date'] = self.get_property('date').isoformat()
return indexes
# Views
new_instance = NewsNewInstance()
view = NewsView()
edit = NewsEdit()
###########################################################################
# Register
###########################################################################
register_resource_class(News)
Folder.register_document_type(News)
|
Python
| 0
|
@@ -5527,38 +5527,33 @@
-resource.handler.
+document.set_
events
- =
+(
html
+)
%0A%0A
|
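The fix replaces a direct assignment to the handler's events attribute with the handler's own setter; the diff shows only the call change, so the motivation sketched below (letting the handler track its own state) is an assumption:

class HTMLDocument(object):
    def __init__(self):
        self.events = []
        self.dirty = False

    def set_events(self, events):
        self.events = events
        self.dirty = True   # assumed benefit of going through the setter

document = HTMLDocument()
document.set_events(['<p>', 'news body', '</p>'])   # was: document.events = html
print(document.dirty)  # True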
a7593b5c90a0cf6ae2fb14cf8935d1f3af58dff0
|
rename md5sum to nspc_md5sum
|
tools/gen_playlist_from_seed.py
|
tools/gen_playlist_from_seed.py
|
#!/usr/bin/python3
import sys, os, json, hashlib
# we're in directory 'tools/' we have to update sys.path
sys.path.append(os.path.dirname(sys.path[0]))
from rom.rom import RealROM, snes_to_pc, pc_to_snes
from rom.rompatcher import MusicPatcher,RomTypeForMusic
from utils.parameters import appDir
from utils.utils import removeChars
seed=sys.argv[1]
baseDir=os.path.join(appDir+'/..', 'varia_custom_sprites', 'music')
rom=RealROM(seed)
p=MusicPatcher(rom, RomTypeForMusic.VariaSeed, baseDir=baseDir)
vanillaTracks=p.vanillaTracks
allTracks=p.allTracks
tableAddr=p.musicDataTableAddress-3
def readNspcData(rom, addr):
# songs can have two tracks
nspcCount = 0
step = 0
data = []
maxSize = 64*1024
rom.seek(addr)
for i in range(maxSize):
b = rom.readByte()
data.append(b)
if step == 0:
if b == 0x00:
step = 1
elif step == 1:
if b == 0x00:
step = 2
else:
step = 0
elif step == 2:
if b == 0x00:
step = 3
else:
step = 0
elif step == 3:
if b == 0x15:
# end found
if nspcCount == 0:
firstData = data[:]
nspcCount = 1
step = 0
else:
return (firstData, data)
elif b != 0x00:
step = 0
if nspcCount == 0:
#with open("{}.nspc".format(hex(addr)), 'wb') as f:
# f.write(bytes(data))
return (None, None)
else:
return (firstData, None)
def getMd5Sum(data):
return hashlib.md5(bytes(data)).hexdigest()
# read table
tracksTable = {}
for trackName, data in vanillaTracks.items():
if 'pc_addresses' not in data:
continue
addr = data['pc_addresses'][0]
dataId = rom.readByte(addr)
addr = snes_to_pc(rom.readLong(tableAddr+dataId))
#print("dataId: {} - addr: {} for song: {}".format(hex(dataId), hex(addr), trackName))
tracksTable[addr] = {"trackName": trackName, "dataId": dataId}
# get nspc data in rom and compute its md5 sum
for addr in sorted(tracksTable.keys()):
trackData = tracksTable[addr]
#print("{} {:4} {}".format(hex(pc_to_snes(addr)), hex(trackData["dataId"]), trackData["trackName"]))
nspcData = readNspcData(rom, addr)
if nspcData[0] is None and nspcData[1] is None:
print(" Warning: no nspc end found for {}".format(trackData["trackName"]))
tracksTable[addr]["nspcData"] = [None]
tracksTable[addr]["md5sum"] = [None]
continue
md5sum = getMd5Sum(nspcData[0])
tracksTable[addr]["nspcData"] = [nspcData[0]]
tracksTable[addr]["md5sum"] = [md5sum]
if nspcData[1] is not None:
md5sum = getMd5Sum(nspcData[1])
tracksTable[addr]["nspcData"].append(nspcData[1])
tracksTable[addr]["md5sum"].append(md5sum)
# index by md5sum
allTracksMd5 = {}
for songName, data in allTracks.items():
allTracksMd5[data["md5sum"]] = songName
playlist = {}
for data in tracksTable.values():
md5sums = data["md5sum"]
if md5sums[0] in allTracksMd5 or (len(md5sums) > 1 and md5sums[1] in allTracksMd5):
md5sum = md5sums[0] if md5sums[0] in allTracksMd5 else md5sums[1]
#print("$%02x %s replaced with: %s" % (data["dataId"], data["trackName"], allTracksMd5[md5sum]))
playlist[removeChars(data["trackName"], ' ,()-/')] = allTracksMd5[md5sum]
else:
print(" Warning: replacement not found for {} - {} - {}".format(hex(data["dataId"]), data["trackName"], data["md5sum"]))
playlistName = sys.argv[1][:-4]+'.json'
with open(playlistName, 'w') as f:
json.dump(playlist, f, indent=4)
print("playlist generated: {}".format(playlistName))
|
Python
| 0.003597
|
@@ -2591,32 +2591,37 @@
cksTable%5Baddr%5D%5B%22
+nspc_
md5sum%22%5D = %5BNone
@@ -2614,32 +2614,32 @@
5sum%22%5D = %5BNone%5D%0A
-
continue
@@ -2741,32 +2741,37 @@
cksTable%5Baddr%5D%5B%22
+nspc_
md5sum%22%5D = %5Bmd5s
@@ -2932,16 +2932,21 @@
%5Baddr%5D%5B%22
+nspc_
md5sum%22%5D
@@ -3058,24 +3058,29 @@
ksMd5%5Bdata%5B%22
+nspc_
md5sum%22%5D%5D =
@@ -3153,24 +3153,29 @@
ums = data%5B%22
+nspc_
md5sum%22%5D%0A
@@ -3522,32 +3522,32 @@
5sum%5D%0A else:%0A
-
print(%22
@@ -3649,16 +3649,21 @@
, data%5B%22
+nspc_
md5sum%22%5D
|
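The diff is a mechanical key rename: every per-track checksum is written and read under 'nspc_md5sum' instead of 'md5sum'. A small sketch with hypothetical values:

tracksTable = {0x1A2B: {"trackName": "Some Track", "dataId": 0x05}}
addr = 0x1A2B
tracksTable[addr]["nspc_md5sum"] = ["d41d8cd98f00b204e9800998ecf8427e"]  # was: "md5sum"

allTracksMd5 = {"d41d8cd98f00b204e9800998ecf8427e": "some_song"}
md5sums = tracksTable[addr]["nspc_md5sum"]                               # was: "md5sum"
print(md5sums[0] in allTracksMd5)  # True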
baa710088488ba15e7219f341aff173ecf4ea636
|
update DataGen
|
org/tradesafe/data/dataGenerator.py
|
org/tradesafe/data/dataGenerator.py
|
# encoding:utf-8
from numpy import array
# from sklearn.cross_validation import train_test_split
from sklearn.model_selection import train_test_split
from org.tradesafe.data.history_data import HistoryData
from org.tradesafe.utils.utils import mylog
class DataGen(object):
'''
trian data generator
'''
def __init__(self,
codes,
batch_size=64,
time_step=15,
pred_day=1,
column_names=None,
sort_by=['date'],
group_by=['code'],
label_column='high',
split_rate=0.05,
seed=7):
'''
:param codes:
:param batch_size:
:param time_step:
:param pred_day:
:param column_names:
:param sort_by:
:param group_by:
:param split_rate:
:param seed:
'''
self.codes = codes
self.batch_size = batch_size
self.time_step = time_step
self.pred_day = pred_day
self.column_names = column_names
self.label_column = label_column
self.train_codes, self.val_codes = train_test_split(
array(self.codes), random_state=seed, test_size=split_rate)
feats = self.column_names[:]
for n in set(sort_by + group_by):
feats.remove(n)
self.feats = feats
self.dim = len(self.feats)
self.hd = HistoryData()
mylog.info('loading all data')
df = self.hd.get_history_data_all()
df = df.sort_values(by=sort_by)
dfg = df.groupby(by=group_by)
self.all_data = dfg
mylog.info('data load complete')
def next_val_batch(self):
'''
:return: batch examples of validate dataset
'''
while 1:
for k, g in self.all_data:
if k in self.val_codes:
if g is not None and not g.empty:
if len(
g
) < self.time_step + self.batch_size + self.pred_day:
continue
i = 0
batch_X = []
batch_y = []
ALL_X = g[self.feats].as_matrix().astype(float)
ALL_y = g[[self.label_column
]].as_matrix().astype(float)
batch_y_ = []
while i < len(g) - self.pred_day - max(
self.time_step, self.batch_size):
X = ALL_X[i:i + self.time_step]
y = ALL_y[i + self.time_step:
i + self.time_step + self.pred_day]
y_today = ALL_y[i + self.time_step]
batch_X.append(X)
batch_y.append((y.max() - y_today) / y_today)
batch_y_.append(y.max())
i += 1
if len(batch_y) == self.batch_size:
yield array(batch_X), array(batch_y).reshape(
self.batch_size, 1)
batch_X = []
batch_y = []
def next_train_batch(self):
'''
:return: batch examples
'''
while 1:
for k, g in self.all_data:
if k in self.train_codes:
if g is not None and not g.empty:
if len(
g
) < self.time_step + self.batch_size + self.pred_day:
continue
i = 0
batch_X = []
batch_y = []
ALL_X = g[self.feats].as_matrix().astype(float)
ALL_y = g[[self.label_column
]].as_matrix().astype(float)
while i < len(g) - self.pred_day - max(
self.time_step, self.batch_size):
X = ALL_X[i:i + self.time_step]
y = ALL_y[i + self.time_step:
i + self.time_step + self.pred_day]
y_today = ALL_y[i + self.time_step]
batch_X.append(X)
batch_y.append((y.max() - y_today) / y_today)
batch_y.append(y.max())
i += 1
if len(batch_y) == self.batch_size:
yield array(batch_X), array(batch_y).reshape(
self.batch_size, 1)
batch_X = []
batch_y = []
def next_predict_example(self):
'''
:return: batch examples
'''
for k, g in self.all_data:
if g is not None and not g.empty:
batch_X = []
ALL_X = g[self.feats].as_matrix().astype(float)
X = ALL_X[0-self.time_step:]
batch_X.append(X)
yield k, array(batch_X)
batch_X = []
if __name__ == '__main__':
names = 'date,code,open,close,chg,chg_r,low,high,vibration,volume,amount,turnover'.split(
',')
feats = names[:]
feats.remove('date')
feats.remove('code')
hd = HistoryData()
codes = hd.get_all_stock_code()
dg = DataGen(codes, 64, 15, 5, names)
for x, y in dg.next_val_batch():
print(x.shape)
print(y.shape)
print(x)
print(y)
break
|
Python
| 0
|
@@ -4568,60 +4568,8 @@
ay)%0A
- batch_y.append(y.max())%0A
|
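The deleted line made next_train_batch append two labels per example (the relative change and then the raw max), so batch_y filled twice as fast as batch_X and the pairs drifted out of alignment. Minimal illustration:

batch_X, batch_y = [], []
for y_today, y_max in [(10.0, 12.0), (11.0, 11.5)]:
    batch_X.append([y_today])
    batch_y.append((y_max - y_today) / y_today)
    # batch_y.append(y_max)   # the line this commit deletes
assert len(batch_X) == len(batch_y)   # holds only with the duplicate removed
print(batch_y)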
acf1c9db347917ef3bf31aec27d2635a8b4e1c68
|
Add timezone to prettified time.
|
paasta_tools/chronos_serviceinit.py
|
paasta_tools/chronos_serviceinit.py
|
#!/usr/bin/env python
import datetime
import logging
import sys
import humanize
import isodate
import requests_cache
import chronos_tools
from paasta_tools.utils import datetime_from_utc_to_local
from paasta_tools.utils import _log
from paasta_tools.utils import PaastaColors
log = logging.getLogger("__main__")
log.addHandler(logging.StreamHandler(sys.stdout))
# Calls the 'manual start' endpoint in Chronos (https://mesos.github.io/chronos/docs/api.html#manually-starting-a-job),
# running the job now regardless of its 'schedule' and 'disabled' settings. The job's 'schedule' is left unmodified.
def start_chronos_job(service, instance, job_id, client, cluster, job_config):
name = PaastaColors.cyan(job_id)
_log(
service_name=service,
line="EmergencyStart: sending job %s to Chronos" % name,
component='deploy',
level='event',
cluster=cluster,
instance=instance
)
client.update(job_config)
client.run(job_id)
def stop_chronos_job(service, instance, client, cluster, existing_jobs):
for job in existing_jobs:
name = PaastaColors.cyan(job['name'])
_log(
service_name=service,
line="EmergencyStop: killing all tasks for job %s" % name,
component='deploy',
level='event',
cluster=cluster,
instance=instance
)
job['disabled'] = True
client.update(job)
client.delete_tasks(job['name'])
def restart_chronos_job(service, instance, job_id, client, cluster, matching_jobs, job_config, immediate_start):
stop_chronos_job(service, instance, client, cluster, matching_jobs)
start_chronos_job(service, instance, job_id, client, cluster, job_config)
def _get_disabled_status(job):
status = PaastaColors.red("UNKNOWN")
if job.get("disabled", False):
status = PaastaColors.red("Disabled")
else:
status = PaastaColors.green("Enabled")
return status
def _get_last_result(job):
last_result = PaastaColors.red("UNKNOWN")
last_result_when = PaastaColors.red("UNKNOWN")
fail_result = PaastaColors.red("Fail")
ok_result = PaastaColors.green("OK")
last_error = job.get("lastError", "")
last_success = job.get("lastSuccess", "")
if not last_error and not last_success:
last_result = PaastaColors.yellow("New")
last_result_when = "never"
elif not last_error:
last_result = ok_result
last_result_when = isodate.parse_datetime(last_success)
elif not last_success:
last_result = fail_result
last_result_when = isodate.parse_datetime(last_error)
else:
fail_dt = isodate.parse_datetime(last_error)
ok_dt = isodate.parse_datetime(last_success)
if ok_dt > fail_dt:
last_result = ok_result
last_result_when = ok_dt
else:
last_result = fail_result
last_result_when = fail_dt
# Prettify datetime objects further. Ignore hardcoded values like "never".
pretty_last_result_when = last_result_when
if isinstance(last_result_when, datetime.datetime):
last_result_when_localtime = datetime_from_utc_to_local(last_result_when)
pretty_last_result_when = "%s, %s" % (
last_result_when_localtime.strftime("%Y-%m-%dT%H:%M"),
humanize.naturaltime(last_result_when_localtime),
)
return (last_result, pretty_last_result_when)
def format_chronos_job_status(job, desired_state):
"""Given a job, returns a pretty-printed human readable output regarding
the status of the job.
:param job: dictionary of the job status
:param desired_state: a pretty-formatted string representing the
job's started/stopped state as set with paasta emergency-[stop|start], e.g.
the result of get_desired_state_human()
"""
disabled_state = _get_disabled_status(job)
(last_result, last_result_when) = _get_last_result(job)
return (
"Status: %(disabled_state)s, %(desired_state)s\n"
"Last: %(last_result)s (%(last_result_when)s)" % {
"disabled_state": disabled_state,
"desired_state": desired_state,
"last_result": last_result,
"last_result_when": last_result_when,
}
)
def status_chronos_job(jobs, job_config):
"""Returns a formatted string of the status of a list of chronos jobs
:param jobs: list of dicts of chronos job info as returned by the chronos
client
"""
if jobs == []:
return "%s: chronos job is not setup yet" % PaastaColors.yellow("Warning")
else:
desired_state = job_config.get_desired_state_human()
output = [format_chronos_job_status(job, desired_state) for job in jobs]
return "\n".join(output)
def perform_command(command, service, instance, cluster, verbose, soa_dir):
chronos_config = chronos_tools.load_chronos_config()
client = chronos_tools.get_chronos_client(chronos_config)
complete_job_config = chronos_tools.create_complete_config(service, instance, soa_dir=soa_dir)
job_id = complete_job_config['name']
# We add SPACER to the end as an anchor to prevent catching
# "my_service my_job_extra" when looking for "my_service my_job".
job_pattern = "^%s%s" % (chronos_tools.compose_job_id(service, instance), chronos_tools.SPACER)
matching_jobs = chronos_tools.lookup_chronos_jobs(job_pattern, client, include_disabled=True)
if command == "start":
start_chronos_job(service, instance, job_id, client, cluster, complete_job_config)
elif command == "stop":
stop_chronos_job(service, instance, client, cluster, matching_jobs)
elif command == "restart":
restart_chronos_job(service, instance, job_id, client, cluster, matching_jobs, complete_job_config)
elif command == "status":
# Setting up transparent cache for http API calls
requests_cache.install_cache("paasta_serviceinit", backend="memory")
job_config = chronos_tools.load_chronos_job_config(service, instance, cluster, soa_dir=soa_dir)
print "Job id: %s" % job_id
print status_chronos_job(matching_jobs, job_config)
else:
# The command parser shouldn't have let us get this far...
raise NotImplementedError("Command %s is not implemented!" % command)
return 0
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
Python
| 0
|
@@ -3326,16 +3326,18 @@
%25dT%25H:%25M
+%25Z
%22),%0A
|
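Decoded, the hunk above appends %Z to the strftime format so the prettified timestamp carries its timezone name. A small illustration of the effect (astimezone() stands in for the project's datetime_from_utc_to_local helper):

import datetime

utc_when = datetime.datetime(2015, 6, 1, 12, 0, tzinfo=datetime.timezone.utc)
local_when = utc_when.astimezone()  # stand-in for datetime_from_utc_to_local()
# Before the commit: "%Y-%m-%dT%H:%M"; after: "%Y-%m-%dT%H:%M%Z"
print(local_when.strftime("%Y-%m-%dT%H:%M%Z"))  # e.g. 2015-06-01T08:00EDT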
84242433388797951c715eaf35d9fdb4045ff5b2
|
Rearrange some code
|
dataset/data_pretreatment.py
|
dataset/data_pretreatment.py
|
import cv2
import tensorflow as tf
class Frames:
videofile_name = ''
frames = []
grouped_frames = []
grouped_frames_decoded = []
group_numbers_sequence = []
frames_number_in_group = []
number_of_frames = int
number_of_grouped_frames = int
def __init__(self, videofile_name, group_length=3, step=1):
self.videofile_name = videofile_name
self.number_of_frames = 0
self.video_to_frames()
self.group_frames(group_length=group_length, step=step)
self.decode_frames()
def video_to_frames(self):
videofile = cv2.VideoCapture(self.videofile_name)
read_succeeded = True
while read_succeeded:
read_succeeded, frame = videofile.read()
if not read_succeeded:
break
self.frames.append(frame)
assert len(self.frames) != 0
self.number_of_frames = len(self.frames)
def group_frames(self, group_length, step):
current_frame_group = 0
start_group_index = 0
end_group_index = group_length
while end_group_index <= self.number_of_frames:
number_in_group = 0
for frame in self.frames[start_group_index:end_group_index]:
self.grouped_frames.append(frame)
self.group_numbers_sequence.append(current_frame_group)
self.frames_number_in_group.append(number_in_group)
number_in_group += 1
start_group_index += step
end_group_index += step
current_frame_group += 1
self.number_of_grouped_frames = len(self.grouped_frames)
def decode_frames(self):
with tf.Session():
for frame in self.grouped_frames:
self.grouped_frames_decoded.append(
tf.image.encode_jpeg(frame, format='rgb', quality=100).eval())
class Dataset:
VIDEOS_PATH_PATTERN = ''
videofiles_names_list = []
def __init__(self, input_path):
self.VIDEOS_PATH_PATTERN = input_path
self.videofiles_names_list = tf.gfile.Glob(input_path)
def create_dataset(self, group_length=3, step=1):
for vname in self.videofiles_names_list:
with open('{vname}.tfrecord'.format(vname=vname), 'w') as tfrecord:
writer = tf.python_io.TFRecordWriter(tfrecord.name)
frames = Frames(vname, group_length=group_length, step=step)
example = self.make_sequence_example(frames)
writer.write(example.SerializeToString())
writer.close()
tfrecord.close()
@staticmethod
def make_sequence_example(frames):
example_sequence = tf.train.SequenceExample()
example_sequence.context.feature["length"].int64_list.value.append(frames.number_of_grouped_frames)
frames_sequence = example_sequence.feature_lists.feature_list["frame"]
groups = example_sequence.feature_lists.feature_list["group"]
number_in_group = example_sequence.feature_lists.feature_list["number_in_group"]
for frame, group, number in zip(frames.grouped_frames_decoded, frames.group_numbers_sequence,
frames.frames_number_in_group):
if frame is not None:
frames_sequence.feature.add().bytes_list.value.append(frame)
if group is not None:
groups.feature.add().int64_list.value.append(group)
if number is not None:
number_in_group.feature.add().int64_list.value.append(number)
return example_sequence
@staticmethod
def parse_single_example(filename):
filename_queue = tf.train.string_input_producer([filename], num_epochs=1)
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
context_features = {
"length": tf.FixedLenFeature([], dtype=tf.int64)
}
sequence_features = {
"frame": tf.FixedLenSequenceFeature([], dtype=tf.string),
"group": tf.FixedLenSequenceFeature([], dtype=tf.int64),
"number_in_group": tf.FixedLenSequenceFeature([], dtype=tf.int64),
}
context_parsed, sequence_parsed = tf.parse_single_sequence_example(
serialized=serialized_example,
context_features=context_features,
sequence_features=sequence_features
)
return context_parsed, sequence_parsed
if __name__ == '__main__':
dataset = Dataset('src/*.avi')
# dataset.create_dataset()
context, sequence = dataset.parse_single_example('src/1.avi.tfrecord')
context_features = tf.contrib.learn.run_n(context, n=1, feed_dict=None)
sequence_features_length = context_features[0]['length']
sequence_features = tf.contrib.learn.run_n(sequence, n=1, feed_dict=None)
print(sequence_features[0]['frame'][1])
|
Python
| 0.999999
|
@@ -4423,472 +4423,394 @@
-return context_parsed, sequence_parsed%0A%0A%0Aif __name__ == '__main__':%0A dataset = Dataset('src/*.avi')%0A # dataset.create_dataset()%0A context, sequence = dataset.parse_single_example('src/1.avi.tfrecord')%0A context_features = tf.contrib.learn.run_n(context, n=1, feed_dict=None)%0A sequence_features_length = context_features%5B0%5D%5B'length'%5D%0A sequence_features = tf.contrib.learn.run_n(sequence, n=1, feed_dict=None)%0A print(sequence_features%5B0%5D%5B'frame'%5D%5B1%5D
+context_features = tf.contrib.learn.run_n(context_parsed, n=1, feed_dict=None)%0A sequence_features = tf.contrib.learn.run_n(sequence_parsed, n=1, feed_dict=None)%0A%0A return context_features, sequence_features%0A%0A%0Aif __name__ == '__main__':%0A dataset = Dataset('src/*.avi')%0A # dataset.create_dataset()%0A context, sequence = dataset.parse_single_example('src/1.avi.tfrecord'
)%0A
|
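Decoded, the hunk moves the session-running calls out of the __main__ block and into parse_single_example, whose tail becomes (reconstructed from the diff, not copied from the repo):

context_features = tf.contrib.learn.run_n(context_parsed, n=1, feed_dict=None)
sequence_features = tf.contrib.learn.run_n(sequence_parsed, n=1, feed_dict=None)

return context_features, sequence_features

The __main__ block then shrinks to the Dataset construction plus a bare call to dataset.parse_single_example('src/1.avi.tfrecord').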
621565e0daa4e06ff6a67f985af124fa7f101d77
|
Refactor dbaas test helpers
|
dbaas/dbaas/tests/helpers.py
|
dbaas/dbaas/tests/helpers.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from physical.tests import factory as factory_physical
class InstanceHelper(object):
@staticmethod
def check_instance_is_master(instance):
"""
Mocks the real check_instance_is_master.
Returns True (master) when the last digit of the address, minus 1,
is divisible by 2.
Ex. Address = '127.0.0.1': the last char is 1; 1 - 1 = 0, and 0 is
divisible by 2, so this case returns True.
Ex. Address = '127.0.0.2': the last char is 2; 2 - 1 = 1, and 1 is not
divisible by 2, so this case returns False.
Ex. Address = '127.0.0.3': the last char is 3; 3 - 1 = 2, and 2 is
divisible by 2, so this case returns True.
"""
n = int(instance.address.split('.')[-1]) - 1
return n % 2 == 0
@staticmethod
def create_instances_by_quant(infra, port=3306, qt=1, total_size_in_bytes=50,
used_size_in_bytes=25, instance_type=1):
"""
Helper create instances by quantity
"""
def _create(n):
return factory_physical.InstanceFactory(
databaseinfra=infra,
address='127.7{0}.{1}.{1}'.format(infra.id, n), port=port,
instance_type=instance_type,
total_size_in_bytes=total_size_in_bytes,
used_size_in_bytes=used_size_in_bytes
)
return map(_create, range(1, qt + 1))
|
Python
| 0
|
@@ -98,75 +98,937 @@
ests
- import factory as factory_physical%0A%0A%0Aclass InstanceHelper(object):
+.factory import InstanceFactory%0A%0A%0Aclass UsedAndTotalValidator(object):%0A%0A @staticmethod%0A def assertEqual(a, b):%0A assert a == b, %22%7B%7D NOT EQUAL %7B%7D%22.format(a, b)%0A%0A @classmethod%0A def instances_sizes(cls, instances=None, expected_used_size=40, expected_total_size=90):%0A for instance in instances:%0A cls.assertEqual(instance.used_size_in_bytes, expected_used_size)%0A cls.assertEqual(instance.total_size_in_bytes, expected_total_size)%0A%0A%0Aclass InstanceHelper(object):%0A%0A model = InstanceFactory.FACTORY_FOR%0A quantity_of_masters = 1%0A%0A @classmethod%0A def kill_instances(cls, instances):%0A for instance in instances:%0A instance.status = cls.model.DEAD%0A instance.save()%0A%0A @staticmethod%0A def change_instances_type(instances, instance_type):%0A for instance in instances:%0A instance.instance_type = instance_type%0A instance.save()
%0A%0A
@@ -1776,16 +1776,30 @@
-n
+quantity_of_masters
=
-int(
inst
@@ -1807,62 +1807,192 @@
nce.
-address.split('.')%5B-1%5D) - 1%0A%0A return n %25 2 == 0
+databaseinfra.instances.count() / 2%0A%0A return instance.id in (instance.databaseinfra.instances.values_list(%0A 'id', flat=True)%5Bquantity_of_masters:%5D)
%0A%0A
@@ -2161,16 +2161,70 @@
e_type=1
+,%0A base_address='127'
):%0A
@@ -2337,25 +2337,8 @@
urn
-factory_physical.
Inst
@@ -2416,23 +2416,23 @@
ss='
-127.7%7B0%7D.%7B1%7D.%7B1
+%7B0%7D.7%7B1%7D.%7B2%7D.%7B2
%7D'.f
@@ -2437,16 +2437,30 @@
.format(
+base_address,
infra.id
|
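The escaped hunks are dense, so here is the gist reconstructed from them: the factory import is narrowed, a small UsedAndTotalValidator is introduced, and InstanceHelper gains class-level utilities, e.g.:

class UsedAndTotalValidator(object):

    @staticmethod
    def assertEqual(a, b):
        assert a == b, "{} NOT EQUAL {}".format(a, b)

class InstanceHelper(object):

    model = InstanceFactory.FACTORY_FOR

    @classmethod
    def kill_instances(cls, instances):
        for instance in instances:
            instance.status = cls.model.DEAD
            instance.save()

check_instance_is_master is also rewritten to compare instance.id against the infra's instance list instead of parsing the address, and create_instances_by_quant grows a base_address parameter.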
56aa00210b5adb663abea62ecd297f094dcbfeb0
|
remove prodigal from the subcommand module
|
diagnostic_primers/scripts/subcommands/__init__.py
|
diagnostic_primers/scripts/subcommands/__init__.py
|
# -*- coding: utf-8 -*-
"""Module providing subcommands for pdp."""
from .subcmd_config import subcmd_config
from .subcmd_prodigal import subcmd_prodigal
from .subcmd_filter import subcmd_filter
from .subcmd_eprimer3 import subcmd_eprimer3
from .subcmd_primersearch import subcmd_primersearch
from .subcmd_dedupe import subcmd_dedupe
from .subcmd_blastscreen import subcmd_blastscreen
from .subcmd_classify import subcmd_classify
from .subcmd_extract import subcmd_extract
from .subcmd_plot import subcmd_plot
|
Python
| 0.000002
|
@@ -107,53 +107,8 @@
fig%0A
-from .subcmd_prodigal import subcmd_prodigal%0A
from
|
ebb7f4ca18e099fb2902fa66cbb68c29baa98917
|
fix download_chromedriver.py to return fast when file exists
|
dev/download_chromedriver.py
|
dev/download_chromedriver.py
|
#!/usr/bin/env python
import os, stat
import requests
import zipfile
DESTINATION_DIR = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'files')
DOWNLOAD_URL = "http://chromedriver.storage.googleapis.com"
MAC_DRIVER_NAME = 'chromedriver_mac64.zip'
if not os.path.exists(DESTINATION_DIR):
os.mkdir(DESTINATION_DIR)
def get_chromedriver_path():
destination_unzip_path = os.path.join(DESTINATION_DIR, 'chromedriver')
if not os.path.exists(destination_unzip_path):
return False
return destination_unzip_path
def get_chromedriver_latest_version():
url = DOWNLOAD_URL + '/LATEST_RELEASE'
return str(requests.get(url).content.strip()).replace("'", '')[1:]
def download(version='LATEST'):
if version == 'LATEST':
download_version = get_chromedriver_latest_version()
else:
download_version = version
latest_path = "%s/%s/%s" % (DOWNLOAD_URL,
download_version, MAC_DRIVER_NAME)
destination_file_path = os.path.join(DESTINATION_DIR, MAC_DRIVER_NAME)
destination_unzip_path = os.path.join(DESTINATION_DIR, 'chromedriver')
with open(destination_file_path, 'wb') as f:
for chunk in requests.get(latest_path, stream=True).iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
with zipfile.ZipFile(destination_file_path, 'r') as f:
with open(destination_unzip_path, 'wb') as d:
d.write(f.read('chromedriver'))
st = os.stat(destination_unzip_path)
os.chmod(destination_unzip_path, (st.st_mode | stat.S_IEXEC))
return destination_unzip_path
if __name__ == '__main__':
print(download())
|
Python
| 0
|
@@ -729,16 +729,278 @@
TEST'):%0A
+ destination_file_path = os.path.join(DESTINATION_DIR, MAC_DRIVER_NAME)%0A destination_unzip_path = os.path.join(DESTINATION_DIR, 'chromedriver')%0A if os.path.exists(destination_unzip_path):%0A return %22%7B%7D driver exists%22.format(destination_unzip_path)%0A
if v
@@ -1242,158 +1242,8 @@
ME)%0A
- destination_file_path = os.path.join(DESTINATION_DIR, MAC_DRIVER_NAME)%0A destination_unzip_path = os.path.join(DESTINATION_DIR, 'chromedriver')%0A
|
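Reconstructed from the two hunks, download() now computes its destination paths up front and returns immediately when the unzipped driver is already present:

def download(version='LATEST'):
    destination_file_path = os.path.join(DESTINATION_DIR, MAC_DRIVER_NAME)
    destination_unzip_path = os.path.join(DESTINATION_DIR, 'chromedriver')
    if os.path.exists(destination_unzip_path):
        return "{} driver exists".format(destination_unzip_path)
    # ... rest of the function unchanged; the second hunk deletes the
    # now-duplicate path assignments further down.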
a7116bca501c04c85b9b8563d94b9e0ce9b6f511
|
Revert "fixed translation"
|
topaz/objects/functionobject.py
|
topaz/objects/functionobject.py
|
import copy
from topaz.frame import BuiltinFrame
from topaz.objects.objectobject import W_BaseObject
class W_FunctionObject(W_BaseObject):
_immutable_fields_ = ["name", "w_class", "visibility"]
PUBLIC = 0
PROTECTED = 1
PRIVATE = 2
def __init__(self, name, w_class=None, visibility=PUBLIC):
self.name = name
self.w_class = w_class
self.visibility = visibility
def __deepcopy__(self, memo):
obj = super(W_FunctionObject, self).__deepcopy__(memo)
obj.name = self.name
obj.w_class = copy.deepcopy(self.w_class, memo)
return obj
def arity(self, space):
return space.newint(0)
class W_UserFunction(W_FunctionObject):
_immutable_fields_ = ["bytecode", "lexical_scope"]
def __init__(self, name, bytecode, lexical_scope, visibility=W_FunctionObject.PUBLIC):
W_FunctionObject.__init__(self, name, visibility=visibility)
self.bytecode = bytecode
self.lexical_scope = lexical_scope
def __deepcopy__(self, memo):
obj = super(W_UserFunction, self).__deepcopy__(memo)
obj.bytecode = copy.deepcopy(self.bytecode, memo)
obj.lexical_scope = copy.deepcopy(self.lexical_scope, memo)
return obj
def change_visibility(self, visibility):
return W_UserFunction(self.name, self.bytecode, self.lexical_scope, self.visibility)
def call(self, space, w_receiver, args_w, block):
frame = space.create_frame(
self.bytecode,
w_self=w_receiver,
lexical_scope=self.lexical_scope,
block=block,
)
with space.getexecutioncontext().visit_frame(frame):
frame.handle_args(space, self.bytecode, args_w, block)
return space.execute_frame(frame, self.bytecode)
def arity(self, space):
return space.newint(self.bytecode.arity(negative_defaults=True))
class W_BuiltinFunction(W_FunctionObject):
_immutable_fields_ = ["func"]
def __init__(self, name, w_class, func, visibility=W_FunctionObject.PUBLIC):
W_FunctionObject.__init__(self, name, w_class, visibility=visibility)
self.func = func
def __deepcopy__(self, memo):
obj = super(W_BuiltinFunction, self).__deepcopy__(memo)
obj.func = self.func
return obj
def change_visibility(self, visibility):
return W_BuiltinFunction(self.name, self.w_class, self.func, visibility)
def call(self, space, w_receiver, args_w, block):
frame = BuiltinFrame(self.name)
ec = space.getexecutioncontext()
ec.invoke_trace_proc(space, "c-call", self.name, self.w_class.name)
with ec.visit_frame(frame):
w_res = self.func(w_receiver, space, args_w, block)
ec.invoke_trace_proc(space, "c-return", self.name, self.w_class.name)
return w_res
|
Python
| 0
|
@@ -898,27 +898,16 @@
, name,
-visibility=
visibili
@@ -2103,27 +2103,16 @@
_class,
-visibility=
visibili
|
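Decoded, the two hunks turn the keyword argument back into a positional one in both subclass constructors:

W_FunctionObject.__init__(self, name, visibility)           # in W_UserFunction
W_FunctionObject.__init__(self, name, w_class, visibility)  # in W_BuiltinFunction

Note that in the first call the positional argument lands in the w_class slot of W_FunctionObject.__init__(self, name, w_class=None, visibility=PUBLIC), which is presumably the bug the reverted "fixed translation" commit had addressed.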
97e274399651ef0191e0e5294ac9874eb158973e
|
Add DEBUG level logging to root logger in case of verbose mode
|
test/run_tests.py
|
test/run_tests.py
|
#!/usr/bin/env python
# Copyright 2015 Lajos Gerecs, Janos Czentye
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
import sys
import unittest
from xmlrunner import XMLTestRunner
from testframework.builder import TestSuitBuilder, TestCaseReader
from testframework.runner import CommandRunner, Tee
logging.basicConfig(format="%(message)s",
level=logging.INFO)
log = logging.getLogger()
CWD = os.path.dirname(os.path.abspath(__file__))
REPORT_FILE = "results.xml"
def main (args):
"""
Main function which runs the tests and generate the result file.
:return: result value for the CI environments
:rtype: int
"""
# Print header
log.info("Start ESCAPE test")
log.info("-" * 70)
if args.timeout:
log.info("Set kill timeout for test cases: %ds\n" % args.timeout)
# Create overall test suite
test_suite = create_test_suite(tests_dir=CWD,
show_output=args.show_output,
run_only_tests=args.testcases,
kill_timeout=args.timeout)
sum_test_cases = test_suite.countTestCases()
log.info("-" * 70)
log.info("Read %d test cases" % sum_test_cases)
if not sum_test_cases:
# Footer
log.info("-" * 70)
log.info("End ESCAPE test")
return 0
# Run test suite in the specific context
results = []
if args.verbose:
output_context_manager = Tee(filename=REPORT_FILE)
else:
output_context_manager = open(REPORT_FILE, 'w', buffering=0)
with output_context_manager as output:
# Create the Runner class which runs the test cases collected in a
# TestSuite object
test_runner = XMLTestRunner(output=output,
verbosity=2,
failfast=args.failfast)
try:
# Run the test cases and collect the results
results.append(test_runner.run(test_suite))
except KeyboardInterrupt:
log.warning("\n\nReceived KeyboardInterrupt! Abort running test suite...")
# Evaluate results values
was_success = all(map(lambda res: res.wasSuccessful(), results))
# Print footer
log.info("-" * 70)
log.info("End ESCAPE test")
return 0 if was_success else 1
def create_test_suite (tests_dir, show_output=False, run_only_tests=None,
kill_timeout=None):
"""
Create the container TestSuite class based on the config values.
:param tests_dir: main test dir containing the test cases
:type tests_dir: str
:param show_output: print the test output on the console
:type show_output: bool
:param run_only_tests: only run the given test cases
:type run_only_tests: list[str]
:param kill_timeout: kill timeout
:type kill_timeout: int
:return: created test suite object
:rtype: unittest.TestSuite
"""
log.info("Loading test cases...\n")
reader = TestCaseReader(tests_dir=tests_dir)
builder = TestSuitBuilder(cwd=CWD,
show_output=show_output,
kill_timeout=kill_timeout)
test_suite = builder.to_suite(reader.read_from(run_only_tests))
return test_suite
def parse_cmd_args ():
"""
Parse the commandline arguments.
"""
parser = argparse.ArgumentParser(description="ESCAPE Test runner",
add_help=True,
prog="run_tests.py")
parser.add_argument("--failfast", "-f", action="store_true", default=False,
help="Stop on first failure")
parser.add_argument("--show-output", "-o", action="store_true", default=False,
help="Show ESCAPE output")
parser.add_argument("testcases", nargs="*",
help="list test case names you want to run. Example: "
"./run_tests.py case05 case03 --show-output")
parser.add_argument("--timeout", "-t", metavar="t", type=int,
help="define explicit timeout in sec (default: %ss)" %
CommandRunner.KILL_TIMEOUT)
parser.add_argument("--verbose", "-v", action="store_true", default=False,
help="Run in verbose mode and show output")
return parser.parse_args()
if __name__ == "__main__":
args = parse_cmd_args()
result = main(args)
sys.exit(result)
|
Python
| 0
|
@@ -4753,16 +4753,67 @@
_args()%0A
+ if args.verbose:%0A log.setLevel(logging.DEBUG)%0A
result
|
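Decoded, the hunk inserts a verbosity check into the __main__ block right after argument parsing:

if __name__ == "__main__":
    args = parse_cmd_args()
    if args.verbose:
        log.setLevel(logging.DEBUG)
    result = main(args)
    sys.exit(result)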
f9b38aa0f38e86a718d851057c26f945e6b872a9
|
Update BatteryAlarm.py
|
20140707-ProgramaDeAlertaBateria/BatteryAlarm.py
|
20140707-ProgramaDeAlertaBateria/BatteryAlarm.py
|
#!/usr/bin/env python
#coding=utf-8
# You need to edit
# sudo vim /etc/crontab
# and add: */15 * * * * root python /JAIMEANDRES/ArchivosSistema/BatteryAlarm.py
#
# Restart the cron service: sudo service cron stop / start
#
# This file requires the file ReproductorDeSonidos.py
# to be in the same folder.
#
# To generate the .WAV sound files use:
# espeak -s 150 -v es-la -w BateriaCargada.wav "La bateria esta cargada, por favor desconectar el cargador."
#
# Or alternatively:
# echo The battery is at 15% of charge, please plug the charger now. | text2wave >job.wav
# And to play the .wav file from the console:
# aplay job.wav
# aplay job.wav
# Read the file that stores the battery's current charge value
with open("/sys/class/power_supply/BAT0/capacity") as f:
content = f.readlines()
# Convert the text to an integer
valor = int(content[0])
#~ print valor
# The with block closes the file automatically
# Get the current charging status
with open("/sys/class/power_supply/BAT0/status") as f:
content = f.readlines()
estado = str(content[0])
# Strip extra characters from the "estado" variable
estado = estado.replace(" ", "")
estado = estado.replace("\r", "")
estado = estado.replace("\n", "")
from ReproductorDeSonidos import ReproductorWAV
# Define the sound playback functions
def BateriaCargada():
Rep = ReproductorWAV("/JAIMEANDRES/ArchivosSistema/BateriaCargada.wav")
Rep.Reproducir()
def BateriaDescargada():
Rep = ReproductorWAV("/JAIMEANDRES/ArchivosSistema/BatteryAlert.wav")
Rep.Reproducir()
# Check the battery state
if valor > 95:
if estado != "Discharging":
BateriaCargada()
if valor <= 15:
if estado == "Discharging":
BateriaDescargada()
|
Python
| 0
|
@@ -1381,37 +1381,8 @@
AV(%22
-/JAIMEANDRES/ArchivosSistema/
Bate
@@ -1475,37 +1475,8 @@
AV(%22
-/JAIMEANDRES/ArchivosSistema/
Batt
|
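Decoded, both hunks strip the hardcoded /JAIMEANDRES/ArchivosSistema/ prefix, so the .wav files are loaded relative to the working directory:

def BateriaCargada():
    Rep = ReproductorWAV("BateriaCargada.wav")
    Rep.Reproducir()

def BateriaDescargada():
    Rep = ReproductorWAV("BatteryAlert.wav")
    Rep.Reproducir()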
f4931b4d35a63d1542963ccd369efa64e0133e2e
|
Make this test faster
|
jacquard/storage/tests/test_cloned_redis.py
|
jacquard/storage/tests/test_cloned_redis.py
|
import functools
import unittest.mock
import pytest
import hypothesis
import hypothesis.strategies
from jacquard.storage.exceptions import Retry
from jacquard.storage.cloned_redis import ClonedRedisStore, resync_all_connections
try:
import fakeredis
except ImportError:
fakeredis = None
arbitrary_key = hypothesis.strategies.characters()
arbitrary_json = hypothesis.strategies.recursive(
hypothesis.strategies.floats(allow_nan=False, allow_infinity=False) |
hypothesis.strategies.booleans() |
hypothesis.strategies.text() |
hypothesis.strategies.none(),
lambda children: (
hypothesis.strategies.lists(children) |
hypothesis.strategies.dictionaries(
hypothesis.strategies.text(),
children,
)
),
).filter(lambda x: x is not None)
def cloned_redis_test(**kwargs):
def decorator(fn):
@functools.wraps(fn)
@unittest.mock.patch('redis.StrictRedis', fakeredis.FakeStrictRedis)
def wrapper(*args, **kwargs):
try:
fn(*args, **kwargs)
finally:
fakeredis.FakeStrictRedis().flushall()
resync_all_connections()
if kwargs:
wrapper = hypothesis.given(**kwargs)(wrapper)
wrapper = pytest.mark.skipif(
fakeredis is None,
reason="fakeredis is not installed",
)(wrapper)
return wrapper
return decorator
@cloned_redis_test()
def test_smoke():
# Check no exceptions appear
storage = ClonedRedisStore('')
with storage.transaction() as store:
pass
@cloned_redis_test(
key=arbitrary_key,
)
def test_get_nonexistent_key(key):
# Just test this works without errors
storage = ClonedRedisStore('')
with storage.transaction() as store:
assert store.get(key) is None
@cloned_redis_test(
key=arbitrary_key,
value=arbitrary_json,
)
def test_simple_write(key, value):
storage = ClonedRedisStore('')
with storage.transaction() as store:
store[key] = value
with storage.transaction() as store:
assert store[key] == value
@cloned_redis_test(
values=hypothesis.strategies.dictionaries(
arbitrary_key,
arbitrary_json,
),
)
def test_enumerate_keys(values):
storage = ClonedRedisStore('')
with storage.transaction() as store:
store.update(values)
with storage.transaction(read_only=True) as store:
assert set(store.keys()) == set(values.keys())
@cloned_redis_test(
key=arbitrary_key,
value1=arbitrary_json,
value2=arbitrary_json,
)
def test_update_key(key, value1, value2):
storage = ClonedRedisStore('')
with storage.transaction() as store:
store[key] = value1
with storage.transaction() as store:
store[key] = value2
with storage.transaction() as store:
assert store[key] == value2
@cloned_redis_test(
key=arbitrary_key,
value=arbitrary_json,
)
def test_delete_key(key, value):
storage = ClonedRedisStore('')
with storage.transaction() as store:
store[key] = value
with storage.transaction() as store:
del store[key]
with storage.transaction() as store:
assert key not in store
@cloned_redis_test(
key=arbitrary_key,
value=arbitrary_json,
)
def test_exceptions_back_out_writes(key, value):
storage = ClonedRedisStore('')
try:
with storage.transaction() as store:
store[key] = value
raise RuntimeError()
except RuntimeError:
pass
with storage.transaction() as store:
assert key not in store
@cloned_redis_test(
key=arbitrary_key,
value1=arbitrary_json,
value2=arbitrary_json,
replacement_state=hypothesis.strategies.binary(),
)
def test_raises_retry_on_concurrent_write(
key,
value1,
value2,
replacement_state,
):
storage = ClonedRedisStore('')
with storage.transaction() as store:
store[key] = value1
with pytest.raises(Retry):
with storage.transaction() as store:
fakeredis.FakeStrictRedis().set(
b'jacquard-store:state-key',
replacement_state,
)
store[key] = value2
|
Python
| 0.009968
|
@@ -772,16 +772,35 @@
%0A ),%0A
+ max_leaves=10,%0A
).filter
|
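Decoded, the hunk adds a single keyword to the recursive strategy, which is what makes the test faster: max_leaves caps how large the generated JSON-like values may grow. Sketch of the resulting call, where base and extend abbreviate the strategies spelled out in the file above:

arbitrary_json = hypothesis.strategies.recursive(
    base,     # floats | booleans | text | none
    extend,   # lists / dicts of children
    max_leaves=10,
).filter(lambda x: x is not None)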
59c03b9113266830bfca272dafb17e7c0b4933ba
|
Create empty memory map if one is not provided by target subclass.
|
pyOCD/target/target.py
|
pyOCD/target/target.py
|
"""
mbed CMSIS-DAP debugger
Copyright (c) 2006-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class Target(object):
TARGET_RUNNING = 1 # Core is executing code.
TARGET_HALTED = 2 # Core is halted in debug mode.
TARGET_RESET = 3 # Core is being held in reset.
TARGET_SLEEPING = 4 # Core is sleeping due to a wfi or wfe instruction.
TARGET_LOCKUP = 5 # Core is locked up.
# Types of breakpoints.
#
# Auto will select the best type given the
# address and available breakpoints.
BREAKPOINT_HW = 1
BREAKPOINT_SW = 2
BREAKPOINT_AUTO = 3
WATCHPOINT_READ = 1
WATCHPOINT_WRITE = 2
WATCHPOINT_READ_WRITE = 3
def __init__(self, link, memoryMap=None):
self.link = link
self.flash = None
self.part_number = ""
self.memory_map = memoryMap
self.halt_on_connect = True
self.has_fpu = False
self._svd_location = None
self._svd_device = None
@property
def svd_device(self):
return self._svd_device
def setAutoUnlock(self, doAutoUnlock):
pass
def isLocked(self):
return False
def setHaltOnConnect(self, halt):
self.halt_on_connect = halt
def setFlash(self, flash):
self.flash = flash
def init(self):
raise NotImplementedError()
def disconnect(self):
pass
def info(self, request):
return self.link.info(request)
def flush(self):
self.link.flush()
def readIDCode(self):
raise NotImplementedError()
def halt(self):
raise NotImplementedError()
def step(self, disable_interrupts=True):
raise NotImplementedError()
def resume(self):
raise NotImplementedError()
def writeMemory(self, addr, value, transfer_size=32):
raise NotImplementedError()
# @brief Shorthand to write a 32-bit word.
def write32(self, addr, value):
self.writeMemory(addr, value, 32)
# @brief Shorthand to write a 16-bit halfword.
def write16(self, addr, value):
self.writeMemory(addr, value, 16)
# @brief Shorthand to write a byte.
def write8(self, addr, value):
self.writeMemory(addr, value, 8)
def readMemory(self, addr, transfer_size=32, now=True):
raise NotImplementedError()
# @brief Shorthand to read a 32-bit word.
def read32(self, addr, now=True):
return self.readMemory(addr, 32, now)
# @brief Shorthand to read a 16-bit halfword.
def read16(self, addr, now=True):
return self.readMemory(addr, 16, now)
# @brief Shorthand to read a byte.
def read8(self, addr, now=True):
return self.readMemory(addr, 8, now)
def writeBlockMemoryUnaligned8(self, addr, value):
raise NotImplementedError()
def writeBlockMemoryAligned32(self, addr, data):
raise NotImplementedError()
def readBlockMemoryUnaligned8(self, addr, size):
raise NotImplementedError()
def readBlockMemoryAligned32(self, addr, size):
raise NotImplementedError()
def readCoreRegister(self, id):
raise NotImplementedError()
def writeCoreRegister(self, id, data):
raise NotImplementedError()
def readCoreRegisterRaw(self, reg):
raise NotImplementedError()
def readCoreRegistersRaw(self, reg_list):
raise NotImplementedError()
def writeCoreRegisterRaw(self, reg, data):
raise NotImplementedError()
def writeCoreRegistersRaw(self, reg_list, data_list):
raise NotImplementedError()
def findBreakpoint(self, addr):
raise NotImplementedError()
def setBreakpoint(self, addr, type=BREAKPOINT_AUTO):
raise NotImplementedError()
def getBreakpointType(self, addr):
raise NotImplementedError()
def removeBreakpoint(self, addr):
raise NotImplementedError()
def setWatchpoint(self, addr, size, type):
raise NotImplementedError()
def removeWatchpoint(self, addr, size, type):
raise NotImplementedError()
def reset(self, software_reset=None):
raise NotImplementedError()
def resetStopOnReset(self, software_reset=None):
raise NotImplementedError()
def setTargetState(self, state):
raise NotImplementedError()
def getState(self):
raise NotImplementedError()
def isRunning(self):
return self.getState() == Target.TARGET_RUNNING
def isHalted(self):
return self.getState() == Target.TARGET_HALTED
def getMemoryMap(self):
return self.memory_map
def setVectorCatchFault(self, enable):
raise NotImplementedError()
def getVectorCatchFault(self):
raise NotImplementedError()
def setVectorCatchReset(self, enable):
raise NotImplementedError()
def getVectorCatchReset(self):
raise NotImplementedError()
# GDB functions
def getTargetXML(self):
raise NotImplementedError()
def getMemoryMapXML(self):
if self.memory_map:
return self.memory_map.getXML()
elif hasattr(self, 'memoryMapXML'):
return self.memoryMapXML
else:
return None
def getRegisterContext(self):
raise NotImplementedError()
def setRegisterContext(self, data):
raise NotImplementedError()
def setRegister(self, reg, data):
raise NotImplementedError()
def getTResponse(self, gdbInterrupt=False):
raise NotImplementedError()
def getSignalValue(self):
raise NotImplementedError()
|
Python
| 0
|
@@ -598,16 +598,51 @@
e.%0A%22%22%22%0A%0A
+from .memory_map import MemoryMap%0A%0A
class Ta
@@ -1377,16 +1377,31 @@
emoryMap
+ or MemoryMap()
%0A
|
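Decoded, the commit imports MemoryMap and falls back to an empty map when the subclass does not supply one:

from .memory_map import MemoryMap

# in Target.__init__:
self.memory_map = memoryMap or MemoryMap()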
1256f695a441049438565285f48c9119e5211cf5
|
Enable follow redirection.
|
pyaem/bagofrequests.py
|
pyaem/bagofrequests.py
|
import cStringIO
from handlers import unexpected as handle_unexpected
import pycurl
import requests
import urllib
def request(method, url, params, handlers, **kwargs):
curl = pycurl.Curl()
body_io = cStringIO.StringIO()
if method == 'post':
curl.setopt(pycurl.POST, 1)
curl.setopt(pycurl.POSTFIELDS, urllib.urlencode(params))
else:
url = '{0}?{1}'.format(url, urllib.urlencode(params))
curl.setopt(pycurl.URL, url)
curl.setopt(pycurl.WRITEFUNCTION, body_io.write)
curl.perform()
response = {
'http_code': curl.getinfo(pycurl.HTTP_CODE),
'body' : body_io.getvalue()
}
curl.close()
if response['http_code'] in handlers:
return handlers[response['http_code']](response, **kwargs)
else:
handle_unexpected(response, **kwargs)
def download_file(url, params, handlers, **kwargs):
curl = pycurl.Curl()
url = '{0}?{1}'.format(url, urllib.urlencode(params))
file = open(kwargs['file_name'], 'wb')
curl.setopt(pycurl.URL, url)
curl.setopt(pycurl.WRITEDATA, file)
curl.perform()
response = {
'http_code': curl.getinfo(pycurl.HTTP_CODE)
}
curl.close()
file.close()
if response['http_code'] in handlers:
return handlers[response['http_code']](response, **kwargs)
else:
handle_unexpected(response, **kwargs)
def upload_file(url, params, handlers, **kwargs):
curl = pycurl.Curl()
body_io = cStringIO.StringIO()
_params = []
for key, value in params.iteritems():
_params.append((key, value))
curl.setopt(pycurl.POST, 1)
curl.setopt(pycurl.HTTPPOST, _params)
curl.setopt(pycurl.URL, url)
curl.setopt(pycurl.WRITEFUNCTION, body_io.write)
curl.perform()
response = {
'http_code': curl.getinfo(pycurl.HTTP_CODE),
'body' : body_io.getvalue()
}
curl.close()
if response['http_code'] in handlers:
return handlers[response['http_code']](response, **kwargs)
else:
handle_unexpected(response, **kwargs)
|
Python
| 0
|
@@ -419,32 +419,71 @@
ycurl.URL, url)%0A
+%09curl.setopt(pycurl.FOLLOWLOCATION, 1)%0A
%09curl.setopt(pyc
@@ -508,32 +508,32 @@
body_io.write)%0A
-
%09%0A%09curl.perform(
@@ -993,32 +993,71 @@
ycurl.URL, url)%0A
+%09curl.setopt(pycurl.FOLLOWLOCATION, 1)%0A
%09curl.setopt(pyc
@@ -1621,32 +1621,71 @@
ycurl.URL, url)%0A
+%09curl.setopt(pycurl.FOLLOWLOCATION, 1)%0A
%09curl.setopt(pyc
@@ -1950,32 +1950,32 @@
*kwargs)%0A%09else:%0A
-
%09%09handle_unexpec
@@ -1977,28 +1977,29 @@
expected(response, **kwargs)
+%0A
|
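Decoded, each of the three request helpers gains one setopt call right after the URL is set, so pycurl follows HTTP 3xx redirects:

curl.setopt(pycurl.URL, url)
curl.setopt(pycurl.FOLLOWLOCATION, 1)  # follow redirects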
7f7f32d032c68197b2152eeb8d9189f3d1493b57
|
Bump version number for development
|
pybinding/__about__.py
|
pybinding/__about__.py
|
"""Package for numerical tight-binding calculations in solid state physics"""
__title__ = "pybinding"
__version__ = "0.8.1"
__summary__ = "Package for tight-binding calculations"
__url__ = "https://github.com/dean0x7d/pybinding"
__author__ = "Dean Moldovan"
__copyright__ = "2015-2016, " + __author__
__email__ = "dean0x7d@gmail.com"
__license__ = "BSD"
|
Python
| 0
|
@@ -116,11 +116,15 @@
%220.
-8.1
+9.0.dev
%22%0A__
|
0c44f657dd8ad285fa2713d0ab6a367c50a7da4c
|
If empty string, then it is ok
|
pybossa/auditlogger.py
|
pybossa/auditlogger.py
|
# -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2014 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
from pybossa.model.auditlog import Auditlog
class AuditLogger(object):
def __init__(self, auditlog_repo, caller='web'):
self.repo = auditlog_repo
self.caller = caller
def log_event(self, app, user, action, attribute, old_value, new_value):
log = Auditlog(
app_id=app.id,
app_short_name=app.short_name,
user_id=user.id,
user_name=user.name,
action=action,
caller=self.caller,
attribute=attribute,
old_value=old_value,
new_value=new_value)
self.repo.save(log)
def get_project_logs(self, project_id):
return self.repo.filter_by(app_id=project_id)
def add_log_entry(self, old_project, new_project, user):
if old_project is None:
self.log_event(new_project, user, 'create', 'project',
'Nothing', 'New project')
return
if new_project is None:
self.log_event(old_project, user, 'delete', 'project', 'Saved', 'Deleted')
return
old = old_project.dictize()
new = new_project.dictize()
attributes = (set(old.keys()) | set(new.keys())) - set(['updated'])
changes = {attr: (old.get(attr), new.get(attr)) for attr in attributes
if old.get(attr) != new.get(attr)}
for attr in changes:
old_value = changes[attr][0]
new_value = changes[attr][1]
if attr == 'info':
old_value = old_value if old_value is not None else {}
self._manage_info_keys(new_project, user, old_value, new_value)
else:
if old_value is None or '':
old_value = ''
if new_value is None or '':
new_value = ''
if (unicode(old_value) != unicode(new_value)):
self.log_event(new_project, user, 'update', attr,
old_value, new_value)
def _manage_info_keys(self, project, user, old_value, new_value, action='update'):
s_o = set(old_value.keys())
s_n = set(new_value.keys())
# For new keys
for new_key in (s_n - s_o):
# only log changed keys
if new_value.get(new_key) is None:
continue
self.log_event(project, user, action, new_key,
old_value.get(new_key),
new_value.get(new_key))
# For updated keys
for same_key in (s_n & s_o):
# only log changed keys
if old_value.get(same_key) == new_value.get(same_key):
continue
self.log_event(project, user, action, same_key,
old_value.get(same_key), new_value.get(same_key))
|
Python
| 0.999999
|
@@ -2438,38 +2438,32 @@
ld_value is None
- or ''
:%0A
@@ -2523,14 +2523,8 @@
None
- or ''
:%0A
|
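For context on the fix: the original condition old_value is None or '' parses as (old_value is None) or '', and a bare '' is always falsy, so the condition was already equivalent to old_value is None and an empty string was never overwritten. The commit removes the dead clause; decoded:

if old_value is None:
    old_value = ''
if new_value is None:
    new_value = ''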
3d930d6191f176313e551b2a0fbe0f94ac8b08d3
|
add test condition for forking test
|
tests/test_tactical_cortex_caddy.py
|
tests/test_tactical_cortex_caddy.py
|
import unittest
from OnStage.tactical_cortex_caddy import *
class TacticalCortexTestCase(unittest.TestCase):
def setUp(self):
self.cortex = TacticalCortex()
self.ana_f = [0,0,0,0,0,0,0,0]
self.ana_0 = [2,0,0,0,0,0,0,0]
self.ana_1 = [0,2,0,0,0,0,0,0]
self.ana_2 = [0,0,2,0,0,0,0,0]
self.ana_3 = [0,0,0,2,0,0,0,0]
self.ana_4 = [0,0,0,0,2,0,0,0]
self.ana_5 = [0,0,0,0,0,2,0,0]
self.ana_6 = [0,0,0,0,0,0,2,0]
self.ana_7 = [0,0,0,0,0,0,0,2]
self.opt_f = [ 0,1,2,
3,4,5,
6,7,8 ]
self.row_0 = [ 2,
3,4,5,
6,7,8 ]
self.row_1 = [ 0,1,2,
5,
6,7,8 ]
self.row_2 = [ 0,1,2,
3,4,5,
7 ]
self.col_3 = [ 1,2,
3,4,5,
7,8 ]
self.col_4 = [ 0,1,2,
3, 5,
6, 8 ]
self.col_5 = [ 0,1,
3,4,
6,7,8 ]
self.diag6 = [ 0,1,2,
3, 5,
6,7, ]
self.diag7 = [ 0,1,
3, 5,
6,7,8 ]
self.expected = [False, 2,5,7, 3,1,8, 0,6]
def test_take_win_chance(self):
dic_f = { 'analysis': self.ana_f,'options': self.opt_f,
'marker_code': 1 }
dic_0 = { 'analysis': self.ana_0,'options': self.row_0,
'marker_code': 1 }
dic_1 = { 'analysis': self.ana_1,'options': self.row_1,
'marker_code': 1 }
dic_2 = { 'analysis': self.ana_2,'options': self.row_2,
'marker_code': 1 }
dic_3 = { 'analysis': self.ana_3,'options': self.col_3,
'marker_code': 1 }
dic_4 = { 'analysis': self.ana_4,'options': self.col_4,
'marker_code': 1 }
dic_5 = { 'analysis': self.ana_5,'options': self.col_5,
'marker_code': 1 }
dic_6 = { 'analysis': self.ana_6,'options': self.diag6,
'marker_code': 1 }
dic_7 = { 'analysis': self.ana_7,'options': self.diag7,
'marker_code': 1 }
tf = self.cortex.take_win_chance(dic_f)
t0 = self.cortex.take_win_chance(dic_0)
t1 = self.cortex.take_win_chance(dic_1)
t2 = self.cortex.take_win_chance(dic_2)
t3 = self.cortex.take_win_chance(dic_3)
t4 = self.cortex.take_win_chance(dic_4)
t5 = self.cortex.take_win_chance(dic_5)
t6 = self.cortex.take_win_chance(dic_6)
t7 = self.cortex.take_win_chance(dic_7)
test_yields = [tf, t0,t1,t2, t3,t4,t5, t6,t7]
self.assertEqual(test_yields, self.expected)
def test_avoid_losing(self):
dic_f = { 'analysis': self.ana_f,'options': self.opt_f,
'enemy_code': 1 }
dic_0 = { 'analysis': self.ana_0,'options': self.row_0,
'enemy_code': 1 }
dic_1 = { 'analysis': self.ana_1,'options': self.row_1,
'enemy_code': 1 }
dic_2 = { 'analysis': self.ana_2,'options': self.row_2,
'enemy_code': 1 }
dic_3 = { 'analysis': self.ana_3,'options': self.col_3,
'enemy_code': 1 }
dic_4 = { 'analysis': self.ana_4,'options': self.col_4,
'enemy_code': 1 }
dic_5 = { 'analysis': self.ana_5,'options': self.col_5,
'enemy_code': 1 }
dic_6 = { 'analysis': self.ana_6,'options': self.diag6,
'enemy_code': 1 }
dic_7 = { 'analysis': self.ana_7,'options': self.diag7,
'enemy_code': 1 }
tf = self.cortex.avoid_losing(dic_f)
t0 = self.cortex.avoid_losing(dic_0)
t1 = self.cortex.avoid_losing(dic_1)
t2 = self.cortex.avoid_losing(dic_2)
t3 = self.cortex.avoid_losing(dic_3)
t4 = self.cortex.avoid_losing(dic_4)
t5 = self.cortex.avoid_losing(dic_5)
t6 = self.cortex.avoid_losing(dic_6)
t7 = self.cortex.avoid_losing(dic_7)
test_yields = [tf, t0,t1,t2, t3,t4,t5, t6,t7]
self.assertEqual(test_yields, self.expected)
def test_take_fork_chance(self):
pass
def test_avoid_fork(self):
pass
|
Python
| 0
|
@@ -1399,16 +1399,70 @@
, 0,6%5D%0A%0A
+ self.forking_board = %5B1,10,0, 0,1,0, 0,0,10%5D%0A%0A
def
|
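Decoded, the hunk adds one more fixture to setUp, a flattened 3x3 board presumably meant for the still-empty fork tests:

self.forking_board = [1,10,0, 0,1,0, 0,0,10]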