commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
c0297fd4837a83c177a89656c5ef591d7b5430d2 | add download remote file function | mp3Downloader.py | mp3Downloader.py | import urllib2
import subprocess
import os
import tempfile
import shutil
"""
http://feeds.gimletmedia.com/~r/hearstartup/~5/sqn8_rZ3xTM/GLT6849433183.mp3
"""
TEMP_DIR = './tmp'
OUTPUT_DIR = './output'
def cleanup():
shutil.rmtree(TEMP_DIR)
def create_ancillary_folders():
if not os.path.exists(OUTPUT_DIR):
print "Output directory absent. Creating output directory..."
os.makedirs(OUTPUT_DIR)
if not os.path.exists(TEMP_DIR):
print "Creating tmp directory..."
os.makedirs(TEMP_DIR)
def get_url_from_user():
"""
Function that a URL to be passed as a parameter from the terminal.
The URL should contain an mp3 file to be downloaded
"""
url = raw_input(
"Please enter the URL of the podcast you'd like to transcribe. ")
print "You just entered: ", url
return url
def create_temporary_folder():
dirpath = tempfile.mkdtemp(dir=TEMP_DIR)
print "Just created tmp dir at ", dirpath
return dirpath
def create_temporary_file(directory, suffix):
fp = tempfile.NamedTemporaryFile(dir=directory, suffix=suffix)
print "Just created tmp file at ", fp.name
return fp
def download_remote_file(url, dest):
"""
Downloads a remote file to the specified location.
Params:
url (string): The url of the remote file
dest (string): The download destination
"""
remote_file = urllib2.urlopen(url)
meta_info = remote_file.info()
file_size = int(meta_info.getheaders("Content-Length")[0])
print "Downloading: %s \n Bytes: %s" % (url.split('/')[-1], file_size)
file_size_dl = 0
block_sz = 8192
with open(dest, 'wb') as local_file:
while True:
buf = remote_file.read(block_sz)
if not buf:
break
file_size_dl += len(buf)
local_file.write(buf)
status = r"%10d [%3.2f%%]" % (
file_size_dl, file_size_dl * 100. / file_size)
status = status + chr(8) * (len(status) + 1)
print status,
def get_podcast_file(url):
"""
Returns the podcast file to process in the right format. First, we download
the podcast from the remote location and then we convert the file to the
right format for the transcriber.
Params:
url (string): The url of the remote file
"""
# create the download destination
dirpath = create_temporary_folder()
mp3file = urllib2.urlopen(url)
mp3_uid = url.split('/')[-1:]
print mp3_uid
filepath = create_temporary_file(dirpath, mp3_uid[0])
print filepath.name, "this is the filepath"
with open(filepath.name, 'wb') as output:
output.write(mp3file.read())
if not os.path.exists(filepath.name):
print "Failed to write mp3 in ", filepath
convert_to_wav(filepath.name)
return filepath.name
def convert_to_wav(filepath):
"""
Converts files to a format that pocketsphinx can deal with
(16khz mono 16bit wav)
"""
print filepath
new_file = filepath[:-4]
print new_file, "this the new filename without the .mp3 extension"
new_file = new_file + '.wav'
if os.path.exists(new_file + '.transcription.txt') is False:
subprocess.call(['ffmpeg', '-y', '-i', filepath, '-acodec',
'pcm_s16le', '-ac', '1', '-ar', '16000', new_file])
def main():
create_ancillary_folders()
new_path = download_mp3_from_url(get_url_from_user())
# assuming here a function that does transcribe & write to output
print "I have transcribed the podcast here. "
print ""
print "Proceeding to cleanup"
print ""
cleanup()
# here I need to go and delete the temp files.
if __name__ == "__main__":
main()
| import urllib2
import subprocess
import os
import tempfile
import shutil
"""
http://feeds.gimletmedia.com/~r/hearstartup/~5/sqn8_rZ3xTM/GLT6849433183.mp3
"""
TEMP_DIR = './tmp'
OUTPUT_DIR = './output'
def cleanup():
shutil.rmtree(TEMP_DIR)
def create_ancillary_folders():
if not os.path.exists(OUTPUT_DIR):
print "Output directory absent. Creating output directory..."
os.makedirs(OUTPUT_DIR)
if not os.path.exists(TEMP_DIR):
print "Creating tmp directory..."
os.makedirs(TEMP_DIR)
def get_url_from_user():
"""
Function that a URL to be passed as a parameter from the terminal.
The URL should contain an mp3 file to be downloaded
"""
url = raw_input(
"Please enter the URL of the podcast you'd like to transcribe. ")
print "You just entered: ", url
return url
def create_temporary_folder():
dirpath = tempfile.mkdtemp(dir=TEMP_DIR)
print "Just created tmp dir at ", dirpath
return dirpath
def create_temporary_file(directory, suffix):
fp = tempfile.NamedTemporaryFile(dir=directory, suffix=suffix)
print "Just created tmp file at ", fp.name
return fp
def download_mp3_from_url(url):
"""
Once we have received the mp3 url from the user, we download and write it
in a file, in binary. This function writes always in the same file
"""
dirpath = create_temporary_folder()
mp3file = urllib2.urlopen(url)
mp3_uid = url.split('/')[-1:]
print mp3_uid
filepath = create_temporary_file(dirpath, mp3_uid[0])
print filepath.name, "this is the filepath"
with open(filepath.name, 'wb') as output:
output.write(mp3file.read())
if not os.path.exists(filepath.name):
print "Failed to write mp3 in ", filepath
convert_to_wav(filepath.name)
return filepath.name
def convert_to_wav(filepath):
"""
Converts files to a format that pocketsphinx can deal with
(16khz mono 16bit wav)
"""
print filepath
new_file = filepath[:-4]
print new_file, "this the new filename without the .mp3 extension"
new_file = new_file + '.wav'
if os.path.exists(new_file + '.transcription.txt') is False:
subprocess.call(['ffmpeg', '-y', '-i', filepath, '-acodec',
'pcm_s16le', '-ac', '1', '-ar', '16000', new_file])
def main():
create_ancillary_folders()
new_path = download_mp3_from_url(get_url_from_user())
# assuming here a function that does transcribe & write to output
print "I have transcribed the podcast here. "
print ""
print "Proceeding to cleanup"
print ""
cleanup()
# here I need to go and delete the temp files.
if __name__ == "__main__":
main()
| Python | 0.000001 |
a529eb18e9d114672350853a48a16d6036ca0c76 | split the former RulesView into three parts | ELiDE/rulesview.py | ELiDE/rulesview.py | # This file is part of LiSE, a framework for life simulation games.
# Copyright (C) 2013-2014 Zachary Spector, ZacharySpector@gmail.com
"""Widget to enable browsing rules and the functions that make them."""
from functools import partial
from kivy.lang import Builder
from kivy.clock import Clock
from kivy.logger import Logger
from kivy.adapters import ListAdapter
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.listview import ListView, ListItemButton
from kivy.uix.widget import Widget
from kivy.properties import ObjectProperty, DictProperty
class FuncsView(Widget):
func_adapter = DictProperty({})
def get_func_data(self, store):
return list(
self.engine.function.db.func_table_name_plaincode(store)
)
def get_func_adapter(self, store):
if store not in self.func_adapter:
self.func_adapter[store] = ListAdapter(
data=self.get_func_data(store),
cls=ListItemButton,
args_converter=lambda i, (name, code): {
'text': name,
'on_press': lambda inst:
self.show_func_editor(
store,
name,
code
)
},
selection_mode='single',
allow_empty_selection=True
)
return self.func_adapter[store]
def refresh_func_adapter(self, store, *args):
self.get_func_adapter(store).data = self.get_func_data(store)
def on_engine(self):
if self.engine is None:
return
self._func_view_trigger = ListView(
adapter=self.get_func_adapter('trigger')
)
self._trigger_refresh_trigger = Clock.create_trigger(
partial(self.refresh_func_adapter, 'trigger')
)
self._func_view_prereq = ListView(
adapter=self.get_func_adapter('prereq')
)
self._trigger_refresh_prereq = Clock.create_trigger(
partial(self.refresh_func_adapter, 'prereq')
)
self._func_view_action = ListView(
adapter=self.get_func_adapter('action')
)
self._trigger_refresh_action = Clock.create_trigger(
partial(self.refresh_func_adapter, 'action')
)
class RulesView(Widget):
subject = ObjectProperty()
class RulesBench(BoxLayout):
subject = ObjectProperty()
kv = """
<RulesBench>:
orientation: 'horizontal'
FuncsView:
engine: root.subject.engine
RulesView:
subject: root.subject
"""
Builder.load_string(kv)
| # This file is part of LiSE, a framework for life simulation games.
# Copyright (C) 2013-2014 Zachary Spector, ZacharySpector@gmail.com
"""Widget to enable browsing rules and the functions that make them."""
from functools import partial
from kivy.clock import Clock
from kivy.logger import Logger
from kivy.adapters import ListAdapter
from kivy.uix.listview import ListView, ListItemButton
from kivy.uix.widget import Widget
from kivy.properties import ObjectProperty, DictProperty
class RulesView(Widget):
subject = ObjectProperty()
func_adapter = DictProperty({})
def get_func_data(self, store):
return list(
self.engine.function.db.func_table_name_plaincode(store)
)
def get_func_adapter(self, store):
if store not in self.func_adapter:
self.func_adapter[store] = ListAdapter(
data=self.get_func_data(store),
cls=ListItemButton,
args_converter=lambda i, (name, code): {
'text': name,
'on_press': lambda inst:
self.show_func_editor(
store,
name,
code
)
},
selection_mode='single',
allow_empty_selection=True
)
return self.func_adapter[store]
def refresh_func_adapter(self, store, *args):
self.get_func_adapter(store).data = self.get_func_data(store)
def on_engine(self):
if self.engine is None:
return
self._func_view_trigger = ListView(
adapter=self.get_func_adapter('trigger')
)
self._trigger_refresh_trigger = Clock.create_trigger(
partial(self.refresh_func_adapter, 'trigger')
)
self._func_view_prereq = ListView(
adapter=self.get_func_adapter('prereq')
)
self._trigger_refresh_prereq = Clock.create_trigger(
partial(self.refresh_func_adapter, 'prereq')
)
self._func_view_action = ListView(
adapter=self.get_func_adapter('action')
)
self._trigger_refresh_action = Clock.create_trigger(
partial(self.refresh_func_adapter, 'action')
)
| Python | 0 |
c4d809a3b8ccb24d684c489925dd6c9634dbdf55 | Remove use of DesiredCapabilities object, use Options object instead (#981) | splinter/driver/webdriver/firefox.py | splinter/driver/webdriver/firefox.py | # -*- coding: utf-8 -*-
# Copyright 2012 splinter authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from selenium.webdriver import Firefox
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from splinter.driver.webdriver import (
BaseWebDriver,
WebDriverElement as WebDriverElement,
)
from splinter.driver.webdriver.cookie_manager import CookieManager
from selenium.webdriver.firefox.options import Options
class WebDriver(BaseWebDriver):
driver_name = "Firefox"
def __init__(
self,
options=None,
profile=None,
extensions=None,
user_agent=None,
profile_preferences=None,
fullscreen=False,
wait_time=2,
capabilities=None,
headless=False,
incognito=False,
**kwargs
):
firefox_profile = FirefoxProfile(profile)
firefox_profile.set_preference("extensions.logging.enabled", False)
firefox_profile.set_preference("network.dns.disableIPv6", False)
options = options or Options()
if capabilities:
for key, value in capabilities.items():
options.set_capability(key, value)
if user_agent is not None:
firefox_profile.set_preference("general.useragent.override", user_agent)
if profile_preferences:
for key, value in profile_preferences.items():
firefox_profile.set_preference(key, value)
if extensions:
for extension in extensions:
firefox_profile.add_extension(extension)
if headless:
options.add_argument("--headless")
if incognito:
options.add_argument("-private")
self.driver = Firefox(
firefox_profile,
options=options,
**kwargs,
)
if fullscreen:
self.driver.fullscreen_window()
self.element_class = WebDriverElement
self._cookie_manager = CookieManager(self.driver)
super(WebDriver, self).__init__(wait_time)
| # -*- coding: utf-8 -*-
# Copyright 2012 splinter authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from selenium.webdriver import DesiredCapabilities, Firefox
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
from splinter.driver.webdriver import (
BaseWebDriver,
WebDriverElement as WebDriverElement,
)
from splinter.driver.webdriver.cookie_manager import CookieManager
from selenium.webdriver.firefox.options import Options
class WebDriver(BaseWebDriver):
driver_name = "Firefox"
def __init__(
self,
options=None,
profile=None,
extensions=None,
user_agent=None,
profile_preferences=None,
fullscreen=False,
wait_time=2,
capabilities=None,
headless=False,
incognito=False,
**kwargs
):
firefox_profile = FirefoxProfile(profile)
firefox_profile.set_preference("extensions.logging.enabled", False)
firefox_profile.set_preference("network.dns.disableIPv6", False)
firefox_capabilities = DesiredCapabilities().FIREFOX
firefox_capabilities["marionette"] = True
options = options or Options()
if capabilities:
for key, value in capabilities.items():
# Selenium 3
firefox_capabilities[key] = value
# Selenium 4
options.set_capability(key, value)
if user_agent is not None:
firefox_profile.set_preference("general.useragent.override", user_agent)
if profile_preferences:
for key, value in profile_preferences.items():
firefox_profile.set_preference(key, value)
if extensions:
for extension in extensions:
firefox_profile.add_extension(extension)
if headless:
options.add_argument("--headless")
if incognito:
options.add_argument("-private")
self.driver = Firefox(
firefox_profile,
capabilities=firefox_capabilities,
options=options,
**kwargs
)
if fullscreen:
self.driver.fullscreen_window()
self.element_class = WebDriverElement
self._cookie_manager = CookieManager(self.driver)
super(WebDriver, self).__init__(wait_time)
| Python | 0 |
546b55248457055c4803d7ea65c21b92276309bd | Reformat and update copyright. | spotseeker_server/views/add_image.py | spotseeker_server/views/add_image.py | # Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
""" Changes
=================================================================
sbutler1@illinois.edu: adapt to a simplier RESTDispatch framework.
"""
from spotseeker_server.views.rest_dispatch import RESTDispatch, RESTException
from spotseeker_server.models import SpotImage, Spot
from django.http import HttpResponse
from spotseeker_server.require_auth import *
from PIL import Image
class AddImageView(RESTDispatch):
"""Saves a SpotImage for a particular Spot on POST to
/api/v1/spot/<spot id>/image.
"""
@user_auth_required
@admin_auth_required
def POST(self, request, spot_id):
spot = Spot.objects.get(pk=spot_id)
if "image" not in request.FILES:
raise RESTException("No image", 400)
args = {
"upload_application": request.META.get(
"SS_OAUTH_CONSUMER_NAME", ""
),
"upload_user": request.META.get("SS_OAUTH_USER", ""),
"description": request.POST.get("description", ""),
"display_index": request.POST.get("display_index"),
"image": request.FILES["image"],
}
if args["display_index"] is None:
# TODO: is there a better way?
# get display_indexes for all of the existing images
# and set the new one to the biggest + 1
indices = [img.display_index for img in spot.spotimage_set.all()]
if indices:
args["display_index"] = max(indices) + 1
else:
args["display_index"] = 0
image = spot.spotimage_set.create(**args)
response = HttpResponse(status=201)
response["Location"] = image.rest_url()
return response
| # Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
""" Copyright 2012, 2013 UW Information Technology, University of Washington
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Changes
=================================================================
sbutler1@illinois.edu: adapt to a simplier RESTDispatch framework.
"""
from spotseeker_server.views.rest_dispatch import RESTDispatch, RESTException
from spotseeker_server.models import SpotImage, Spot
from django.http import HttpResponse
from spotseeker_server.require_auth import *
from PIL import Image
class AddImageView(RESTDispatch):
""" Saves a SpotImage for a particular Spot on POST to
/api/v1/spot/<spot id>/image.
"""
@user_auth_required
@admin_auth_required
def POST(self, request, spot_id):
spot = Spot.objects.get(pk=spot_id)
if "image" not in request.FILES:
raise RESTException("No image", 400)
args = {
'upload_application': request.META.get('SS_OAUTH_CONSUMER_NAME',
''),
'upload_user': request.META.get('SS_OAUTH_USER', ''),
'description': request.POST.get('description', ''),
'display_index': request.POST.get('display_index'),
'image': request.FILES['image']
}
if args['display_index'] is None:
# TODO: is there a better way?
# get display_indexes for all of the existing images
# and set the new one to the biggest + 1
indices = [img.display_index for img in spot.spotimage_set.all()]
if indices:
args['display_index'] = max(indices) + 1
else:
args['display_index'] = 0
image = spot.spotimage_set.create(**args)
response = HttpResponse(status=201)
response["Location"] = image.rest_url()
return response
| Python | 0 |
913163a1acddc1d846eb269c04ae3dc60ecbc2bd | Update LongitudinalController.py | workspace/src/labs/src/lab5/LongitudinalController.py | workspace/src/labs/src/lab5/LongitudinalController.py | #!/usr/bin/env python
import rospy
import time
from barc.msg import ECU, Encoder
from numpy import pi
# from encoder
v_meas = 0.0
t0 = time.time()
r_tire = 0.05 # radius of the tire
servo_pwm = 1580.0
motor_pwm = 1500.0
motor_pwm_offset = 1500.0
# reference speed
v_ref = 0.5 # give reference speed is 0.5 m/s
# ===================================PID longitudinal controller================================#
class PID():
def __init__(self, kp=1, ki=1, kd=1, integrator=0, derivator=0):
self.kp = kp
self.ki = ki
self.kd = kd
self.integrator = integrator
self.derivator = derivator
self.integrator_max = 10
self.integrator_min = -10
def acc_calculate(self, speed_reference, speed_current):
self.error = speed_reference - speed_current
# Propotional control
self.P_effect = self.kp*self.error
# Integral control
self.integrator = self.integrator + self.error
## Anti windup
if self.integrator >= self.integrator_max:
self.integrator = self.integrator_max
if self.integrator <= self.integrator_min:
self.integrator = self.integrator_min
self.I_effect = self.ki*self.integrator
# Derivative control
self.derivator = self.error - self.derivator
self.D_effect = self.kd*self.derivator
self.derivator = self.error
acc = self.P_effect + self.I_effect + self.D_effect
if acc <= 0:
acc = 20
return acc
# =====================================end of the controller====================================#
# state estimation node
def controller():
global motor_pwm, servo_pwm, motor_pwm_offset
global v_ref, v_meas
# Initialize node:
rospy.init_node('simulationGain', anonymous=True)
# TODO: Add your necessary topic subscriptions / publications, depending on your preferred method of velocity estimation
ecu_pub = rospy.Publisher('ecu_pwm', ECU, queue_size = 10)
# Set node rate
loop_rate = 50
rate = rospy.Rate(loop_rate)
# TODO: Initialize your PID controller here, with your chosen PI gains
PID_control = PID(kp = 1, ki = 1, kd = 0)
while not rospy.is_shutdown():
# calculate acceleration from PID controller.
motor_pwm = PID_control.acc_calculate(v_ref, v_meas) + motor_pwm_offset
# publish control command
ecu_pub.publish( ECU(motor_pwm, servo_pwm) )
# wait
rate.sleep()
if __name__ == '__main__':
try:
controller()
except rospy.ROSInterruptException:
pass
| #!/usr/bin/env python
import rospy
import time
from barc.msg import ECU, Encoder
from numpy import pi
# from encoder
v_meas = 0.0
t0 = time.time()
ang_km1 = 0.0
ang_km2 = 0.0
n_FL = 0.0
n_FR = 0.0
n_BL = 0.0
n_BR = 0.0
r_tire = 0.05 # radius of the tire
servo_pwm = 1580.0
motor_pwm = 1500.0
motor_pwm_offset = 1500.0
# reference speed
v_ref = 0.5 # reference speed is 0.5 m/s
# ===================================PID longitudinal controller================================#
class PID():
def __init__(self, kp=1, ki=1, kd=1, integrator=0, derivator=0):
self.kp = kp
self.ki = ki
self.kd = kd
self.integrator = integrator
self.derivator = derivator
self.integrator_max = 10
self.integrator_min = -10
def acc_calculate(self, speed_reference, speed_current):
self.error = speed_reference - speed_current
# Propotional control
self.P_effect = self.kp*self.error
# Integral control
self.integrator = self.integrator + self.error
## Anti windup
if self.integrator >= self.integrator_max:
self.integrator = self.integrator_max
if self.integrator <= self.integrator_min:
self.integrator = self.integrator_min
self.I_effect = self.ki*self.integrator
# Derivative control
self.derivator = self.error - self.derivator
self.D_effect = self.kd*self.derivator
self.derivator = self.error
acc = self.P_effect + self.I_effect + self.D_effect
if acc <= 0:
acc = 20
return acc
# =====================================end of the controller====================================#
# state estimation node
def controller():
global motor_pwm, servo_pwm, motor_pwm_offset
global v_ref, v_meas
# Initialize node:
rospy.init_node('simulationGain', anonymous=True)
# TODO: Add your necessary topic subscriptions / publications, depending on your preferred method of velocity estimation
ecu_pub = rospy.Publisher('ecu_pwm', ECU, queue_size = 10)
# Set node rate
loop_rate = 50
rate = rospy.Rate(loop_rate)
# TODO: Initialize your PID controller here
while not rospy.is_shutdown():
# acceleration calculated from PID controller.
motor_pwm = PID_control.acc_calculate(v_ref, v_meas) + motor_pwm_offset
rospy.logwarn("pwm = {}".format(motor_pwm))
# publish control command
ecu_pub.publish( ECU(motor_pwm, servo_pwm) )
# wait
rate.sleep()
if __name__ == '__main__':
try:
controller()
except rospy.ROSInterruptException:
pass
| Python | 0 |
b6363044cac862dd5bef54bc210c4beceaa90bdd | refactor fixtures and add 2 more tests for old collections | test/test_collection.py | test/test_collection.py | import pytest
import json
from girder.models.collection import Collection
from pytest_girder.assertions import assertStatusOk
@pytest.fixture
def collections(db):
yield [
Collection().createCollection('private collection', public=False),
Collection().createCollection('public collection', public=True)
]
@pytest.fixture
def collection(db):
yield Collection().createCollection('public collection', public=True)
@pytest.fixture
def oldCollection(db, collection):
del collection['meta']
collection = Collection().save(collection)
assert 'meta' not in collection
yield collection
@pytest.fixture
def oldCollections(db, collections):
for i, collection in enumerate(collections):
del collection['meta']
collections[i] = Collection().save(collection)
assert 'meta' not in collections[i]
yield collections
@pytest.fixture
def metadata():
yield {
'key': 'value',
'apple': 'fruit'
}
@pytest.fixture
def users(admin, user):
yield [admin, user, None]
@pytest.mark.parametrize('userIdx,expected', [
(0, 2),
(1, 1),
(2, 1)
])
def testCollectionsCount(server, userIdx, expected, collections, users):
resp = server.request(path='/collection/details', user=users[userIdx])
assertStatusOk(resp)
assert resp.json['nCollections'] == expected
def testSingleCollectionMetaExists(server, collection, admin):
resp = server.request(path='/collection/%s' % collection['_id'], user=admin)
assertStatusOk(resp)
assert 'meta' in resp.json
def testSingleOldCollectionMetaExists(server, oldCollection, admin):
resp = server.request(path='/collection/%s' % oldCollection['_id'], user=admin)
assertStatusOk(resp)
assert 'meta' in resp.json
def testListCollectionMetaExists(server, collections, admin):
resp = server.request(path='/collection', user=admin)
assertStatusOk(resp)
assert all(('meta' in x) for x in resp.json)
def testListOldCollectionMetaExists(server, oldCollections, admin):
resp = server.request(path='/collection', user=admin)
assertStatusOk(resp)
assert all(('meta' in x) for x in resp.json)
def testCollectionSetMetadata(server, collection, metadata, admin):
resp = server.request(
path='/collection/%s/metadata' % collection['_id'],
user=admin,
method='PUT',
body=json.dumps(metadata),
type='application/json')
assertStatusOk(resp)
assert resp.json['meta'] == metadata
# Check that fetching the object again yields the same result
newDoc = server.request(
path='/collection/%s' % collection['_id'],
user=admin,
method='GET')
assert newDoc.json['meta'] == metadata
def testCollectionDeleteMetadata(server, collection, metadata, admin):
collection = Collection().setMetadata(collection, metadata)
resp = server.request(
path='/collection/%s/metadata' % collection['_id'],
user=admin,
method='DELETE',
body=json.dumps(list(metadata.keys())),
type='application/json')
assertStatusOk(resp)
assert resp.json['meta'] != metadata
assert resp.json['meta'] == {}
newDoc = server.request(
path='/collection/%s' % collection['_id'],
user=admin,
method='GET')
assert newDoc.json['meta'] != metadata
assert newDoc.json['meta'] == {}
# Model Layer
def testCollectionModelSetMetadata(collection, metadata):
updatedCollection = Collection().setMetadata(collection, metadata)
assert updatedCollection['meta'] == metadata
# Model Layer
def testCollectionModelDeleteMetadata(collection, metadata):
collection = Collection().setMetadata(collection, metadata)
noMeta = Collection().deleteMetadata(collection, list(metadata.keys()))
assert noMeta['meta'] == {}
# Model Layer
def testCollectionLoad(collection, admin):
loadedCollection = Collection().load(collection['_id'], user=admin)
assert 'meta' in loadedCollection
# Model Layer
def testCollectionFilter(collection):
loadedCollection = Collection().filter(collection)
assert 'meta' in loadedCollection
| import pytest
import json
from girder.models.collection import Collection
from pytest_girder.assertions import assertStatusOk
@pytest.fixture
def collections(db):
yield [
Collection().createCollection('private collection', public=False),
Collection().createCollection('public collection', public=True)
]
@pytest.fixture
def collection(db):
yield Collection().createCollection('public collection', public=True)
@pytest.fixture
def collectionWithMeta(db, collection, metadata):
def _collectionWithMeta(_metadata=None):
if _metadata is None:
_metadata = metadata
return Collection().setMetadata(collection, _metadata)
yield _collectionWithMeta
@pytest.fixture
def metadata():
return {
'key': 'value',
'apple': 'fruit'
}
@pytest.fixture
def users(admin, user):
yield [admin, user, None]
@pytest.mark.parametrize('userIdx,expected', [
(0, 2),
(1, 1),
(2, 1)
])
def testCollectionsCount(server, userIdx, expected, collections, users):
resp = server.request(path='/collection/details', user=users[userIdx])
assertStatusOk(resp)
assert resp.json['nCollections'] == expected
def testSingleCollectionMetaExists(server, collection, admin):
resp = server.request(path='/collection/%s' % collection['_id'], user=admin)
assertStatusOk(resp)
assert 'meta' in resp.json
def testListCollectionMetaExists(server, collection, admin):
resp = server.request(path='/collection', user=admin)
assertStatusOk(resp)
assert all(('meta' in x) for x in resp.json)
def testCollectionSetMetadata(server, collection, metadata, admin):
resp = server.request(
path='/collection/%s/metadata' % collection['_id'],
user=admin,
method='PUT',
body=json.dumps(metadata),
type='application/json')
assertStatusOk(resp)
assert resp.json['meta'] == metadata
# Check that fetching the object again yields the same result
newDoc = server.request(
path='/collection/%s' % collection['_id'],
user=admin,
method='GET')
assert newDoc.json['meta'] == metadata
def testCollectionDeleteMetadata(server, collectionWithMeta, metadata, admin):
collection = collectionWithMeta(metadata)
resp = server.request(
path='/collection/%s/metadata' % collection['_id'],
user=admin,
method='DELETE',
body=json.dumps(list(metadata.keys())),
type='application/json')
assertStatusOk(resp)
assert resp.json['meta'] != metadata
assert resp.json['meta'] == {}
newDoc = server.request(
path='/collection/%s' % collection['_id'],
user=admin,
method='GET')
assert newDoc.json['meta'] != metadata
assert newDoc.json['meta'] == {}
# Model Layer
def testCollectionModelSetMetadata(collection, metadata):
updatedCollection = Collection().setMetadata(collection, metadata)
assert updatedCollection['meta'] == metadata
# Model Layer
def testCollectionModelDeleteMetadata(collectionWithMeta, metadata):
collection = collectionWithMeta(metadata)
noMeta = Collection().deleteMetadata(collection, list(metadata.keys()))
assert noMeta['meta'] == {}
# Model Layer
def testCollectionLoad(collection, admin):
loadedCollection = Collection().load(collection['_id'], user=admin)
assert 'meta' in loadedCollection
# Model Layer
def testCollectionFilter(collection):
loadedCollection = Collection().filter(collection)
assert 'meta' in loadedCollection
| Python | 0 |
9ac7be20f3b25ca768f7260900928b2c7224f470 | Improve correlator test | test/test_correlator.py | test/test_correlator.py |
# http://www.apache.org/licenses/LICENSE-2.0
import unittest
import time
import numpy as np
import auspex.config as config
config.auspex_dummy_mode = True
from auspex.experiment import Experiment
from auspex.stream import DataStream, DataAxis, DataStreamDescriptor, OutputConnector
from auspex.filters.debug import Print, Passthrough
from auspex.filters.correlator import Correlator
from auspex.filters.io import DataBuffer
from auspex.log import logger
class CorrelatorExperiment(Experiment):
# DataStreams
chan1 = OutputConnector()
chan2 = OutputConnector()
# Constants
samples = 100
idx_1 = 0
idx_2 = 0
# For correlator verification
vals = 2.0 + np.linspace(0, 10*np.pi, samples)
def init_streams(self):
self.chan1.add_axis(DataAxis("samples", list(range(self.samples))))
self.chan2.add_axis(DataAxis("samples", list(range(self.samples))))
def run(self):
logger.debug("Data taker running (inner loop)")
while self.idx_1 < self.samples or self.idx_2 < self.samples:
# Generate random number of samples:
new_1 = np.random.randint(1,5)
new_2 = np.random.randint(1,5)
if self.chan1.points_taken.value < self.chan1.num_points():
if self.chan1.points_taken.value + new_1 > self.chan1.num_points():
new_1 = self.chan1.num_points() - self.chan1.points_taken.value
self.chan1.push(self.vals[self.idx_1:self.idx_1+new_1])
self.idx_1 += new_1
if self.chan2.points_taken.value < self.chan2.num_points():
if self.chan2.points_taken.value + new_2 > self.chan2.num_points():
new_2 = self.chan2.num_points() - self.chan2.points_taken.value
self.chan2.push(self.vals[self.idx_2:self.idx_2+new_2])
self.idx_2 += new_2
time.sleep(0.002)
logger.debug("Idx_1: %d, Idx_2: %d", self.idx_1, self.idx_2)
class CorrelatorTestCase(unittest.TestCase):
def test_correlator(self):
exp = CorrelatorExperiment()
corr = Correlator(name='corr')
buff = DataBuffer()
edges = [(exp.chan1, corr.sink),
(exp.chan2, corr.sink),
(corr.source, buff.sink)]
exp.set_graph(edges)
exp.run_sweeps()
time.sleep(0.1)
corr_data = buff.output_data['corr']
expected_data = exp.vals*exp.vals
self.assertTrue(np.abs(np.sum(corr_data - expected_data)) <= 1e-1)
if __name__ == '__main__':
unittest.main()
|
# http://www.apache.org/licenses/LICENSE-2.0
import unittest
import time
import numpy as np
import auspex.config as config
config.auspex_dummy_mode = True
from auspex.experiment import Experiment
from auspex.stream import DataStream, DataAxis, DataStreamDescriptor, OutputConnector
from auspex.filters.debug import Print, Passthrough
from auspex.filters.correlator import Correlator
from auspex.filters.io import DataBuffer
from auspex.log import logger
class CorrelatorExperiment(Experiment):
# DataStreams
chan1 = OutputConnector()
chan2 = OutputConnector()
# Constants
samples = 100
idx_1 = 0
idx_2 = 0
# For correlator verification
vals = 2.0 + np.linspace(0, 10*np.pi, samples)
def init_streams(self):
self.chan1.add_axis(DataAxis("samples", list(range(self.samples))))
self.chan2.add_axis(DataAxis("samples", list(range(self.samples))))
def run(self):
logger.debug("Data taker running (inner loop)")
while self.idx_1 < self.samples or self.idx_2 < self.samples:
# Generate random number of samples:
new_1 = np.random.randint(1,5)
new_2 = np.random.randint(1,5)
if self.chan1.points_taken.value < self.chan1.num_points():
if self.chan1.points_taken.value + new_1 > self.chan1.num_points():
new_1 = self.chan1.num_points() - self.chan1.points_taken.value
self.chan1.push(self.vals[self.idx_1:self.idx_1+new_1])
self.idx_1 += new_1
if self.chan2.points_taken.value < self.chan2.num_points():
if self.chan2.points_taken.value + new_2 > self.chan2.num_points():
new_2 = self.chan2.num_points() - self.chan2.points_taken.value
self.chan2.push(self.vals[self.idx_2:self.idx_2+new_2])
self.idx_2 += new_2
time.sleep(0.002)
logger.debug("Idx_1: %d, Idx_2: %d", self.idx_1, self.idx_2)
class CorrelatorTestCase(unittest.TestCase):
def test_correlator(self):
exp = CorrelatorExperiment()
corr = Correlator(name='corr')
buff = DataBuffer()
edges = [(exp.chan1, corr.sink),
(exp.chan2, corr.sink),
(corr.source, buff.sink)]
exp.set_graph(edges)
exp.run_sweeps()
corr_data = buff.output_data['corr']
expected_data = exp.vals*exp.vals
self.assertTrue(np.abs(np.sum(corr_data - expected_data)) <= 1e-4)
if __name__ == '__main__':
unittest.main()
| Python | 0.000001 |
99241ab49a0a76472bb6f107a078248782af9626 | fix string_types in _compat | myhdl/_compat.py | myhdl/_compat.py | import sys
PY2 = sys.version_info[0] == 2
if not PY2:
string_types = (str,)
integer_types = (int,)
long = int
import builtins
else:
string_types = (str, unicode)
integer_types = (int, long)
long = long
import __builtin__ as builtins
| import sys
PY2 = sys.version_info[0] == 2
if not PY2:
string_types = (str, unicode)
integer_types = (int,)
long = int
import builtins
else:
string_types = (str,)
integer_types = (int, long)
long = long
import __builtin__ as builtins
| Python | 0.999999 |
aa9ce7092801e7ed8f3f86df0d1067279d13784d | Add armv7 support to create_ios_framework script (#4942) | sky/tools/create_ios_framework.py | sky/tools/create_ios_framework.py | #!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import subprocess
import shutil
import sys
import os
def main():
parser = argparse.ArgumentParser(description='Creates Flutter.framework')
parser.add_argument('--dst', type=str, required=True)
# TODO(cbracken) eliminate --device-out-dir and make armv7-out-dir and
# arm64-out-dir required once bot recipe is updated.
parser.add_argument('--device-out-dir', type=str, required=False)
parser.add_argument('--arm64-out-dir', type=str, required=False)
parser.add_argument('--armv7-out-dir', type=str, required=False)
parser.add_argument('--simulator-out-dir', type=str, required=True)
args = parser.parse_args()
if not (args.arm64_out_dir or args.device_out_dir):
print 'One of --device-out-dir or --arm64-out-dir must be specified'
fat_framework = os.path.join(args.dst, 'Flutter.framework')
arm64_framework = os.path.join(args.arm64_out_dir if args.arm64_out_dir else args.device_out_dir, 'Flutter.framework')
armv7_framework = os.path.join(args.armv7_out_dir, 'Flutter.framework') if args.armv7_out_dir else None
simulator_framework = os.path.join(args.simulator_out_dir, 'Flutter.framework')
arm64_dylib = os.path.join(arm64_framework, 'Flutter')
armv7_dylib = os.path.join(armv7_framework, 'Flutter') if args.armv7_out_dir else None
simulator_dylib = os.path.join(simulator_framework, 'Flutter')
if not os.path.isdir(arm64_framework):
print 'Cannot find iOS arm64 Framework at', arm64_framework
return 1
# TODO(cbracken): require armv7 once bot recipe is updated.
if armv7_framework and not os.path.isdir(armv7_framework):
print 'Cannot find iOS armv7 Framework at', armv7_framework
return 1
if not os.path.isdir(simulator_framework):
print 'Cannot find iOS simulator Framework at', simulator_framework
return 1
if not os.path.isfile(arm64_dylib):
print 'Cannot find iOS arm64 dylib at', arm64_dylib
return 1
# TODO(cbracken): require armv7 once bot recipe is updated.
if armv7_dylib and not os.path.isfile(armv7_dylib):
print 'Cannot find iOS armv7 dylib at', armv7_dylib
return 1
if not os.path.isfile(simulator_dylib):
print 'Cannot find iOS simulator dylib at', simulator_dylib
return 1
shutil.rmtree(fat_framework, True)
shutil.copytree(arm64_framework, fat_framework)
# TODO(cbracken): require armv7 once bot recipe is updated.
dylibs = [arm64_dylib, simulator_dylib]
if armv7_dylib:
dylibs += [armv7_dylib]
subprocess.call(['lipo'] + dylibs + [
'-create',
'-output',
os.path.join(fat_framework, 'Flutter')
])
if __name__ == '__main__':
sys.exit(main())
| #!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import subprocess
import shutil
import sys
import os
def main():
parser = argparse.ArgumentParser(description='Creates Flutter.framework')
parser.add_argument('--dst', type=str, required=True)
parser.add_argument('--device-out-dir', type=str, required=True)
parser.add_argument('--simulator-out-dir', type=str, required=True)
args = parser.parse_args()
fat_framework = os.path.join(args.dst, 'Flutter.framework')
device_framework = os.path.join(args.device_out_dir, 'Flutter.framework')
simulator_framework = os.path.join(args.simulator_out_dir, 'Flutter.framework')
device_dylib = os.path.join(device_framework, 'Flutter')
simulator_dylib = os.path.join(simulator_framework, 'Flutter')
if not os.path.isdir(device_framework):
print 'Cannot find iOS device Framework at', device_framework
return 1
if not os.path.isdir(simulator_framework):
print 'Cannot find iOS simulator Framework at', simulator_framework
return 1
if not os.path.isfile(device_dylib):
print 'Cannot find iOS device dylib at', device_dylib
return 1
if not os.path.isfile(simulator_dylib):
print 'Cannot find iOS simulator dylib at', simulator_dylib
return 1
shutil.rmtree(fat_framework, True)
shutil.copytree(device_framework, fat_framework)
subprocess.call([
'lipo',
device_dylib,
simulator_dylib,
'-create',
'-output',
os.path.join(fat_framework, 'Flutter')
])
if __name__ == '__main__':
sys.exit(main())
| Python | 0 |
97fa2f41bb00d2ceb0726b9cffdaa7c4ea97bc45 | Remove trailing whitespace from test/test_xml_parser.py | test/test_xml_parser.py | test/test_xml_parser.py | import unittest
from apel.db.loader.xml_parser import XMLParser, get_primary_ns
class XMLParserTest(unittest.TestCase):
'''
Test case for XMLParser
'''
data1 = '''<?xml version="1.0"?>
<ns:node xmlns:ns="http://fake.namespace.org" xmlns:ons="http://fake.othernamespace.org">
<ns:title>Some title</ns:title>
<ns:values>
<ns:value>data1</ns:value>
<ns:value>data2</ns:value>
</ns:values>
<ns:attributes ns:attr="value">
<ns:attribute ns:id="test1">attribute 1</ns:attribute>
<ns:attribute ns:id="test2">attribute 2</ns:attribute>
</ns:attributes>
<ns:mixednamespace>
<ons:attribute ons:type="test3">test4</ons:attribute>
<ons:attribute ons:type="nope">notthis</ons:attribute>
</ns:mixednamespace>
</ns:node>'''
def setUp(self):
self.parser = XMLParser(self.data1)
self.parser.NAMESPACE = 'http://fake.namespace.org'
self.parser.OTHERNAMESPACE = 'http://fake.othernamespace.org'
def test_get_text(self):
values = self.parser.doc.getElementsByTagNameNS(self.parser.NAMESPACE, 'value')
self.assertEqual('data1', self.parser.getText(values[0].childNodes))
self.assertEqual('data2', self.parser.getText(values[1].childNodes))
def test_get_tag_by_attr(self):
attributes = self.parser.doc.getElementsByTagNameNS(self.parser.NAMESPACE, 'attribute')
#print len(attributes)
self.assertEqual(len(self.parser.getTagByAttr(attributes, 'id', 'test1')), 1)
self.assertEqual(len(self.parser.getTagByAttr(attributes, 'id', 'test2')), 1)
def test_mixed_namespace(self):
"""Check that nested elements with a diff namespace can be retrieved."""
# Get all attribute nodes in 'ons' namespace
allnodes = self.parser.doc.getElementsByTagNameNS(self.parser.OTHERNAMESPACE, 'attribute')
# Get just the type=test3 nodes in 'ons' namespace
nodes = self.parser.getTagByAttr(allnodes, 'type', 'test3', self.parser.OTHERNAMESPACE)
# Check value in the first node is as expected
self.assertEqual(self.parser.getText(nodes[0].childNodes), 'test4')
def test_get_attr(self):
attributes_tag = self.parser.doc.getElementsByTagNameNS(self.parser.NAMESPACE, 'attributes')[0]
self.assertEqual(self.parser.getAttr(attributes_tag, 'attr'), 'value')
def test_get_primary_ns(self):
test_xml = '<?xml version="1.0" ?><ur:UsageRecord xmlns:ur="booboob"/>'
ns = get_primary_ns(test_xml)
self.assertEqual("booboob", ns)
if __name__ == '__main__':
unittest.main()
| import unittest
from apel.db.loader.xml_parser import XMLParser, get_primary_ns
class XMLParserTest(unittest.TestCase):
'''
Test case for XMLParser
'''
data1 = '''<?xml version="1.0"?>
<ns:node xmlns:ns="http://fake.namespace.org" xmlns:ons="http://fake.othernamespace.org">
<ns:title>Some title</ns:title>
<ns:values>
<ns:value>data1</ns:value>
<ns:value>data2</ns:value>
</ns:values>
<ns:attributes ns:attr="value">
<ns:attribute ns:id="test1">attribute 1</ns:attribute>
<ns:attribute ns:id="test2">attribute 2</ns:attribute>
</ns:attributes>
<ns:mixednamespace>
<ons:attribute ons:type="test3">test4</ons:attribute>
<ons:attribute ons:type="nope">notthis</ons:attribute>
</ns:mixednamespace>
</ns:node>'''
def setUp(self):
self.parser = XMLParser(self.data1)
self.parser.NAMESPACE = 'http://fake.namespace.org'
self.parser.OTHERNAMESPACE = 'http://fake.othernamespace.org'
def test_get_text(self):
values = self.parser.doc.getElementsByTagNameNS(self.parser.NAMESPACE, 'value')
self.assertEqual('data1', self.parser.getText(values[0].childNodes))
self.assertEqual('data2', self.parser.getText(values[1].childNodes))
def test_get_tag_by_attr(self):
attributes = self.parser.doc.getElementsByTagNameNS(self.parser.NAMESPACE, 'attribute')
#print len(attributes)
self.assertEqual(len(self.parser.getTagByAttr(attributes, 'id', 'test1')), 1)
self.assertEqual(len(self.parser.getTagByAttr(attributes, 'id', 'test2')), 1)
def test_mixed_namespace(self):
"""Check that nested elements with a diff namespace can be retrieved."""
# Get all attribute nodes in 'ons' namespace
allnodes = self.parser.doc.getElementsByTagNameNS(self.parser.OTHERNAMESPACE, 'attribute')
# Get just the type=test3 nodes in 'ons' namespace
nodes = self.parser.getTagByAttr(allnodes, 'type', 'test3', self.parser.OTHERNAMESPACE)
# Check value in the first node is as expected
self.assertEqual(self.parser.getText(nodes[0].childNodes), 'test4')
def test_get_attr(self):
attributes_tag = self.parser.doc.getElementsByTagNameNS(self.parser.NAMESPACE, 'attributes')[0]
self.assertEqual(self.parser.getAttr(attributes_tag, 'attr'), 'value')
def test_get_primary_ns(self):
test_xml = '<?xml version="1.0" ?><ur:UsageRecord xmlns:ur="booboob"/>'
ns = get_primary_ns(test_xml)
self.assertEqual("booboob", ns)
if __name__ == '__main__':
unittest.main()
| Python | 0.999959 |
37d20fe09aa19dde2ac50816958d9b1372bc76eb | indent (tabs!) | integration-test/1251-early-track-roads.py | integration-test/1251-early-track-roads.py | from . import FixtureTest
class EarlyUnclassifiedRoads(FixtureTest):
def test_early_track_road_z11_grade1_paved(self):
# asphalt, grade1, track (default zoom 11, no demotion)
self.load_fixtures([
'https://www.openstreetmap.org/way/329375413',
])
self.assert_has_feature(
11, 396, 781, 'roads',
{'kind': 'path',
'kind_detail': 'track'})
def test_early_track_road_z12_grade1_private(self):
# private, grade1, track (since private demoted from zoom 11)
self.load_fixtures([
'https://www.openstreetmap.org/way/10611894',
])
self.assert_no_matching_feature(
11, 330, 781, 'roads',
{ 'kind': 'path',
'kind_detail': 'track'})
self.assert_has_feature(
12, 661, 1562, 'roads',
{'kind': 'path',
'kind_detail': 'track'})
def test_early_track_road_z12_grade1_dirt(self):
# dirt, grade1, track (since dirt demoted from zoom 11)
self.load_fixtures([
'https://www.openstreetmap.org/way/286309045',
])
self.assert_no_matching_feature(
11, 399, 782, 'roads',
{ 'kind': 'path',
'kind_detail': 'track'})
self.assert_has_feature(
12, 799, 1565, 'roads',
{'kind': 'path',
'kind_detail': 'track'})
def test_early_track_road_z12_grade2_dirt(self):
# dirt, grade2, track (default zoom 12, no demotion)
self.load_fixtures([
'https://www.openstreetmap.org/way/330951783',
])
self.assert_has_feature(
12, 778, 1575, 'roads',
{'kind': 'path',
'kind_detail': 'track'})
def test_remain_z13_track_road_no_grade1(self):
# gravel, track (no grade so default track at zoom 13)
self.load_fixtures([
'https://www.openstreetmap.org/way/313839575',
])
self.assert_has_feature(
13, 1561, 3146, 'roads',
{'kind': 'path',
'kind_detail': 'track'})
def test_remain_z13_track_road_no_grade2(self):
# gravel, track (no grade so default track at zoom 13)
self.load_fixtures([
'https://www.openstreetmap.org/way/14351002',
])
self.assert_has_feature(
13, 1500, 3170, 'roads',
{'kind': 'path',
'kind_detail': 'track'})
def test_remain_z13_track_road_grade5_gravel(self):
# gravel, grade5, track (fails zoom 12 test
# so default zoom 13 for track)
self.load_fixtures([
'https://www.openstreetmap.org/way/10103047',
])
self.assert_has_feature(
13, 1550, 3167, 'roads',
{'kind': 'path',
'kind_detail': 'track'})
| from . import FixtureTest
class EarlyUnclassifiedRoads(FixtureTest):
def test_early_track_road_z11_grade1_paved(self):
# asphalt, grade1, track (default zoom 11, no demotion)
self.load_fixtures([
'https://www.openstreetmap.org/way/329375413',
])
self.assert_has_feature(
11, 396, 781, 'roads',
{'kind': 'path',
'kind_detail': 'track'})
def test_early_track_road_z12_grade1_private(self):
# private, grade1, track (since private demoted from zoom 11)
self.load_fixtures([
'https://www.openstreetmap.org/way/10611894',
])
self.assert_no_matching_feature(
11, 330, 781, 'roads',
{ 'kind': 'path',
'kind_detail': 'track'})
self.assert_has_feature(
12, 661, 1562, 'roads',
{'kind': 'path',
'kind_detail': 'track'})
def test_early_track_road_z12_grade1_dirt(self):
# dirt, grade1, track (since dirt demoted from zoom 11)
self.load_fixtures([
'https://www.openstreetmap.org/way/286309045',
])
self.assert_no_matching_feature(
11, 399, 782, 'roads',
{ 'kind': 'path',
'kind_detail': 'track'})
self.assert_has_feature(
12, 799, 1565, 'roads',
{'kind': 'path',
'kind_detail': 'track'})
def test_early_track_road_z12_grade2_dirt(self):
# dirt, grade2, track (default zoom 12, no demotion)
self.load_fixtures([
'https://www.openstreetmap.org/way/330951783',
])
self.assert_has_feature(
12, 778, 1575, 'roads',
{'kind': 'path',
'kind_detail': 'track'})
def test_remain_z13_track_road_no_grade1(self):
# gravel, track (no grade so default track at zoom 13)
self.load_fixtures([
'https://www.openstreetmap.org/way/313839575',
])
self.assert_has_feature(
13, 1561, 3146, 'roads',
{'kind': 'path',
'kind_detail': 'track'})
def test_remain_z13_track_road_no_grade2(self):
# gravel, track (no grade so default track at zoom 13)
self.load_fixtures([
'https://www.openstreetmap.org/way/14351002',
])
self.assert_has_feature(
13, 1500, 3170, 'roads',
{'kind': 'path',
'kind_detail': 'track'})
def test_remain_z13_track_road_grade5_gravel(self):
# gravel, grade5, track (fails zoom 12 test
# so default zoom 13 for track)
self.load_fixtures([
'https://www.openstreetmap.org/way/10103047',
])
self.assert_has_feature(
13, 1550, 3167, 'roads',
{'kind': 'path',
'kind_detail': 'track'})
| Python | 0 |
2e761252093b41d33cf57599ba8f05ec01e90a6a | delete noneffective codes | fate_flow/flowpy/client/base.py | fate_flow/flowpy/client/base.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import json
import inspect
import requests
import traceback
from fate_flow.flowpy.client.api.base import BaseFlowAPI
def _is_api_endpoint(obj):
return isinstance(obj, BaseFlowAPI)
class BaseFlowClient:
API_BASE_URL = ''
def __new__(cls, *args, **kwargs):
self = super().__new__(cls)
api_endpoints = inspect.getmembers(self, _is_api_endpoint)
for name, api in api_endpoints:
api_cls = type(api)
api = api_cls(self)
setattr(self, name, api)
return self
def __init__(self, ip, port, version):
self._http = requests.Session()
self.ip = ip
self.port = port
self.version = version
def _request(self, method, url, **kwargs):
request_url = self.API_BASE_URL + url
try:
response = self._http.request(method=method, url=request_url, **kwargs)
return response
except Exception as e:
exc_type, exc_value, exc_traceback_obj = sys.exc_info()
response = {'retcode': 100, 'retmsg': str(e),
'traceback': traceback.format_exception(exc_type, exc_value, exc_traceback_obj)}
if 'Connection refused' in str(e):
response['retmsg'] = 'Connection refused, Please check if the fate flow service is started'
del response['traceback']
return response
@staticmethod
def _decode_result(response):
try:
result = json.loads(response.content.decode('utf-8', 'ignore'), strict=False)
except (TypeError, ValueError):
return response
else:
return result
def _handle_result(self, response):
if isinstance(response, requests.models.Response):
return response.json()
elif isinstance(response, dict):
return response
else:
return self._decode_result(response)
def get(self, url, **kwargs):
return self._request(method='get', url=url, **kwargs)
def post(self, url, **kwargs):
return self._request(method='post', url=url, **kwargs)
| #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import json
import inspect
import requests
import traceback
from fate_flow.flowpy.client.api.base import BaseFlowAPI
def _is_api_endpoint(obj):
return isinstance(obj, BaseFlowAPI)
class BaseFlowClient:
API_BASE_URL = ''
def __new__(cls, *args, **kwargs):
self = super().__new__(cls)
api_endpoints = inspect.getmembers(self, _is_api_endpoint)
for name, api in api_endpoints:
print('name: {}, api: {}'.format(name, api))
api_cls = type(api)
api = api_cls(self)
setattr(self, name, api)
return self
def __init__(self, ip, port, version):
self._http = requests.Session()
self.ip = ip
self.port = port
self.version = version
def _request(self, method, url, **kwargs):
request_url = self.API_BASE_URL + url
try:
response = self._http.request(method=method, url=request_url, **kwargs)
return response
except Exception as e:
exc_type, exc_value, exc_traceback_obj = sys.exc_info()
response = {'retcode': 100, 'retmsg': str(e),
'traceback': traceback.format_exception(exc_type, exc_value, exc_traceback_obj)}
if 'Connection refused' in str(e):
response['retmsg'] = 'Connection refused, Please check if the fate flow service is started'
del response['traceback']
return response
@staticmethod
def _decode_result(response):
try:
result = json.loads(response.content.decode('utf-8', 'ignore'), strict=False)
except (TypeError, ValueError):
return response
else:
return result
def _handle_result(self, response):
if isinstance(response, requests.models.Response):
return response.json()
elif isinstance(response, dict):
return response
else:
return self._decode_result(response)
def get(self, url, **kwargs):
return self._request(method='get', url=url, **kwargs)
def post(self, url, **kwargs):
return self._request(method='post', url=url, **kwargs)
| Python | 0.000003 |
3662beb1afc72aaf00b1f9fe3ae0577cf2a4f138 | update docu | robo/solver/base_solver.py | robo/solver/base_solver.py | '''
Created on Aug 21, 2015
@author: Aaron Klein
'''
import os
import csv
import time
import errno
import logging
logger = logging.getLogger(__name__)
class BaseSolver(object):
def __init__(self, acquisition_func=None, model=None,
maximize_func=None, task=None, save_dir=None):
"""
Base class which specifies the interface for solvers. Derive from
this class if you implement your own solver.
Parameters
----------
acquisition_func: AcquisitionFunctionObject
The acquisition function which will be maximized.
model: ModelObject
Model (i.e. GaussianProcess, RandomForest) that models our current
believe of the objective function.
task: TaskObject
Task object that contains the objective function and additional
meta information such as the lower and upper bound of the search
space.
maximize_func: MaximizerObject
Optimization method that is used to maximize the acquisition
function
save_dir: String
Output path
"""
self.model = model
self.acquisition_func = acquisition_func
self.maximize_func = maximize_func
self.task = task
self.save_dir = save_dir
if self.save_dir is not None:
self.create_save_dir()
def create_save_dir(self):
"""
Creates the save directory to store the runs
"""
try:
os.makedirs(self.save_dir)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
self.output_file = open(os.path.join(self.save_dir, 'results.csv'), 'w')
self.csv_writer = None
def get_observations(self):
return self.X, self.Y
def get_model(self):
if self.model is None:
logger.info("No model trained yet!")
return self.model
def run(self, num_iterations=10, X=None, Y=None, overwrite=False):
"""
The main optimization loop
Parameters
----------
num_iterations: int
The number of iterations
X: np.ndarray(N,D)
Initial points that are already evaluated
Y: np.ndarray(N,1)
Function values of the already evaluated points
Returns
-------
np.ndarray(1,D)
Incumbent
np.ndarray(1,1)
(Estimated) function value of the incumbent
"""
pass
def choose_next(self, X=None, Y=None):
"""
Suggests a new point to evaluate.
Parameters
----------
num_iterations: int
The number of iterations
X: np.ndarray(N,D)
Initial points that are already evaluated
Y: np.ndarray(N,1)
Function values of the already evaluated points
Returns
-------
np.ndarray(1,D)
Suggested point
"""
pass
def save_iteration(self, it, **kwargs):
"""
Saves the meta information of an iteration.
"""
if self.csv_writer is None:
self.fieldnames = ['iteration', 'config', 'fval',
'incumbent', 'incumbent_val',
'time_func_eval', 'time_overhead', 'runtime']
for key in kwargs:
self.fieldnames.append(key)
self.csv_writer = csv.DictWriter(self.output_file,
fieldnames=self.fieldnames)
self.csv_writer.writeheader()
output = dict()
output["iteration"] = it
output['config'] = self.X[-1]
output['fval'] = self.Y[-1]
output['incumbent'] = self.incumbent
output['incumbent_val'] = self.incumbent_value
output['time_func_eval'] = self.time_func_eval[-1]
output['time_overhead'] = self.time_overhead[-1]
output['runtime'] = time.time() - self.time_start
if kwargs is not None:
for key, value in kwargs.items():
output[key] = str(value)
self.csv_writer.writerow(output)
self.output_file.flush()
| '''
Created on Aug 21, 2015
@author: Aaron Klein
'''
import os
import csv
import time
import errno
import logging
logger = logging.getLogger(__name__)
class BaseSolver(object):
'''
classdocs
'''
def __init__(self, acquisition_func=None, model=None,
maximize_func=None, task=None, save_dir=None):
'''
Constructor
'''
self.model = model
self.acquisition_func = acquisition_func
self.maximize_func = maximize_func
self.task = task
self.save_dir = save_dir
if self.save_dir is not None:
self.create_save_dir()
def init_last_iteration(self):
"""
Loads the last iteration from a previously stored run
:return: the previous observations
"""
raise("Not yet implemented")
def from_iteration(self, save_dir, i):
"""
Loads the data from a previous run
:param save_dir: directory for the data
:param i: index of iteration
:return:
"""
raise("Not yet implemented")
def create_save_dir(self):
"""
Creates the save directory to store the runs
"""
try:
os.makedirs(self.save_dir)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
self.output_file = open(os.path.join(self.save_dir, 'results.csv'), 'w')
self.csv_writer = None
def get_observations(self):
return self.X, self.Y
def get_model(self):
if self.model is None:
logger.info("No model trained yet!")
return self.model
def run(self, num_iterations=10, X=None, Y=None, overwrite=False):
"""
The main Bayesian optimization loop
:param num_iterations: number of iterations to perform
:param X: (optional) Initial observations. If a run
continues these observations will be overwritten by the load
:param Y: (optional) Initial observations. If a run
continues these observations will be overwritten by the load
:param overwrite: data present in save_dir will be deleted
and overwritten, otherwise the run will be continued.
:return: the incumbent
"""
pass
def choose_next(self, X=None, Y=None):
"""
Chooses the next configuration by optimizing the acquisition function.
:param X: The point that have been where the objective function has been evaluated
:param Y: The function values of the evaluated points
:return: The next promising configuration
"""
pass
def save_iteration(self, it, **kwargs):
"""
Saves an iteration.
"""
if self.csv_writer is None:
self.fieldnames = ['iteration', 'config', 'fval',
'incumbent', 'incumbent_val',
'time_func_eval', 'time_overhead', 'runtime']
for key in kwargs:
self.fieldnames.append(key)
self.csv_writer = csv.DictWriter(self.output_file,
fieldnames=self.fieldnames)
self.csv_writer.writeheader()
output = dict()
output["iteration"] = it
output['config'] = self.X[-1]
output['fval'] = self.Y[-1]
output['incumbent'] = self.incumbent
output['incumbent_val'] = self.incumbent_value
output['time_func_eval'] = self.time_func_eval[-1]
output['time_overhead'] = self.time_overhead[-1]
output['runtime'] = time.time() - self.time_start
if kwargs is not None:
for key, value in kwargs.items():
output[key] = str(value)
self.csv_writer.writerow(output)
self.output_file.flush()
| Python | 0.000001 |
a9cfd2bc842631431e20b6c13d3d98535b643b3b | Fix mispelling | ixdjango/management/commands/copystatic.py | ixdjango/management/commands/copystatic.py | """
Copy static files to nginx location
.. moduleauthor:: Infoxchange Development Team <development@infoxchange.net.au>
"""
import logging
import os
from shutil import copy2, copystat
from django.conf import settings
from django.core.management.base import NoArgsCommand
LOGGER = logging.getLogger(__name__)
def copytree(src, dst):
"""
A version of copytree I don't hate
"""
if not (os.path.exists(dst) and os.path.isdir(dst)):
LOGGER.info("Creating directory %s", dst)
os.makedirs(dst)
copystat(src, dst)
for name in os.listdir(src):
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
if os.path.isdir(srcname):
LOGGER.debug("Copying directory %s", name)
copytree(srcname, dstname)
else:
LOGGER.debug("Copying file %s", name)
copy2(srcname, dstname)
class Command(NoArgsCommand):
"""
Copy static files to nginx location
"""
def handle_noargs(self, **options):
try:
static_dir = settings.NGINX_STATIC_DIR
except AttributeError:
static_dir = None
if not static_dir:
LOGGER.info("static dir not defined, copy static content skipped")
return
LOGGER.info("Copying static content to %s", static_dir)
copytree(settings.STATIC_ROOT, static_dir)
| """
Copy static files to nginx location
.. moduleauthor:: Infoxchange Development Team <development@infoxchange.net.au>
"""
import logging
import os
from shutil import copy2, copystat
from django.conf import settings
from django.core.management.base import NoArgsCommand
LOGGER = logging.getLogger(__name__)
def copytree(src, dst):
"""
A version of copytree I don't hate
"""
if not (os.path.exists(dst) and os.path.isdir(dst)):
LOGGER.INFO("Creating directory %s", dst)
os.makedirs(dst)
copystat(src, dst)
for name in os.listdir(src):
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
if os.path.isdir(srcname):
LOGGER.DEBUG("Copying directory %s", name)
copytree(srcname, dstname)
else:
LOGGER.DEBUG("Copying file %s", name)
copy2(srcname, dstname)
class Command(NoArgsCommand):
"""
Copy static files to nginx location
"""
def handle_noargs(self, **options):
try:
static_dir = settings.NGINX_STATIC_DIR
except AttributeError:
static_dir = None
if not static_dir:
LOGGER.info("static dir not defined, copy static content skipped")
return
LOGGER.info("Copying static content to %s", static_dir)
copytree(settings.STATIC_ROOT, static_dir)
| Python | 0.999687 |
7888b2b14a26deead0b4f1559b755fcf17cbb6f8 | correct style link | cte-collation-poc/fullbook.py | cte-collation-poc/fullbook.py | #!/usr/bin/env python
from __future__ import print_function
import sys
import argparse
import requests
from lxml import etree
ARCHIVEJS = 'http://archive.cnx.org/contents/{}.json'
ARCHIVEHTML = 'http://archive.cnx.org/contents/{}.html'
NS = {'x': 'http://www.w3.org/1999/xhtml'}
HTMLWRAPPER = """<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>{title}</title>
<link href="styles.css" rel="stylesheet" type="text/css"/>
</head>
</html>
"""
parts = ['page', 'chapter', 'unit', 'book', 'series']
def debug(*args, **kwargs):
if verbose:
print(*args, file=sys.stderr, **kwargs)
def main(code, html_out=sys.stdout):
"""Generate complete book HTML."""
res = requests.get(ARCHIVEJS.format(code))
b_json = res.json()
html = etree.fromstring(HTMLWRAPPER.format(title=b_json['title']))
book_elem = etree.SubElement(html, 'body', attrib={'data-type': 'book'})
html_nest([b_json['tree']], book_elem)
print(etree.tostring(html), file=html_out)
def html_nest(tree, parent):
"""Recursively construct HTML nested div version of book tree."""
for node in tree:
div_elem = etree.SubElement(parent, 'div')
if node['id'] != 'subcol':
page_nodes(node['id'], div_elem)
mytype = parts.index(div_elem.get('data-type'))
if parent.get('data-type'):
parenttype = parts.index(parent.get('data-type'))
if parenttype <= mytype:
parent.set('data-type', parts[mytype + 1])
else:
parent.set('data-type', parts[mytype + 1])
title_xpath = etree.XPath("//x:div[@data-type='document-title']",
namespaces=NS)
try:
title_elem = title_xpath(div_elem)[0]
except IndexError:
title_elem = etree.SubElement(div_elem, 'div',
attrib={'data-type':
'document-title'})
title_elem.text = node['title']
debug(node['title'])
if 'contents' in node:
elem = etree.SubElement(div_elem, 'div')
html_nest(node['contents'], elem)
def page_nodes(page_id, elem):
"""Fetch page return body wrapped in provided element."""
debug(page_id)
res = requests.get(ARCHIVEHTML.format(page_id))
xpath = etree.XPath('//x:body', namespaces=NS)
body = xpath(etree.fromstring(res.content))[0]
elem.set('data-type', 'page')
for c in body.iterchildren():
elem.append(c)
return elem
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Assemble complete book "
"as single HTML file")
parser.add_argument("bookid", help="Identifier of book: "
"<uuid|shortId>[@ver]")
parser.add_argument("html_out", nargs="?",
type=argparse.FileType('w'),
help="assembled HTML file output (default stdout)",
default=sys.stdout)
parser.add_argument('-v', '--verbose', action='store_true',
help='Send debugging info to stderr')
args = parser.parse_args()
verbose = args.verbose
main(args.bookid, args.html_out)
| #!/usr/bin/env python
from __future__ import print_function
import sys
import argparse
import requests
from lxml import etree
ARCHIVEJS = 'http://archive.cnx.org/contents/{}.json'
ARCHIVEHTML = 'http://archive.cnx.org/contents/{}.html'
NS = {'x': 'http://www.w3.org/1999/xhtml'}
HTMLWRAPPER = """<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>{title}</title>
<style href="styles.css" rel="stylsheet" type="text/css"/>
</head>
</html>
"""
parts = ['page', 'chapter', 'unit', 'book', 'series']
def debug(*args, **kwargs):
if verbose:
print(*args, file=sys.stderr, **kwargs)
def main(code, html_out=sys.stdout):
"""Generate complete book HTML."""
res = requests.get(ARCHIVEJS.format(code))
b_json = res.json()
html = etree.fromstring(HTMLWRAPPER.format(title=b_json['title']))
book_elem = etree.SubElement(html, 'body', attrib={'data-type': 'book'})
html_nest([b_json['tree']], book_elem)
print(etree.tostring(html), file=html_out)
def html_nest(tree, parent):
"""Recursively construct HTML nested div version of book tree."""
for node in tree:
div_elem = etree.SubElement(parent, 'div')
if node['id'] != 'subcol':
page_nodes(node['id'], div_elem)
mytype = parts.index(div_elem.get('data-type'))
if parent.get('data-type'):
parenttype = parts.index(parent.get('data-type'))
if parenttype <= mytype:
parent.set('data-type', parts[mytype + 1])
else:
parent.set('data-type', parts[mytype + 1])
title_xpath = etree.XPath("//x:div[@data-type='document-title']",
namespaces=NS)
try:
title_elem = title_xpath(div_elem)[0]
except IndexError:
title_elem = etree.SubElement(div_elem, 'div',
attrib={'data-type':
'document-title'})
title_elem.text = node['title']
debug(node['title'])
if 'contents' in node:
elem = etree.SubElement(div_elem, 'div')
html_nest(node['contents'], elem)
def page_nodes(page_id, elem):
"""Fetch page return body wrapped in provided element."""
debug(page_id)
res = requests.get(ARCHIVEHTML.format(page_id))
xpath = etree.XPath('//x:body', namespaces=NS)
body = xpath(etree.fromstring(res.content))[0]
elem.set('data-type', 'page')
for c in body.iterchildren():
elem.append(c)
return elem
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Assemble complete book "
"as single HTML file")
parser.add_argument("bookid", help="Identifier of book: "
"<uuid|shortId>[@ver]")
parser.add_argument("html_out", nargs="?",
type=argparse.FileType('w'),
help="assembled HTML file output (default stdout)",
default=sys.stdout)
parser.add_argument('-v', '--verbose', action='store_true',
help='Send debugging info to stderr')
args = parser.parse_args()
verbose = args.verbose
main(args.bookid, args.html_out)
| Python | 0.000001 |
df85906e8e2a872ca99002b26af6ea5d495b23ca | fix wrong document string | data_migrator/emitters/__init__.py | data_migrator/emitters/__init__.py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
This module contains all classes for models, managers and fields
* :class:`BaseEmitter`
* :class:`MySQLEmitter`
* ...
"""
from .mysql import MySQLEmitter
from .csv import CSVEmitter
| #!/usr/bin/python
# -*- coding: UTF-8 -*-
from .mysql import MySQLEmitter
from .csv import CSVEmitter
"""
This module contains all classes for models, managers and fields
* :class:`BaseEmitter`
* :class:`MySQLEmitter`
* ...
"""
| Python | 0.999949 |
0a05e6479ee907c3702cc895c5a180cd816a5433 | Build interdependencies. | d1_common_python/src/setup.py | d1_common_python/src/setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:mod:`setup`
====================
:Synopsis: Create egg.
:Author: DataONE (Dahl)
"""
from setuptools import setup, find_packages
import d1_common
setup(
name='DataONE_Common',
version=d1_common.__version__,
author='DataONE Project',
author_email='developers@dataone.org',
url='http://dataone.org',
description='Contains functionality common to projects that interact with the DataONE infrastructure via Python',
license='Apache License, Version 2.0',
packages=find_packages(),
# Dependencies that are available through PYPI / easy_install.
install_requires=[
'iso8601 >= 0.1',
'pyxb >= 1.1.2',
],
package_data={
# If any package contains *.txt or *.rst files, include them:
'': ['*.txt', '*.rst'],
}
)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:mod:`setup`
====================
:Synopsis: Create egg.
:Author: DataONE (Dahl)
"""
from setuptools import setup, find_packages
setup(
name='Python DataONE Common',
#version=d1_client.__version__,
description='Contains functionality common to projects that interact with the DataONE infrastructure via Python',
author='DataONE Project',
url='http://dataone.org',
packages=find_packages(),
# Dependencies that are available through PYPI / easy_install.
install_requires=[
# iso860
'iso8601 >= 0.1',
# PyXB
'pyxb >= 1.1.2',
],
package_data={
# If any package contains *.txt or *.rst files, include them:
'': ['*.txt', '*.rst'],
}
)
| Python | 0.00002 |
9664f6e6bf64e10fe0ce6fbfc3bbf20d4775cdb6 | Update MotorsControlFile.py | ProBot_BeagleBone/MotorsControlFile.py | ProBot_BeagleBone/MotorsControlFile.py | #!/usr/bin/python
# Python Standart Library Imports
import SabertoothFile
import PWMFile
import ProBotConstantsFile
# Initialization of classes from local files
Sabertooth = SabertoothFile.SabertoothClass()
PWM = PWMFile.PWMClass()
Pconst = ProBotConstantsFile.Constants()
class MotorsControlClass():
def MotorsControl(self,rightMotor, leftMotor, userChoice):
if userChoice=='1':
# Sending the values to the Sabertooth that is connected to the motors
Sabertooth.drive(Pconst.addr, 1, int(rightMotor))
Sabertooth.drive(Pconst.addr, 2, int(leftMotor))
if userChoice=='2':
# Sending the values to the pwm controller that is connected to the motors
PWM.PWM_Signals(int(rightMotor), int(leftMotor))
| #!/usr/bin/python
import SabertoothFile
import PWMFile
import ProBotConstantsFile
# Initialization of classes from local files
Sabertooth = SabertoothFile.SabertoothClass()
PWM = PWMFile.PWMClass()
Pconst = ProBotConstantsFile.Constants()
class MotorsControlClass():
def MotorsControl(self,rightMotor, leftMotor, userChoice):
if userChoice=='1':
# Sending the values to the Sabertooth that is connected to the motors
Sabertooth.drive(Pconst.addr, 1, int(rightMotor))
Sabertooth.drive(Pconst.addr, 2, int(leftMotor))
if userChoice=='2':
PWM.PWM_Signals(int(rightMotor), int(leftMotor))
| Python | 0 |
76a39d6ab95f3b036b93a4a4680f7a5e37e981ec | Fix ElasticNet distance unit test | foolbox/tests/test_distances.py | foolbox/tests/test_distances.py | import pytest
import numpy as np
from foolbox import distances
from pytest import approx
def test_abstract_distance():
with pytest.raises(TypeError):
distances.Distance()
def test_base_distance():
class TestDistance(distances.Distance):
def _calculate(self):
return 22, 2
distance = TestDistance(None, None, bounds=(0, 1))
assert distance.name() == 'TestDistance'
assert distance.value == 22
assert distance.gradient == 2
assert '2.2' in str(distance)
assert 'TestDistance' in str(distance)
assert distance == distance
assert not distance < distance
assert not distance > distance
assert distance <= distance
assert distance >= distance
with pytest.raises(TypeError):
distance < 3
with pytest.raises(TypeError):
distance == 3
def test_mse():
assert distances.MSE == distances.MeanSquaredDistance
def test_mae():
assert distances.MAE == distances.MeanAbsoluteDistance
def test_linf():
assert distances.Linf == distances.Linfinity
def test_mean_squared_distance():
d = distances.MeanSquaredDistance(
np.array([0, .5]),
np.array([.5, .5]),
bounds=(0, 1))
assert d.value == 1. / 8.
assert (d.gradient == np.array([.5, 0])).all()
def test_mean_absolute_distance():
d = distances.MeanAbsoluteDistance(
np.array([0, .5]),
np.array([.7, .5]),
bounds=(0, 1))
assert d.value == approx(0.35)
assert (d.gradient == np.array([0.5, 0])).all()
def test_linfinity():
d = distances.Linfinity(
np.array([0, .5]),
np.array([.7, .5]),
bounds=(0, 1))
assert d.value == approx(.7)
with pytest.raises(NotImplementedError):
d.gradient
def test_l0():
d = distances.L0(
np.array([0, .5]),
np.array([.7, .5]),
bounds=(0, 1))
assert d.value == approx(1.)
with pytest.raises(NotImplementedError):
d.gradient
def test_en():
en = distances.EN(0.1)
d = en(
np.array([0, .5]),
np.array([.7, .5]),
bounds=(0, 1))
assert d.value == approx(0.56)
assert (d.gradient == np.array([2.4, 0])).all()
@pytest.mark.parametrize('Distance', [
distances.MeanSquaredDistance,
distances.MeanAbsoluteDistance,
distances.Linfinity,
distances.L0,
distances.EN(0),
])
def test_str_repr(Distance):
"""Tests that str and repr contain the value
and that str does not fail when initialized
with a value rather than calculated."""
reference = np.zeros((10, 10))
other = np.ones((10, 10))
d = Distance(reference, other, bounds=(0, 1))
assert isinstance(str(d), str)
if 'L0' in str(d):
assert '100' in str(d)
assert '100' in repr(d)
else:
assert '1.00e+' in str(d)
assert '1.00e+' in repr(d)
| import pytest
import numpy as np
from foolbox import distances
from pytest import approx
def test_abstract_distance():
with pytest.raises(TypeError):
distances.Distance()
def test_base_distance():
class TestDistance(distances.Distance):
def _calculate(self):
return 22, 2
distance = TestDistance(None, None, bounds=(0, 1))
assert distance.name() == 'TestDistance'
assert distance.value == 22
assert distance.gradient == 2
assert '2.2' in str(distance)
assert 'TestDistance' in str(distance)
assert distance == distance
assert not distance < distance
assert not distance > distance
assert distance <= distance
assert distance >= distance
with pytest.raises(TypeError):
distance < 3
with pytest.raises(TypeError):
distance == 3
def test_mse():
assert distances.MSE == distances.MeanSquaredDistance
def test_mae():
assert distances.MAE == distances.MeanAbsoluteDistance
def test_linf():
assert distances.Linf == distances.Linfinity
def test_mean_squared_distance():
d = distances.MeanSquaredDistance(
np.array([0, .5]),
np.array([.5, .5]),
bounds=(0, 1))
assert d.value == 1. / 8.
assert (d.gradient == np.array([.5, 0])).all()
def test_mean_absolute_distance():
d = distances.MeanAbsoluteDistance(
np.array([0, .5]),
np.array([.7, .5]),
bounds=(0, 1))
assert d.value == approx(0.35)
assert (d.gradient == np.array([0.5, 0])).all()
def test_linfinity():
d = distances.Linfinity(
np.array([0, .5]),
np.array([.7, .5]),
bounds=(0, 1))
assert d.value == approx(.7)
with pytest.raises(NotImplementedError):
d.gradient
def test_l0():
d = distances.L0(
np.array([0, .5]),
np.array([.7, .5]),
bounds=(0, 1))
assert d.value == approx(1.)
with pytest.raises(NotImplementedError):
d.gradient
def test_en():
en = distances.EN(0.1)
d = en(
np.array([0, .5]),
np.array([.7, .5]),
bounds=(0, 1))
assert d.value == approx(0.56)
with pytest.raises(NotImplementedError):
d.gradient
@pytest.mark.parametrize('Distance', [
distances.MeanSquaredDistance,
distances.MeanAbsoluteDistance,
distances.Linfinity,
distances.L0,
distances.EN(1),
])
def test_str_repr(Distance):
"""Tests that str and repr contain the value
and that str does not fail when initialized
with a value rather than calculated."""
reference = np.zeros((10, 10))
other = np.ones((10, 10))
d = Distance(reference, other, bounds=(0, 1))
assert isinstance(str(d), str)
if 'L0' in str(d):
assert '100' in str(d)
assert '100' in repr(d)
else:
assert '1.00e+' in str(d)
assert '1.00e+' in repr(d)
| Python | 0.000001 |
19b0391aad11748cfca4b22616159a7b2893ff9b | Change api to return money objects | bluebottle/utils/serializers.py | bluebottle/utils/serializers.py | from HTMLParser import HTMLParser
import re
from moneyed import Money
from rest_framework import serializers
from .validators import validate_postal_code
from .models import Address, Language
class MoneySerializer(serializers.DecimalField):
def __init__(self, max_digits=12, decimal_places=2, **kwargs):
super(MoneySerializer, self).__init__(
max_digits=max_digits,
decimal_places=decimal_places,
**kwargs
)
def to_representation(self, instance):
return {
'amount': instance.amount,
'currency': str(instance.currency)
}
def to_internal_value(self, data):
if not data:
return data
return Money(data, 'EUR')
class MoneyTotalSerializer(serializers.ListField):
"""
Serialize money totals with multiple currencies, e.g.
[(450, 'EUR'), (23050, 'XEF')]
"""
child = MoneySerializer()
class ShareSerializer(serializers.Serializer):
share_name = serializers.CharField(max_length=256, required=True)
share_email = serializers.EmailField(required=True)
share_motivation = serializers.CharField(default="")
share_cc = serializers.BooleanField(default=False)
project = serializers.CharField(max_length=256, required=True)
class LanguageSerializer(serializers.ModelSerializer):
class Meta:
model = Language
fields = ('id', 'code', 'language_name', 'native_name')
class MLStripper(HTMLParser):
""" Used to strip HTML tags for meta fields (e.g. description) """
def __init__(self):
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
class AddressSerializer(serializers.ModelSerializer):
def validate_postal_code(self, attrs, source):
value = attrs[source]
if value:
country_code = ''
if 'country' in attrs:
country_code = attrs['country']
elif self.object and self.object.country:
country_code = self.object.country.alpha2_code
if country_code:
validate_postal_code(value, country_code)
return attrs
class Meta:
model = Address
fields = (
'id', 'line1', 'line2', 'city', 'state', 'country', 'postal_code')
SCHEME_PATTERN = r'^https?://'
class URLField(serializers.URLField):
""" URLField allowing absence of url scheme """
def to_internal_value(self, value):
""" Allow exclusion of http(s)://, add it if it's missing """
if not value:
return None
m = re.match(SCHEME_PATTERN, value)
if not m: # no scheme
value = "http://%s" % value
return value
| from HTMLParser import HTMLParser
import re
from moneyed import Money
from rest_framework import serializers
from .validators import validate_postal_code
from .models import Address, Language
class MoneySerializer(serializers.DecimalField):
def __init__(self, max_digits=12, decimal_places=2, **kwargs):
super(MoneySerializer, self).__init__(
max_digits=max_digits,
decimal_places=decimal_places,
**kwargs
)
def to_representation(self, instance):
return instance.amount
def to_internal_value(self, data):
if not data:
return data
return Money(data, 'EUR')
class MoneyTotalSerializer(serializers.ListField):
"""
Serialize money totals with multiple currencies, e.g.
[(450, 'EUR'), (23050, 'XEF')]
"""
child = MoneySerializer()
class ShareSerializer(serializers.Serializer):
share_name = serializers.CharField(max_length=256, required=True)
share_email = serializers.EmailField(required=True)
share_motivation = serializers.CharField(default="")
share_cc = serializers.BooleanField(default=False)
project = serializers.CharField(max_length=256, required=True)
class LanguageSerializer(serializers.ModelSerializer):
class Meta:
model = Language
fields = ('id', 'code', 'language_name', 'native_name')
class MLStripper(HTMLParser):
""" Used to strip HTML tags for meta fields (e.g. description) """
def __init__(self):
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
class AddressSerializer(serializers.ModelSerializer):
def validate_postal_code(self, attrs, source):
value = attrs[source]
if value:
country_code = ''
if 'country' in attrs:
country_code = attrs['country']
elif self.object and self.object.country:
country_code = self.object.country.alpha2_code
if country_code:
validate_postal_code(value, country_code)
return attrs
class Meta:
model = Address
fields = (
'id', 'line1', 'line2', 'city', 'state', 'country', 'postal_code')
SCHEME_PATTERN = r'^https?://'
class URLField(serializers.URLField):
""" URLField allowing absence of url scheme """
def to_internal_value(self, value):
""" Allow exclusion of http(s)://, add it if it's missing """
if not value:
return None
m = re.match(SCHEME_PATTERN, value)
if not m: # no scheme
value = "http://%s" % value
return value
| Python | 0.000001 |
d0d80c459bcac9b86fff146726e9e0e9ec788652 | fix some broken doctests | sympy/assumptions/assume.py | sympy/assumptions/assume.py | # doctests are disabled because of issue #1521
from sympy.core import Basic, Symbol
from sympy.core.relational import Relational
class AssumptionsContext(set):
"""Set representing assumptions.
This is used to represent global assumptions, but you can also use this
class to create your own local assumptions contexts. It is basically a thin
wrapper to Python's set, so see its documentation for advanced usage.
Examples:
>>> from sympy import *
>>> global_assumptions
AssumptionsContext()
>>> x = Symbol('x')
>>> global_assumptions.add(Assume(x, Q.real))
>>> global_assumptions
AssumptionsContext([Assume(x, 'real', True)])
>>> global_assumptions.remove(Assume(x, Q.real))
>>> global_assumptions
AssumptionsContext()
>>> global_assumptions.clear()
"""
def add(self, *assumptions):
"""Add an assumption."""
for a in assumptions:
assert isinstance(a, Assume), 'can only store instances of Assume'
super(AssumptionsContext, self).add(a)
global_assumptions = AssumptionsContext()
class Assume(Basic):
"""New-style assumptions.
>>> from sympy import *
>>> x = Symbol('x')
>>> Assume(x, Q.integer)
Assume(x, 'integer', True)
>>> Assume(x, Q.integer, False)
Assume(x, 'integer', False)
>>> Assume( x > 1 )
Assume(1 < x, 'relational', True)
"""
def __init__(self, expr, key='relational', value=True):
self._args = (expr, key, value)
is_Atom = True # do not attempt to decompose this
@property
def expr(self):
"""
Return the expression used by this assumption.
Examples:
>>> from sympy import *
>>> x = Symbol('x')
>>> a = Assume(x+1, Q.integer)
>>> a.expr
1 + x
"""
return self._args[0]
@property
def key(self):
"""
Return the key used by this assumption.
It is a string, e.g. 'integer', 'rational', etc.
Examples:
>>> from sympy import *
>>> x = Symbol('x')
>>> a = Assume(x, Q.integer)
>>> a.key
'integer'
"""
return self._args[1]
@property
def value(self):
"""
Return the value stored by this assumptions.
It's a boolean. True means that the assumption
holds always, and False means the assumption
does not hold
Examples:
>>> from sympy import *
>>> x = Symbol('x')
>>> a = Assume(x, Q.integer)
>>> a.value
True
>>> b = Assume(x, Q.integer, False)
>>> b.value
False
"""
return self._args[2]
def __eq__(self, other):
if type(other) == Assume:
return self._args == other._args
return False
def eliminate_assume(expr, symbol=None):
"""
Convert an expression with assumptions to an equivalent with all assumptions
replaced by symbols.
Assume(x, integer=True) --> integer
Assume(x, integer=False) --> ~integer
Examples:
>>> from sympy import *
>>> x = Symbol('x')
>>> eliminate_assume(Assume(x, Q.positive))
positive
>>> eliminate_assume(Assume(x, Q.positive, False))
Not(positive)
"""
if type(expr) == Assume:
if symbol is not None:
if not expr.expr.has(symbol): return
if expr.value: return Symbol(expr.key)
return ~Symbol(expr.key)
args = []
for a in expr.args:
args.append(eliminate_assume(a))
return type(expr)(*args)
| # doctests are disabled because of issue #1521
from sympy.core import Basic, Symbol
from sympy.core.relational import Relational
class AssumptionsContext(set):
"""Set representing assumptions.
This is used to represent global assumptions, but you can also use this
class to create your own local assumptions contexts. It is basically a thin
wrapper to Python's set, so see its documentation for advanced usage.
Examples:
>>> from sympy import *
>>> global_assumptions
set([])
>>> x = Symbol('x')
>>> global_assumptions.add(Assume(x, Q.real))
>>> global_assumptions
set([Assume(x, 'real', True)])
>>> global_assumptions.remove(Assume(x, Q.real))
>>> global_assumptions
set([])
>>> global_assumptions.clear()
"""
def add(self, *assumptions):
"""Add an assumption."""
for a in assumptions:
assert isinstance(a, Assume), 'can only store instances of Assume'
super(AssumptionsContext, self).add(a)
global_assumptions = AssumptionsContext()
class Assume(Basic):
"""New-style assumptions.
>>> from sympy import *
>>> x = Symbol('x')
>>> Assume(x, Q.integer)
Assume(x, 'integer', True)
>>> Assume(x, Q.integer, False)
Assume(x, 'integer', False)
>>> Assume( x > 1 )
Assume(1 < x, 'relational', True)
"""
def __init__(self, expr, key='relational', value=True):
self._args = (expr, key, value)
is_Atom = True # do not attempt to decompose this
@property
def expr(self):
"""
Return the expression used by this assumption.
Examples:
>>> from sympy import *
>>> x = Symbol('x')
>>> a = Assume(x+1, Q.integer)
>>> a.expr
1 + x
"""
return self._args[0]
@property
def key(self):
"""
Return the key used by this assumption.
It is a string, e.g. 'integer', 'rational', etc.
Examples:
>>> from sympy import *
>>> x = Symbol('x')
>>> a = Assume(x, Q.integer)
>>> a.key
'integer'
"""
return self._args[1]
@property
def value(self):
"""
Return the value stored by this assumptions.
It's a boolean. True means that the assumption
holds always, and False means the assumption
does not hold
Examples:
>>> from sympy import *
>>> x = Symbol('x')
>>> a = Assume(x, Q.integer)
>>> a.value
True
>>> b = Assume(x, Q.integer, False)
>>> b.value
False
"""
return self._args[2]
def __eq__(self, other):
if type(other) == Assume:
return self._args == other._args
return False
def eliminate_assume(expr, symbol=None):
"""
Convert an expression with assumptions to an equivalent with all assumptions
replaced by symbols.
Assume(x, integer=True) --> integer
Assume(x, integer=False) --> ~integer
Examples:
>>> from sympy import *
>>> x = Symbol('x')
>>> eliminate_assume(Assume(x, Q.positive))
positive
>>> eliminate_assume(Assume(x, Q.positive, False))
Not(positive)
"""
if type(expr) == Assume:
if symbol is not None:
if not expr.expr.has(symbol): return
if expr.value: return Symbol(expr.key)
return ~Symbol(expr.key)
args = []
for a in expr.args:
args.append(eliminate_assume(a))
return type(expr)(*args)
| Python | 0.000041 |
c1a71ff5f5a777bb9ea28b6109334067f186eb5a | add Q.infinity(Add(args)) <==> any(map(Q.infinity, args)) | sympy/assumptions/newask.py | sympy/assumptions/newask.py | from __future__ import print_function, division
from sympy.core import Basic, Mul, Add, Pow
from sympy.assumptions.assume import global_assumptions, AppliedPredicate
from sympy.logic.inference import satisfiable
from sympy.logic.boolalg import And, Implies, Equivalent, Or
from sympy.assumptions.ask import Q
from sympy.utilities.iterables import sift
def newask(proposition, assumptions=True, context=global_assumptions):
relevant_facts = get_all_relevant_facts(proposition, assumptions, context)
# TODO: Can this be faster to do it in one pass using xor?
can_be_true = satisfiable(And(proposition, assumptions,
relevant_facts, *context))
can_be_false = satisfiable(And(~proposition, assumptions,
relevant_facts, *context))
if can_be_true and can_be_false:
return None
if can_be_true and not can_be_false:
return True
if not can_be_true and can_be_false:
return False
if not can_be_true and not can_be_false:
# TODO: Run additional checks to see which combination of the
# assumptions, global_assumptions, and relevant_facts are
# inconsistent.
raise ValueError("Inconsistent assumptions")
equiv_any_args = set(((Q.zero, Mul),
(Q.infinity, Add)))
def get_relevant_facts(proposition, assumptions=True, context=global_assumptions):
keys = proposition.atoms(AppliedPredicate)
if isinstance(assumptions, Basic):
# XXX: We need this since True/False are not Basic
keys |= assumptions.atoms(AppliedPredicate)
if context:
keys |= And(*context).atoms(AppliedPredicate)
relevant_facts = True
keys_by_predicate = sift(keys, lambda ap: ap.func)
# TODO: Write this in a more scalable and extendable way
# To keep things straight, for implications, only worry about the
# Implies(key, Q.something(key.args[0])) fact.
for key in keys_by_predicate[Q.positive]:
relevant_facts &= Implies(key, Q.real(key.args[0]))
for key in keys_by_predicate[Q.zero]:
relevant_facts &= Equivalent(key, ~Q.nonzero(key.args[0]))
relevant_facts &= Implies(key, ~Q.positive(key.args[0]))
relevant_facts &= Implies(key, Q.real(key.args[0]))
# Now for something interesting...
if isinstance(key.args[0], Pow):
relevant_facts &= Implies(key, Q.zero(key.args[0].base))
relevant_facts &= Implies(And(Q.zero(key.args[0].base),
Q.positive(key.args[0].exp)), key)
for key in keys_by_predicate[Q.nonzero]:
relevant_facts &= Equivalent(key, ~Q.zero(key.args[0]))
if isinstance(key.args[0], Add):
relevant_facts &= Implies(And(*[Q.positive(i) for i in
key.args[0].args]), key)
for key in keys:
predicate = key.func
expr = key.args[0]
if (predicate, type(expr)) in equiv_any_args:
relevant_facts &= Equivalent(key, Or(*map(predicate, expr.args)))
return relevant_facts
def get_all_relevant_facts(proposition, assumptions=True, context=global_assumptions):
# The relevant facts might introduce new keys, e.g., Q.zero(x*y) will
# introduce the keys Q.zero(x) and Q.zero(y), so we need to run it until
# we stop getting new things. Hopefully this strategy won't lead to an
# infinite loop in the future.
relevant_facts = True
old_relevant_facts = False
while relevant_facts != old_relevant_facts:
old_relevant_facts, relevant_facts = (relevant_facts,
get_relevant_facts(proposition, assumptions & relevant_facts,
context))
return relevant_facts
| from __future__ import print_function, division
from sympy.core import Basic, Mul, Add, Pow
from sympy.assumptions.assume import global_assumptions, AppliedPredicate
from sympy.logic.inference import satisfiable
from sympy.logic.boolalg import And, Implies, Equivalent, Or
from sympy.assumptions.ask import Q
from sympy.utilities.iterables import sift
def newask(proposition, assumptions=True, context=global_assumptions):
relevant_facts = get_all_relevant_facts(proposition, assumptions, context)
# TODO: Can this be faster to do it in one pass using xor?
can_be_true = satisfiable(And(proposition, assumptions,
relevant_facts, *context))
can_be_false = satisfiable(And(~proposition, assumptions,
relevant_facts, *context))
if can_be_true and can_be_false:
return None
if can_be_true and not can_be_false:
return True
if not can_be_true and can_be_false:
return False
if not can_be_true and not can_be_false:
# TODO: Run additional checks to see which combination of the
# assumptions, global_assumptions, and relevant_facts are
# inconsistent.
raise ValueError("Inconsistent assumptions")
equiv_any_args = set(((Q.zero, Mul),))
def get_relevant_facts(proposition, assumptions=True, context=global_assumptions):
keys = proposition.atoms(AppliedPredicate)
if isinstance(assumptions, Basic):
# XXX: We need this since True/False are not Basic
keys |= assumptions.atoms(AppliedPredicate)
if context:
keys |= And(*context).atoms(AppliedPredicate)
relevant_facts = True
keys_by_predicate = sift(keys, lambda ap: ap.func)
# TODO: Write this in a more scalable and extendable way
# To keep things straight, for implications, only worry about the
# Implies(key, Q.something(key.args[0])) fact.
for key in keys_by_predicate[Q.positive]:
relevant_facts &= Implies(key, Q.real(key.args[0]))
for key in keys_by_predicate[Q.zero]:
relevant_facts &= Equivalent(key, ~Q.nonzero(key.args[0]))
relevant_facts &= Implies(key, ~Q.positive(key.args[0]))
relevant_facts &= Implies(key, Q.real(key.args[0]))
# Now for something interesting...
if isinstance(key.args[0], Pow):
relevant_facts &= Implies(key, Q.zero(key.args[0].base))
relevant_facts &= Implies(And(Q.zero(key.args[0].base),
Q.positive(key.args[0].exp)), key)
for key in keys_by_predicate[Q.nonzero]:
relevant_facts &= Equivalent(key, ~Q.zero(key.args[0]))
if isinstance(key.args[0], Add):
relevant_facts &= Implies(And(*[Q.positive(i) for i in
key.args[0].args]), key)
for key in keys:
predicate = key.func
expr = key.args[0]
if (predicate, type(expr)) in equiv_any_args:
relevant_facts &= Equivalent(key, Or(*map(predicate, expr.args)))
return relevant_facts
def get_all_relevant_facts(proposition, assumptions=True, context=global_assumptions):
# The relevant facts might introduce new keys, e.g., Q.zero(x*y) will
# introduce the keys Q.zero(x) and Q.zero(y), so we need to run it until
# we stop getting new things. Hopefully this strategy won't lead to an
# infinite loop in the future.
relevant_facts = True
old_relevant_facts = False
while relevant_facts != old_relevant_facts:
old_relevant_facts, relevant_facts = (relevant_facts,
get_relevant_facts(proposition, assumptions & relevant_facts,
context))
return relevant_facts
| Python | 0.000029 |
50b19958b531cd94b537f3d911ce9b0c0b7f1ea2 | add ordereddictionary to store information about file .rooms loaded | trunk/editor/structdata/project.py | trunk/editor/structdata/project.py | #!/usr/bin/env python
try:
from collections import OrderedDict
except ImportError:
from misc.dict import OrderedDict
from subject import Subject
class Project(Subject):
def __init__(self):
super(Project, self).__init__()
self.data = OrderedDict()
self.data['world'] = None
self.data['images'] = {}
self.data['items'] = OrderedDict()
self.data['vars'] = {}
self.data['events'] = OrderedDict()
self.data['rooms'] = OrderedDict()
g_project = Project()
| #!/usr/bin/env python
try:
from collections import OrderedDict
except ImportError:
from misc.dict import OrderedDict
from subject import Subject
class Project(Subject):
def __init__(self):
super(Project, self).__init__()
self.informations = None
self.images = {}
self.items = OrderedDict()
self.vars = {}
self.events = OrderedDict()
self.rooms = OrderedDict()
self.selected_room = None
g_project = Project()
| Python | 0 |
01e9fa344259faa6eeb7f0480975547d375e132f | add function to change and image. The function remove the image from the dictionary and add an image with the new key and new path to file | trunk/editor/structdata/project.py | trunk/editor/structdata/project.py | #!/usr/bin/env python
from misc.odict import OrderedDict
from subject import Subject
class Project(Subject):
def __init__(self):
super(Project, self).__init__()
self.data = OrderedDict()
self.data['world'] = None
self.data['images'] = {}
self.data['items'] = OrderedDict()
self.data['vars'] = {}
self.data['events'] = OrderedDict()
self.data['rooms'] = OrderedDict()
def changeImage(self, old_image, new_image):
image = self.data['images'].pop(old_image)
image.file = new_image
self.data['images'][new_image] = image
self.notify()
def changeEventName(self, old_name, new_name):
event = self.data['events'].pop(old_name)
event.setName(new_name)
self.data['events'][event.id] = event
self.notify()
def changeStartRoom(self, new_start_room_name):
self.data['world'].start = new_start_room_name
self.notify()
def changeRoomName(self, old_room_name, new_room_name):
room = self.data['rooms'].pop(old_room_name)
self.data['rooms'][new_room_name] = room
if self.data['world'].start == old_room_name:
self.changeStartRoom(new_room_name)
room.setName(new_room_name)
self.notify()
def removeRoom(self, room_name):
self.data['rooms'].pop(room_name)
if self.data['world'].start == room_name:
if g_project.data['rooms']:
new_start_room_name = g_project.data['rooms'].keys()[0]
else:
new_start_room_name = ""
self.changeStartRoom(new_start_room_name)
self.notify()
g_project = Project()
| #!/usr/bin/env python
from misc.odict import OrderedDict
from subject import Subject
class Project(Subject):
def __init__(self):
super(Project, self).__init__()
self.data = OrderedDict()
self.data['world'] = None
self.data['images'] = {}
self.data['items'] = OrderedDict()
self.data['vars'] = {}
self.data['events'] = OrderedDict()
self.data['rooms'] = OrderedDict()
def changeEventName(self, old_name, new_name):
event = self.data['events'].pop(old_name)
event.setName(new_name)
self.data['events'][event.id] = event
self.notify()
def changeStartRoom(self, new_start_room_name):
self.data['world'].start = new_start_room_name
self.notify()
def changeRoomName(self, old_room_name, new_room_name):
room = self.data['rooms'].pop(old_room_name)
self.data['rooms'][new_room_name] = room
if self.data['world'].start == old_room_name:
self.changeStartRoom(new_room_name)
room.setName(new_room_name)
self.notify()
def removeRoom(self, room_name):
self.data['rooms'].pop(room_name)
if self.data['world'].start == room_name:
if g_project.data['rooms']:
new_start_room_name = g_project.data['rooms'].keys()[0]
else:
new_start_room_name = ""
self.changeStartRoom(new_start_room_name)
self.notify()
g_project = Project()
| Python | 0.000002 |
b04693387be08c1ead880d0e7472026ed76dad80 | Fix django.conf.urls.defaults imports | openstack_auth/urls.py | openstack_auth/urls.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls import patterns, url
from .utils import patch_middleware_get_user
patch_middleware_get_user()
urlpatterns = patterns('openstack_auth.views',
url(r"^login/$", "login", name='login'),
url(r"^logout/$", 'logout', name='logout'),
url(r'^switch/(?P<tenant_id>[^/]+)/$', 'switch', name='switch_tenants'),
url(r'^switch_services_region/(?P<region_name>[^/]+)/$', 'switch_region',
name='switch_services_region')
)
| # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls.defaults import patterns, url
from .utils import patch_middleware_get_user
patch_middleware_get_user()
urlpatterns = patterns('openstack_auth.views',
url(r"^login/$", "login", name='login'),
url(r"^logout/$", 'logout', name='logout'),
url(r'^switch/(?P<tenant_id>[^/]+)/$', 'switch', name='switch_tenants'),
url(r'^switch_services_region/(?P<region_name>[^/]+)/$', 'switch_region',
name='switch_services_region')
)
| Python | 0.003868 |
78705f598e7e3325e871bd17ff353a31c71bc399 | Extend all admin form to Container Admin Form (json field) | opps/articles/forms.py | opps/articles/forms.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from opps.core.widgets import OppsEditor
from opps.containers.forms import ContainerAdminForm
from .models import Post, Album, Link
class PostAdminForm(ContainerAdminForm):
multiupload_link = '/fileupload/image/'
class Meta:
model = Post
widgets = {'content': OppsEditor()}
class AlbumAdminForm(ContainerAdminForm):
multiupload_link = '/fileupload/image/'
class Meta:
model = Album
widgets = {
'headline': OppsEditor()
}
class LinkAdminForm(ContainerAdminForm):
class Meta:
model = Link
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django import forms
from .models import Post, Album, Link
from opps.core.widgets import OppsEditor
from opps.db.models.fields.jsonf import JSONFormField
from opps.fields.widgets import JSONField
from opps.fields.models import Field, FieldOption
class PostAdminForm(forms.ModelForm):
json = JSONFormField(widget=JSONField(attrs={'_model': 'Post'}), required=False)
multiupload_link = '/fileupload/image/'
def __init__(self, *args, **kwargs):
super(PostAdminForm, self).__init__(*args, **kwargs)
for field in Field.objects.filter(
application__contains=self._meta.model.__name__):
for fo in FieldOption.objects.filter(field=field):
self.fields[
'json_{}_{}'.format(
field.slug, fo.option.slug
)] = forms.CharField(required=False)
class Meta:
model = Post
widgets = {'content': OppsEditor()}
class AlbumAdminForm(forms.ModelForm):
multiupload_link = '/fileupload/image/'
class Meta:
model = Album
widgets = {
'headline': OppsEditor()
}
class LinkAdminForm(forms.ModelForm):
class Meta:
model = Link
| Python | 0 |
67a7a3f5bc05265690a831dea7c4310af66870a8 | add channel obj on set_context_data * long_slug * level | opps/articles/utils.py | opps/articles/utils.py | # -*- coding: utf-8 -*-
from django.utils import timezone
from opps.articles.models import ArticleBox, Article
def set_context_data(self, SUPER, **kwargs):
context = super(SUPER, self).get_context_data(**kwargs)
article = Article.objects.filter(
site=self.site,
channel_long_slug__in=self.channel_long_slug,
date_available__lte=timezone.now(),
published=True)
context['posts'] = article.filter(child_class='Post')[:self.limit]
context['albums'] = article.filter(child_class='Album')[:self.limit]
context['channel'] = {}
context['channel']['long_slug'] = self.long_slug
if self.channel:
context['channel']['level'] = self.channel.get_level()
context['articleboxes'] = ArticleBox.objects.filter(
channel__long_slug=self.long_slug)
if self.slug:
context['articleboxes'] = context['articleboxes'].filter(
article__slug=self.slug)
return context
| # -*- coding: utf-8 -*-
from django.utils import timezone
from opps.articles.models import ArticleBox, Article
def set_context_data(self, SUPER, **kwargs):
context = super(SUPER, self).get_context_data(**kwargs)
article = Article.objects.filter(
site=self.site,
channel_long_slug__in=self.channel_long_slug,
date_available__lte=timezone.now(),
published=True)
context['posts'] = article.filter(child_class='Post')[:self.limit]
context['albums'] = article.filter(child_class='Album')[:self.limit]
context['channel_long_slug'] = self.long_slug
context['articleboxes'] = ArticleBox.objects.filter(
channel__long_slug=self.long_slug)
if self.slug:
context['articleboxes'] = context['articleboxes'].filter(
article__slug=self.slug)
return context
| Python | 0.000001 |
5ad21e185cf1984eb0a068387fdd1d73a4a56d15 | Create get context data, set template var opps_channel and opps_channel_conf issue #47 | opps/articles/views.py | opps/articles/views.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.contrib.sites.models import get_current_site
from django.core.paginator import Paginator, InvalidPage
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from django.shortcuts import get_object_or_404
from django.utils import timezone
from django.http import Http404
from django.conf import settings
from haystack.views import SearchView
from .models import Post
from opps.channels.models import Channel
def set_context_data(self, SUPER, **kwargs):
context = super(SUPER, self).get_context_data(**kwargs)
article = self.article.get()
context['opps_channel'] = article.channel
context['opps_channel_conf'] = settings.OPPS_CHANNEL_CONF\
.get(article.channel.slug, '')
return context
class OppsList(ListView):
context_object_name = "context"
def get_context_data(self, **kwargs):
return set_context_data(self, OppsList, **kwargs)
@property
def template_name(self):
homepage = Channel.objects.get_homepage(site=self.site)
if not homepage:
return None
long_slug = self.kwargs.get('channel__long_slug',
homepage.long_slug)
if homepage.long_slug != long_slug:
long_slug = long_slug[:-1]
domain_folder = 'channels'
if self.site.id > 1:
domain_folder = "{0}/channels".format(self.site)
return '{0}/{1}.html'.format(domain_folder, long_slug)
@property
def queryset(self):
self.site = get_current_site(self.request)
if not self.kwargs.get('channel__long_slug'):
return Post.objects.filter(channel__homepage=True,
site=self.site,
date_available__lte=timezone.now(),
published=True).all()
long_slug = self.kwargs['channel__long_slug'][:-1]
get_object_or_404(Channel, site=self.site, long_slug=long_slug,
date_available__lte=timezone.now(), published=True)
self.article = Post.objects.filter(site=self.site,
channel__long_slug=long_slug,
date_available__lte=timezone.now(),
published=True).all()
return self.article
class OppsDetail(DetailView):
context_object_name = "context"
def get_context_data(self, **kwargs):
return set_context_data(self, OppsDetail, **kwargs)
@property
def template_name(self):
homepage = Channel.objects.get_homepage(site=self.site)
if not homepage:
return None
long_slug = self.kwargs.get('channel__long_slug', homepage.long_slug)
domain_folder = 'articles'
if self.site.id > 1:
domain_folder = "{0}/articles".format(self.site)
return '{0}/{1}.html'.format(domain_folder,
long_slug)
@property
def queryset(self):
self.site = get_current_site(self.request)
homepage = Channel.objects.get_homepage(site=self.site)
slug = None
if homepage:
slug = homepage.long_slug
long_slug = self.kwargs.get('channel__long_slug', slug)
self.article = Post.objects.filter(site=self.site,
channel__long_slug=long_slug,
slug=self.kwargs['slug'],
date_available__lte=timezone.now(),
published=True).all()
return self.article
class Search(SearchView):
def get_results(self):
return self.form.search().order_by('-date_available')
def build_page(self):
paginator = Paginator(self.results, self.results_per_page)
try:
paginator.page(int(self.request.GET.get('page', 1)))
except InvalidPage:
raise Http404("No such page!")
return (None, self.results)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.contrib.sites.models import get_current_site
from django.core.paginator import Paginator, InvalidPage
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from django.shortcuts import get_object_or_404
from django.utils import timezone
from django.http import Http404
from haystack.views import SearchView
from .models import Post
from opps.channels.models import Channel
class OppsList(ListView):
context_object_name = "context"
@property
def template_name(self):
homepage = Channel.objects.get_homepage(site=self.site)
if not homepage:
return None
long_slug = self.kwargs.get('channel__long_slug',
homepage.long_slug)
if homepage.long_slug != long_slug:
long_slug = long_slug[:-1]
domain_folder = 'channels'
if self.site.id > 1:
domain_folder = "{0}/channels".format(self.site)
return '{0}/{1}.html'.format(domain_folder, long_slug)
@property
def queryset(self):
self.site = get_current_site(self.request)
if not self.kwargs.get('channel__long_slug'):
return Post.objects.filter(channel__homepage=True,
site=self.site,
date_available__lte=timezone.now(),
published=True).all()
long_slug = self.kwargs['channel__long_slug'][:-1]
get_object_or_404(Channel, site=self.site, long_slug=long_slug,
date_available__lte=timezone.now(), published=True)
return Post.objects.filter(site=self.site,
channel__long_slug=long_slug,
date_available__lte=timezone.now(),
published=True).all()
class OppsDetail(DetailView):
context_object_name = "context"
@property
def template_name(self):
homepage = Channel.objects.get_homepage(site=self.site)
if not homepage:
return None
long_slug = self.kwargs.get('channel__long_slug', homepage.long_slug)
domain_folder = 'articles'
if self.site.id > 1:
domain_folder = "{0}/articles".format(self.site)
return '{0}/{1}.html'.format(domain_folder,
long_slug)
@property
def queryset(self):
self.site = get_current_site(self.request)
homepage = Channel.objects.get_homepage(site=self.site)
slug = None
if homepage:
slug = homepage.long_slug
long_slug = self.kwargs.get('channel__long_slug', slug)
return Post.objects.filter(site=self.site,
channel__long_slug=long_slug,
slug=self.kwargs['slug'],
date_available__lte=timezone.now(),
published=True).all()
class Search(SearchView):
def get_results(self):
return self.form.search().order_by('-date_available')
def build_page(self):
paginator = Paginator(self.results, self.results_per_page)
try:
paginator.page(int(self.request.GET.get('page', 1)))
except InvalidPage:
raise Http404("No such page!")
return (None, self.results)
| Python | 0 |
9a83ec4c80bec0cec45904a8998cd82a99a9b1b2 | Save `resources` as extra data in its entirety | social_core/backends/atlassian.py | social_core/backends/atlassian.py | from social_core.backends.oauth import BaseOAuth2
class AtlassianOAuth2(BaseOAuth2):
name = 'atlassian'
AUTHORIZATION_URL = 'https://accounts.atlassian.com/authorize'
ACCESS_TOKEN_METHOD = 'POST'
ACCESS_TOKEN_URL = 'https://api.atlassian.com/oauth/token'
DEFAULT_SCOPE = ['read:jira-user', 'offline_access']
ID_KEY = 'accountId'
EXTRA_DATA = [
('resources', 'resources'),
('refresh_token', 'refresh_token'),
('expires_in', 'expires_in'),
]
def auth_params(self, state=None):
params = super(AtlassianOAuth2, self).auth_params(state)
params.update({'audience': 'api.atlassian.com',
'prompt': 'consent'})
return params
def get_user_details(self, response):
fullname, first_name, last_name = self.get_user_names(response['displayName'])
return {'username': response['name'],
'email': response['emailAddress'],
'fullname': fullname,
'first_name': first_name,
'last_name': last_name}
def user_data(self, access_token, *args, **kwargs):
resources = self.get_json('https://api.atlassian.com/oauth/token/accessible-resources',
headers={'Authorization': 'Bearer {}'.format(access_token)})
user_info = self.get_json('https://api.atlassian.com/ex/jira/{}/rest/api/2/myself'.format(resources[0]['id']),
headers={'Authorization': 'Bearer {}'.format(access_token)})
user_info['resources'] = resources
return user_info
| from social_core.backends.oauth import BaseOAuth2
class AtlassianOAuth2(BaseOAuth2):
name = 'atlassian'
AUTHORIZATION_URL = 'https://accounts.atlassian.com/authorize'
ACCESS_TOKEN_METHOD = 'POST'
ACCESS_TOKEN_URL = 'https://api.atlassian.com/oauth/token'
DEFAULT_SCOPE = ['read:jira-user', 'offline_access']
ID_KEY = 'accountId'
EXTRA_DATA = [
('resource_ids', 'resource_ids'),
('refresh_token', 'refresh_token'),
('expires_in', 'expires_in'),
]
def auth_params(self, state=None):
params = super(AtlassianOAuth2, self).auth_params(state)
params.update({'audience': 'api.atlassian.com',
'prompt': 'consent'})
return params
def get_user_details(self, response):
fullname, first_name, last_name = self.get_user_names(response['displayName'])
return {'username': response['name'],
'email': response['emailAddress'],
'fullname': fullname,
'first_name': first_name,
'last_name': last_name}
def user_data(self, access_token, *args, **kwargs):
resources = self.get_json('https://api.atlassian.com/oauth/token/accessible-resources',
headers={'Authorization': 'Bearer {}'.format(access_token)})
resource_ids = [resource['id'] for resource in resources]
user_info = self.get_json('https://api.atlassian.com/ex/jira/{}/rest/api/2/myself'.format(resource_ids[0]),
headers={'Authorization': 'Bearer {}'.format(access_token)})
user_info['resource_ids'] = resource_ids
return user_info
| Python | 0.000001 |
ad1fe8f7f636d8bf5bb92599b37ac8aa7849596e | Add small test | tests/grab_transport.py | tests/grab_transport.py | import pickle
import os
import sys
from test_server import Response
from tests.util import BaseGrabTestCase, only_grab_transport, temp_dir
from grab import Grab
from grab.error import GrabMisuseError
FAKE_TRANSPORT_CODE = """
from grab.transport.curl import CurlTransport
class FakeTransport(CurlTransport):
pass
"""
def get_fake_transport_class():
from grab.transport.curl import ( # pylint: disable=import-outside-toplevel
CurlTransport,
)
class FakeTransport(CurlTransport):
pass
return FakeTransport
def get_fake_transport_instance():
return get_fake_transport_class()()
def get_curl_transport_instance():
from grab.transport.curl import ( # pylint: disable=import-outside-toplevel
CurlTransport,
)
return CurlTransport()
class TestTransportTestCase(BaseGrabTestCase):
def assert_transport_response(self, transport, response):
self.server.add_response(Response(data=response), count=2)
grab = Grab(transport=transport)
grab.go(self.server.get_url())
self.assertEqual(grab.doc.body, response)
grab2 = grab.clone()
grab2.go(self.server.get_url())
self.assertEqual(grab2.doc.body, response)
def assert_transport_pickle(self, transport, response):
grab = Grab(transport=transport)
grab2 = grab.clone()
grab2_data = pickle.dumps(grab2, pickle.HIGHEST_PROTOCOL)
grab3 = pickle.loads(grab2_data)
grab3.go(self.server.get_url())
self.assertEqual(grab3.doc.body, response)
@only_grab_transport("pycurl")
def test_transport_option_as_string_curl(self):
self.assert_transport_response("grab.transport.curl.CurlTransport", b"XYZ")
@only_grab_transport("pycurl")
def test_transport_option_as_string_fake(self):
with temp_dir() as dir_:
sys.path.insert(0, dir_)
with open(os.path.join(dir_, "foo.py"), "w", encoding="utf-8") as out:
out.write(FAKE_TRANSPORT_CODE)
self.assert_transport_response("foo.FakeTransport", b"XYZ")
sys.path.remove(dir_)
@only_grab_transport("pycurl")
def test_transport_option_as_class_curl(self):
from grab.transport.curl import ( # pylint: disable=import-outside-toplevel
CurlTransport,
)
self.assert_transport_response(CurlTransport, b"XYZ")
@only_grab_transport("pycurl")
def test_transport_option_as_class_fake(self):
fake_transport_cls = get_fake_transport_class()
self.assert_transport_response(fake_transport_cls, b"XYZ")
@only_grab_transport("pycurl")
def test_transport_option_as_function_curl(self):
self.assert_transport_response(get_curl_transport_instance, b"XYZ")
@only_grab_transport("pycurl")
def test_transport_option_as_function_fake(self):
self.assert_transport_response(get_fake_transport_instance, b"XYZ")
def test_invalid_transport_invalid_alias(self):
with self.assertRaises(GrabMisuseError):
Grab(transport="zzzzzzzzzz").go(self.server.get_url())
def test_invalid_transport_invalid_path(self):
# AttributeError comes from setup_transport method
with self.assertRaises(AttributeError):
Grab(transport="tests.grab_transport.ZZZ").go(self.server.get_url())
def test_invalid_transport_not_collable_or_string(self):
with self.assertRaises(GrabMisuseError):
Grab(transport=13).go(self.server.get_url())
def test_setup_transport_twice(self):
transport = "grab.transport.curl.CurlTransport"
grab = Grab()
grab.setup_transport(transport)
with self.assertRaises(GrabMisuseError) as ex:
grab.setup_transport(transport)
self.assertTrue("Transport is already set up" in str(ex.exception))
| import pickle
import os
import sys
from test_server import Response
from tests.util import BaseGrabTestCase, only_grab_transport, temp_dir
from grab import Grab
from grab.error import GrabMisuseError
FAKE_TRANSPORT_CODE = """
from grab.transport.curl import CurlTransport
class FakeTransport(CurlTransport):
pass
"""
def get_fake_transport_class():
from grab.transport.curl import ( # pylint: disable=import-outside-toplevel
CurlTransport,
)
class FakeTransport(CurlTransport):
pass
return FakeTransport
def get_fake_transport_instance():
return get_fake_transport_class()()
def get_curl_transport_instance():
from grab.transport.curl import ( # pylint: disable=import-outside-toplevel
CurlTransport,
)
return CurlTransport()
class TestTransportTestCase(BaseGrabTestCase):
def assert_transport_response(self, transport, response):
self.server.add_response(Response(data=response), count=2)
grab = Grab(transport=transport)
grab.go(self.server.get_url())
self.assertEqual(grab.doc.body, response)
grab2 = grab.clone()
grab2.go(self.server.get_url())
self.assertEqual(grab2.doc.body, response)
def assert_transport_pickle(self, transport, response):
grab = Grab(transport=transport)
grab2 = grab.clone()
grab2_data = pickle.dumps(grab2, pickle.HIGHEST_PROTOCOL)
grab3 = pickle.loads(grab2_data)
grab3.go(self.server.get_url())
self.assertEqual(grab3.doc.body, response)
@only_grab_transport("pycurl")
def test_transport_option_as_string_curl(self):
self.assert_transport_response("grab.transport.curl.CurlTransport", b"XYZ")
@only_grab_transport("pycurl")
def test_transport_option_as_string_fake(self):
with temp_dir() as dir_:
sys.path.insert(0, dir_)
with open(os.path.join(dir_, "foo.py"), "w", encoding="utf-8") as out:
out.write(FAKE_TRANSPORT_CODE)
self.assert_transport_response("foo.FakeTransport", b"XYZ")
sys.path.remove(dir_)
@only_grab_transport("pycurl")
def test_transport_option_as_class_curl(self):
from grab.transport.curl import ( # pylint: disable=import-outside-toplevel
CurlTransport,
)
self.assert_transport_response(CurlTransport, b"XYZ")
@only_grab_transport("pycurl")
def test_transport_option_as_class_fake(self):
fake_transport_cls = get_fake_transport_class()
self.assert_transport_response(fake_transport_cls, b"XYZ")
@only_grab_transport("pycurl")
def test_transport_option_as_function_curl(self):
self.assert_transport_response(get_curl_transport_instance, b"XYZ")
@only_grab_transport("pycurl")
def test_transport_option_as_function_fake(self):
self.assert_transport_response(get_fake_transport_instance, b"XYZ")
def test_invalid_transport_invalid_alias(self):
with self.assertRaises(GrabMisuseError):
Grab(transport="zzzzzzzzzz").go(self.server.get_url())
def test_invalid_transport_invalid_path(self):
# AttributeError comes from setup_transport method
with self.assertRaises(AttributeError):
Grab(transport="tests.grab_transport.ZZZ").go(self.server.get_url())
def test_invalid_transport_not_collable_or_string(self):
with self.assertRaises(GrabMisuseError):
Grab(transport=13).go(self.server.get_url())
| Python | 0.00005 |
e66178cc0521426036d4c9166bf76e9379bc62ef | disable Run tests temporarily | cloudrun/tests.py | cloudrun/tests.py | import pytest
import uuid
from .cloudrun import Cloudrun
from .run import Run
token = uuid.uuid4().hex
id = uuid.uuid4().hex
def test_cloudrun_init():
assert type(Cloudrun(token)) is Cloudrun
assert Cloudrun(token).token == token
#def test_run_init():
# assert type(Run(token,id)) is Run
# assert Run(token,id).token == token
# assert Run(token,id).id == id
#def test_cloudrun_get_run_returns_run():
# assert type(Cloudrun(token).get_run(id)) is Run
| import pytest
import uuid
from .cloudrun import Cloudrun
from .run import Run
token = uuid.uuid4().hex
id = uuid.uuid4().hex
def test_cloudrun_init():
assert type(Cloudrun(token)) is Cloudrun
assert Cloudrun(token).token == token
def test_run_init():
assert type(Run(token,id)) is Run
assert Run(token,id).token == token
assert Run(token,id).id == id
def test_cloudrun_get_run_returns_run():
assert type(Cloudrun(token).get_run(id)) is Run
| Python | 0.000001 |
5af29cfa071360265b1c31538f89e806ae4eabc4 | Fix #142: Testrunner and SOUTH_TESTS_MIGRATE broken on 1.1. | south/management/commands/test.py | south/management/commands/test.py | from django.core import management
from django.core.management.commands import test
from django.core.management.commands import syncdb
from django.conf import settings
from syncdb import Command as SyncDbCommand
class MigrateAndSyncCommand(SyncDbCommand):
option_list = SyncDbCommand.option_list
for opt in option_list:
if "--migrate" == opt.get_opt_string():
opt.default = True
break
class Command(test.Command):
def handle(self, *args, **kwargs):
management.get_commands()
if not hasattr(settings, "SOUTH_TESTS_MIGRATE") or not settings.SOUTH_TESTS_MIGRATE:
# point at the core syncdb command when creating tests
# tests should always be up to date with the most recent model structure
management._commands['syncdb'] = 'django.core'
else:
management._commands['syncdb'] = MigrateAndSyncCommand()
super(Command, self).handle(*args, **kwargs) | from django.core import management
from django.core.management.commands import test
from django.core.management.commands import syncdb
from django.conf import settings
class Command(test.Command):
def handle(self, *args, **kwargs):
if not hasattr(settings, "SOUTH_TESTS_MIGRATE") or not settings.SOUTH_TESTS_MIGRATE:
# point at the core syncdb command when creating tests
# tests should always be up to date with the most recent model structure
management.get_commands()
management._commands['syncdb'] = 'django.core'
super(Command, self).handle(*args, **kwargs) | Python | 0.000001 |
39874a0ddb65582a04ea32fa2b05bacc968f56f3 | Update max-chunks-to-make-sorted-ii.py | Python/max-chunks-to-make-sorted-ii.py | Python/max-chunks-to-make-sorted-ii.py | # Time: O(nlogn)
# Space: O(n)
# This question is the same as "Max Chunks to Make Sorted"
# except the integers of the given array are not necessarily distinct,
# the input array could be up to length 2000, and the elements could be up to 10**8.
#
# Given an array arr of integers (not necessarily distinct),
# we split the array into some number of "chunks" (partitions),
# and individually sort each chunk.
# After concatenating them, the result equals the sorted array.
#
# What is the most number of chunks we could have made?
#
# Example 1:
#
# Input: arr = [5,4,3,2,1]
# Output: 1
# Explanation:
# Splitting into two or more chunks will not return the required result.
# For example, splitting into [5, 4], [3, 2, 1] will result in [4, 5, 1, 2, 3], which isn't sorted.
# Example 2:
#
# Input: arr = [2,1,3,4,4]
# Output: 4
# Explanation:
# We can split into two chunks, such as [2, 1], [3, 4, 4].
# However, splitting into [2, 1], [3], [4], [4] is the highest number of chunks possible.
#
# Note:
# - arr will have length in range [1, 2000].
# - arr[i] will be an integer in range [0, 10**8].
class Solution(object):
def maxChunksToSorted(self, arr):
"""
:type arr: List[int]
:rtype: int
"""
def compare(i1, i2):
return arr[i1]-arr[i2] if arr[i1] != arr[i2] else i1-i2
idxs = [i for i in xrange(len(arr))]
result, max_i = 0, 0
for i, v in enumerate(sorted(idxs, cmp=compare)):
max_i = max(max_i, v)
if max_i == i:
result += 1
return result
| # Time: O(nlogn)
# Space: O(n)
class Solution(object):
def maxChunksToSorted(self, arr):
"""
:type arr: List[int]
:rtype: int
"""
def compare(i1, i2):
return arr[i1]-arr[i2] if arr[i1] != arr[i2] else i1-i2
idxs = [i for i in xrange(len(arr))]
result, max_i = 0, 0
for i, v in enumerate(sorted(idxs, cmp=compare)):
max_i = max(max_i, v)
if max_i == i:
result += 1
return result
| Python | 0.000001 |
5c20418b8e5f6dc033d1a7c515d30d5e9b026db5 | Fix sampleproject view | sampleproject/bot/views.py | sampleproject/bot/views.py | from django.shortcuts import render
from django.conf import settings
from django_telegrambot.apps import DjangoTelegramBot
# Create your views here.
def index(request):
bot_list = DjangoTelegramBot.bots
context = {'bot_list': bot_list, 'update_mode':settings.DJANGO_TELEGRAMBOT['MODE']}
return render(request, 'bot/index.html', context)
| from django.shortcuts import render
from django.conf import settings
from django_telegrambot.apps import DjangoTelegramBot
# Create your views here.
def index(request):
bot_list = DjangoTelegramBot.bots
context = {'bot_list': bot_list, 'update_mode':settings.TELEGRAM_BOT_MODE}
return render(request, 'bot/index.html', context)
| Python | 0.000001 |
f0f31ea0a86620b77073b5da0dca386b337b98da | update prop2part tests | tests/prop2part_test.py | tests/prop2part_test.py | #!/usr/bin/env python
"""
Tests for abstract.prop2partition
"""
from tulip.abstract import prop2part
import tulip.polytope as pc
import numpy as np
def prop2part_test():
state_space = pc.Polytope.from_box(np.array([[0., 2.],[0., 2.]]))
cont_props = []
A = []
b = []
A.append(np.array([[1., 0.],
[-1., 0.],
[0., 1.],
[0., -1.]]))
b.append(np.array([[.5, 0., .5, 0.]]).T)
cont_props.append(pc.Polytope(A[0], b[0]))
A.append(np.array([[1., 0.],
[-1., 0.],
[0., 1.],
[0., -1.]]))
b.append(np.array([[2., -1.5, 2., -1.5]]).T)
cont_props.append(pc.Polytope(A[1], b[1]))
cont_props_dict = {"C"+str(i) : pc.Polytope(A[i], b[i]) for i in range(2)}
mypartition = prop2part(state_space, cont_props_dict)
print(mypartition)
ref_adjacency = np.array([[1,0,1],[0,1,1],[1,1,1]])
assert np.all(mypartition.adj.todense() == ref_adjacency)
assert len(mypartition.regions) == 3
for reg in mypartition.regions[0:2]:
assert len(reg.props) == 1
assert len(reg.list_poly) == 1
assert cont_props_dict == mypartition.prop_regions
assert len(mypartition.regions[2].props) == 0
assert len(mypartition.regions[2].list_poly) == 3
dum = state_space.copy()
for reg in mypartition.regions[0:2]:
dum = dum.diff(reg)
assert pc.is_empty(dum.diff(mypartition.regions[2]) )
assert pc.is_empty(mypartition.regions[2].diff(dum) )
| #!/usr/bin/env python
"""
Tests for abstract.prop2partition
"""
from tulip.abstract import prop2part
import tulip.polytope as pc
import numpy as np
def prop2part_test():
state_space = pc.Polytope.from_box(np.array([[0., 2.],[0., 2.]]))
cont_props = []
A = []
b = []
A.append(np.array([[1., 0.],
[-1., 0.],
[0., 1.],
[0., -1.]]))
b.append(np.array([[.5, 0., .5, 0.]]).T)
cont_props.append(pc.Polytope(A[0], b[0]))
A.append(np.array([[1., 0.],
[-1., 0.],
[0., 1.],
[0., -1.]]))
b.append(np.array([[2., -1.5, 2., -1.5]]).T)
cont_props.append(pc.Polytope(A[1], b[1]))
cont_props_dict = dict([("C"+str(i), pc.Polytope(A[i], b[i])) for i in range(2)])
mypartition = prop2part(state_space, cont_props_dict)
ref_adjacency = np.array([[1,0,1],[0,1,1],[1,1,1]])
assert np.all(mypartition.adj.todense() == ref_adjacency)
assert len(mypartition.regions) == 3
for reg in mypartition.regions[0:2]:
assert len(reg.props) == 2
assert len(reg.list_poly) == 1
i = [i for i in range(len(reg.props)) if reg.props[i] == 1]
assert len(i) == 1
i = i[0]
assert cont_props_dict == mypartition.cont_props
ref_V = pc.extreme(mypartition.cont_props)
ref_V = set([(v[0],v[1]) for v in ref_V.tolist()])
actual_V = pc.extreme(reg.list_poly[0])
actual_V = set([(v[0],v[1]) for v in actual_V.tolist()])
assert ref_V == actual_V
assert len(mypartition.regions[2].props) == 2
assert sum(mypartition.regions[2].props) == 0
assert len(mypartition.regions[2].list_poly) == 3
dum = state_space.copy()
for reg in mypartition.regions[0:2]:
dum = dum.diff(reg)
assert pc.is_empty(dum.diff(mypartition.regions[2]) )
assert pc.is_empty(mypartition.regions[2].diff(dum) )
| Python | 0 |
81bbe22cd92ea834f059b963cf0d0127f2d45a19 | Add SUSPENDED to new "error" status group. | core/constants.py | core/constants.py |
""" Norc-specific constants.
Any constants required for the core execution of Norc
should be defined here if possible.
"""
# The maximum number of tasks an Executor is allowed to run at once.
CONCURRENCY_LIMIT = 4
# How often a scheduler can poll the database for new schedules.
SCHEDULER_PERIOD = 5
# How many new schedules the scheduler can pull from the database at once.
SCHEDULER_LIMIT = 10000
EXECUTOR_PERIOD = 0.5
# A list of all Task implementations.
TASK_MODELS = [] # NOTE: This is dynamically generated by MetaTask.
# A list of all AbstractInstance implementations.
INSTANCE_MODELS = [] # NOTE: This is dynamically generated by MetaInstance.
# How often hearts should beat, in seconds.
HEARTBEAT_PERIOD = 3
# How long a heart can go without beating before being considered failed.
# This has serious implications for how long before an error in the system
# is caught. If the number is too small, though, a slow database could
# cause failsafes to activate erroneously.
HEARTBEAT_FAILED = HEARTBEAT_PERIOD + 20
class MetaConstant(type):
"""Generates the NAMES attribute of the Status class."""
def __new__(cls, name, bases, dct):
"""Magical function to dynamically create NAMES and ALL."""
NAMES = {}
ALL = []
for k, v in dct.iteritems():
if type(v) == int:
assert not v in NAMES, "Can't have duplicate values."
NAMES[v] = k
ALL.append(v)
dct['NAMES'] = NAMES
dct['ALL'] = ALL
return type.__new__(cls, name, bases, dct)
def name(cls, item):
return cls.NAMES.get(item)
class Status(object):
"""Class to hold all status constants.
The MetaStatus class automatically generates a NAMES attribute which
contains the reverse dict for retrieving a status name from its value.
The numbers should probably be moved further apart, but SUCCESS being
7 and FAILURE being 13 just seems so fitting...
"""
__metaclass__ = MetaConstant
# Transitive states.
CREATED = 1 # Created but nothing else.
RUNNING = 2 # Is currently running.
PAUSED = 3 # Currently paused.
STOPPING = 4 # In the process of stopping; should become ENDED.
SUSPENDED = 5 # Errors need addressing before a restart.
# Final states.
SUCCESS = 7 # Succeeded.
ENDED = 8 # Ended gracefully.
KILLED = 9 # Forcefully killed.
HANDLED = 12 # Was ERROR, but the problem's been handled.
# Failure states.
FAILURE = 13 # User defined failure (Task returned False).
ERROR = 14 # There was an error during execution.
TIMEDOUT = 15 # The execution timed out.
INTERRUPTED = 16 # Execution was interrupted before completion.
@staticmethod
def is_final(status):
"""Whether the given status counts as final."""
return status >= 7
@staticmethod
def is_failure(status):
"""Whether the given status counts as a failure."""
return status >= 13
@staticmethod
def GROUPS(name):
"""Used for accessing groups of Statuses by a string name."""
return {
"active": filter(lambda s: s < 7, Status.ALL),
"running": [Status.RUNNING],
"succeeded": filter(lambda s: s >= 7 and s < 13, Status.ALL),
"failed": filter(lambda s: s >= 13, Status.ALL),
"final": filter(lambda s: s >= 7, Status.ALL),
"error": filter(lambda s: s >= 13, Status.ALL) +
[Status.SUSPENDED],
}.get(name.lower())
class Request(object):
""""""
__metaclass__ = MetaConstant
# Requests to change to a final state.
STOP = 1
KILL = 2
# Other features.
PAUSE = 7
RESUME = 8
RELOAD = 9
|
""" Norc-specific constants.
Any constants required for the core execution of Norc
should be defined here if possible.
"""
# The maximum number of tasks an Executor is allowed to run at once.
CONCURRENCY_LIMIT = 4
# How often a scheduler can poll the database for new schedules.
SCHEDULER_PERIOD = 5
# How many new schedules the scheduler can pull from the database at once.
SCHEDULER_LIMIT = 10000
EXECUTOR_PERIOD = 0.5
# A list of all Task implementations.
TASK_MODELS = [] # NOTE: This is dynamically generated by MetaTask.
# A list of all AbstractInstance implementations.
INSTANCE_MODELS = [] # NOTE: This is dynamically generated by MetaInstance.
# How often hearts should beat, in seconds.
HEARTBEAT_PERIOD = 3
# How long a heart can go without beating before being considered failed.
# This has serious implications for how long before an error in the system
# is caught. If the number is too small, though, a slow database could
# cause failsafes to activate erroneously.
HEARTBEAT_FAILED = HEARTBEAT_PERIOD + 20
class MetaConstant(type):
"""Generates the NAMES attribute of the Status class."""
def __new__(cls, name, bases, dct):
"""Magical function to dynamically create NAMES and ALL."""
NAMES = {}
ALL = []
for k, v in dct.iteritems():
if type(v) == int:
assert not v in NAMES, "Can't have duplicate values."
NAMES[v] = k
ALL.append(v)
dct['NAMES'] = NAMES
dct['ALL'] = ALL
return type.__new__(cls, name, bases, dct)
def name(cls, item):
return cls.NAMES.get(item)
class Status(object):
"""Class to hold all status constants.
The MetaStatus class automatically generates a NAMES attribute which
contains the reverse dict for retrieving a status name from its value.
The numbers should probably be moved further apart, but SUCCESS being
7 and FAILURE being 13 just seems so fitting...
"""
__metaclass__ = MetaConstant
# Transitive states.
CREATED = 1 # Created but nothing else.
RUNNING = 2 # Is currently running.
PAUSED = 3 # Currently paused.
STOPPING = 4 # In the process of stopping; should become ENDED.
SUSPENDED = 5 # Errors need addressing before a restart.
# Final states.
SUCCESS = 7 # Succeeded.
ENDED = 8 # Ended gracefully.
KILLED = 9 # Forcefully killed.
HANDLED = 12 # Was ERROR, but the problem's been handled.
# Failure states.
FAILURE = 13 # User defined failure (Task returned False).
ERROR = 14 # There was an error during execution.
TIMEDOUT = 15 # The execution timed out.
INTERRUPTED = 16 # Execution was interrupted before completion.
@staticmethod
def is_final(status):
"""Whether the given status counts as final."""
return status >= 7
@staticmethod
def is_failure(status):
"""Whether the given status counts as a failure."""
return status >= 13
@staticmethod
def GROUPS(name):
"""Used for accessing groups of Statuses by a string name."""
return {
"active": filter(lambda s: s < 7, Status.ALL),
"running": [Status.RUNNING],
"succeeded": filter(lambda s: s >= 7 and s < 13, Status.ALL),
"failed": filter(lambda s: s >= 13, Status.ALL),
"final": filter(lambda s: s >= 7, Status.ALL),
}.get(name.lower())
class Request(object):
""""""
__metaclass__ = MetaConstant
# Requests to change to a final state.
STOP = 1
KILL = 2
# Other features.
PAUSE = 7
RESUME = 8
RELOAD = 9
| Python | 0 |
77cb3f0037dad2444560d8231e6ffb4f072e19f5 | Remove Continue click after New | tests/steps/creation.py | tests/steps/creation.py | # -*- coding: UTF-8 -*-
from __future__ import unicode_literals
from behave import step
from dogtail.rawinput import typeText
from dogtail.predicate import GenericPredicate
from time import sleep
from utils import get_showing_node_name
@step('Create new box "{name}" from "{item}" menuitem')
def create_machine_from_menuitem(context, name, item):
"""
Create new box, wait till it finishes and save its IP
"""
context.execute_steps(u"""
* Create new box from menu "%s"
* Press "Create"
* Wait for "sleep 1" end
* Hit "Enter"
* Wait for "sleep 1" end
* Hit "Enter"
* Wait for "sleep 1" end
* Hit "Enter"
* Save IP for machine "%s"
* Press "back" in "%s" vm
""" %(item, name, name))
@step('Create new box "{name}"')
def create_machine(context, name):
"""
Same as create_machine_from_menuitem except it assumes menu item and created box to have the same name.
"""
context.execute_steps(u"""
* Create new box "%s" from "%s" menuitem
""" %(name, name))
@step('Create new box from file "{location}"')
def create_new_vm_via_file(context, location):
path = location.split('/')
context.app.child('New').click()
context.app.child('Continue').click()
context.app.child('Select a file').click()
for item in path:
context.app.child(item).click()
context.app.child('Open').click()
@step('Create new box from url "{url}"')
def create_new_vm_via_url(context, url):
context.app.child('New').click()
context.app.child('Continue').click()
context.app.child('Enter URL').click()
typeText(url)
context.app.child('Continue').click()
if url.find('http') != -1:
half_minutes = 0
while half_minutes < 120:
half_minutes += 1
if context.app.findChild(
GenericPredicate(name='Choose express install to automatically '
'preconfigure the box with optimal settings.'),
retry=False,
requireResult=False):
return
create = context.app.child('Create')
if create.sensitive and create.showing:
create.click()
break
else:
sleep(30)
@step('Create new box from menu "{sys_name}"')
def create_new_vm_from_menu(context, sys_name):
context.app.child('New').click()
get_showing_node_name(sys_name, context.app).click()
@step('Import machine "{name}" from image "{location}"')
def import_image(context, name, location):
context.execute_steps(u"""
* Create new box from file "%s"
* Press "Create"
* Save IP for machine "%s"
""" %(location, name))
@step('Initiate new box "{name}" installation from "{item}" menuitem')
def create_machine_from_menuitem_no_wait(context, name, item):
"""
Initiate new box installation but don't save its IP nor wait for it to be ready
"""
context.execute_steps(u"""
* Create new box from menu "%s"
* Press "Create"
* Wait for "sleep 1" end
* Hit "Enter"
* Wait for "sleep 1" end
* Hit "Enter"
* Press "back" in "%s" vm
""" %(item, name))
@step('Initiate new box "{name}" installation')
def create_machine_no_wait(context, name):
"""
Same as create_machine_from_menuitem_no_wait except it assumes menu item and created box to have the same name.
"""
context.execute_steps(u"""
* Initiate new box "%s" installation from "%s" menuitem
""" %(name, name))
| # -*- coding: UTF-8 -*-
from __future__ import unicode_literals
from behave import step
from dogtail.rawinput import typeText
from dogtail.predicate import GenericPredicate
from time import sleep
from utils import get_showing_node_name
@step('Create new box "{name}" from "{item}" menuitem')
def create_machine_from_menuitem(context, name, item):
"""
Create new box, wait till it finishes and save its IP
"""
context.execute_steps(u"""
* Create new box from menu "%s"
* Press "Create"
* Wait for "sleep 1" end
* Hit "Enter"
* Wait for "sleep 1" end
* Hit "Enter"
* Wait for "sleep 1" end
* Hit "Enter"
* Save IP for machine "%s"
* Press "back" in "%s" vm
""" %(item, name, name))
@step('Create new box "{name}"')
def create_machine(context, name):
"""
Same as create_machine_from_menuitem except it assumes menu item and created box to have the same name.
"""
context.execute_steps(u"""
* Create new box "%s" from "%s" menuitem
""" %(name, name))
@step('Create new box from file "{location}"')
def create_new_vm_via_file(context, location):
path = location.split('/')
context.app.child('New').click()
context.app.child('Continue').click()
context.app.child('Select a file').click()
for item in path:
context.app.child(item).click()
context.app.child('Open').click()
@step('Create new box from url "{url}"')
def create_new_vm_via_url(context, url):
context.app.child('New').click()
context.app.child('Continue').click()
context.app.child('Enter URL').click()
typeText(url)
context.app.child('Continue').click()
if url.find('http') != -1:
half_minutes = 0
while half_minutes < 120:
half_minutes += 1
if context.app.findChild(
GenericPredicate(name='Choose express install to automatically '
'preconfigure the box with optimal settings.'),
retry=False,
requireResult=False):
return
create = context.app.child('Create')
if create.sensitive and create.showing:
create.click()
break
else:
sleep(30)
@step('Create new box from menu "{sys_name}"')
def create_new_vm_from_menu(context, sys_name):
context.app.child('New').click()
context.app.child('Continue').click()
get_showing_node_name(sys_name, context.app).click()
@step('Import machine "{name}" from image "{location}"')
def import_image(context, name, location):
context.execute_steps(u"""
* Create new box from file "%s"
* Press "Create"
* Save IP for machine "%s"
""" %(location, name))
@step('Initiate new box "{name}" installation from "{item}" menuitem')
def create_machine_from_menuitem_no_wait(context, name, item):
"""
Initiate new box installation but don't save its IP nor wait for it to be ready
"""
context.execute_steps(u"""
* Create new box from menu "%s"
* Press "Create"
* Wait for "sleep 1" end
* Hit "Enter"
* Wait for "sleep 1" end
* Hit "Enter"
* Press "back" in "%s" vm
""" %(item, name))
@step('Initiate new box "{name}" installation')
def create_machine_no_wait(context, name):
"""
Same as create_machine_from_menuitem_no_wait except it assumes menu item and created box to have the same name.
"""
context.execute_steps(u"""
* Initiate new box "%s" installation from "%s" menuitem
""" %(name, name))
| Python | 0 |
d32b2494c1a72d040a651bbb2f0abb7a94c1d2db | remove stray line | tests/test-datatypes.py | tests/test-datatypes.py | """Test datatypes."""
from statscraper.datatypes import Datatype
from statscraper import Dimension, DimensionValue
def test_allowed_values():
"""Datatypes shuold have allowed values."""
dt = Datatype("region")
assert("Ale kommun" in dt.allowed_values)
def test_b():
"""Dimension values should be translatable."""
d = Dimension("municipality", datatype="region", domain="sweden/municipalities")
dv = DimensionValue("Ale kommun", d)
assert(dv.translate("numerical") == "1440")
| """Test datatypes."""
from statscraper.datatypes import Datatype
from statscraper import Dimension, DimensionValue
def test_allowed_values():
"""Datatypes shuold have allowed values."""
dt = Datatype("region")
assert("Ale kommun" in dt.allowed_values)
def test_b():
"""Dimension values should be translatable."""
d = Dimension("municipality", datatype="region", domain="sweden/municipalities")
dv = DimensionValue("Ale kommun", d)
assert(dv.translate("numerical") == "1440")
| Python | 0.002086 |
e8cffceecf79b42790ccab1c61a2da06ae6529cd | comment no longer relevant. dealt with 2FA already | corehq/apps/sso/backends.py | corehq/apps/sso/backends.py | from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.models import User
from corehq.apps.sso.models import IdentityProvider, AuthenticatedEmailDomain
from corehq.apps.sso.utils.user_helpers import get_email_domain_from_username
class SsoBackend(ModelBackend):
"""
Authenticates against an IdentityProvider and SAML2 session data.
"""
def authenticate(self, request, username, idp_slug, is_handshake_successful):
if not (request and username and idp_slug and is_handshake_successful):
return None
try:
identity_provider = IdentityProvider.objects.get(slug=idp_slug)
except IdentityProvider.DoesNotExist:
# not sure how we would even get here, but just in case
request.sso_login_error = f"Identity Provider {idp_slug} does not exist."
return None
if not identity_provider.is_active:
request.sso_login_error = f"This Identity Provider {idp_slug} is not active."
return None
email_domain = get_email_domain_from_username(username)
if not email_domain:
# not a valid username
request.sso_login_error = f"Username {username} is not valid."
return None
if not AuthenticatedEmailDomain.objects.filter(
email_domain=email_domain, identity_provider=identity_provider
).exists():
# if this user's email domain is not authorized by this identity
# do not continue with authentication
request.sso_login_error = (
f"The Email Domain {email_domain} is not allowed to "
f"authenticate with this Identity Provider ({idp_slug})."
)
return None
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
# todo handle user creation based on information from request/session
# do this prior to handling the invite scenario and new user scenario
request.sso_login_error = f"User {username} does not exist."
return None
request.sso_login_error = None
return user
| from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.models import User
from corehq.apps.sso.models import IdentityProvider, AuthenticatedEmailDomain
from corehq.apps.sso.utils.user_helpers import get_email_domain_from_username
class SsoBackend(ModelBackend):
"""
Authenticates against an IdentityProvider and SAML2 session data.
"""
def authenticate(self, request, username, idp_slug, is_handshake_successful):
if not (request and username and idp_slug and is_handshake_successful):
return None
try:
identity_provider = IdentityProvider.objects.get(slug=idp_slug)
except IdentityProvider.DoesNotExist:
# not sure how we would even get here, but just in case
request.sso_login_error = f"Identity Provider {idp_slug} does not exist."
return None
if not identity_provider.is_active:
request.sso_login_error = f"This Identity Provider {idp_slug} is not active."
return None
email_domain = get_email_domain_from_username(username)
if not email_domain:
# not a valid username
request.sso_login_error = f"Username {username} is not valid."
return None
if not AuthenticatedEmailDomain.objects.filter(
email_domain=email_domain, identity_provider=identity_provider
).exists():
# if this user's email domain is not authorized by this identity
# do not continue with authentication
request.sso_login_error = (
f"The Email Domain {email_domain} is not allowed to "
f"authenticate with this Identity Provider ({idp_slug})."
)
return None
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
# todo handle user creation based on information from request/session
# do this prior to handling the invite scenario and new user scenario
request.sso_login_error = f"User {username} does not exist."
return None
request.sso_login_error = None
# todo what happens with 2FA required here?
return user
| Python | 0 |
52c3981b8880085d060f874eb8feace6ac125411 | Replace exact equality assert with isclose in bands cli | tests/test_cli_bands.py | tests/test_cli_bands.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Dominik Gresch <greschd@gmx.ch>
import os
import pytest
import tempfile
import numpy as np
import bandstructure_utils as bs
from click.testing import CliRunner
import tbmodels
from tbmodels._cli import cli
from parameters import SAMPLES_DIR
def test_cli_bands():
samples_dir = os.path.join(SAMPLES_DIR, 'cli_bands')
runner = CliRunner()
with tempfile.NamedTemporaryFile() as out_file:
run = runner.invoke(
cli,
[
'bands',
'-o', out_file.name,
'-k', os.path.join(samples_dir, 'kpoints.hdf5'),
'-i', os.path.join(samples_dir, 'silicon_model.hdf5')
],
catch_exceptions=False
)
print(run.output)
res = bs.io.load(out_file.name)
reference = bs.io.load(os.path.join(samples_dir, 'silicon_bands.hdf5'))
np.testing.assert_allclose(bs.compare.difference(res, reference), 0)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Dominik Gresch <greschd@gmx.ch>
import os
import pytest
import tempfile
import bandstructure_utils as bs
from click.testing import CliRunner
import tbmodels
from tbmodels._cli import cli
from parameters import SAMPLES_DIR
def test_cli_bands():
samples_dir = os.path.join(SAMPLES_DIR, 'cli_bands')
runner = CliRunner()
with tempfile.NamedTemporaryFile() as out_file:
run = runner.invoke(
cli,
[
'bands',
'-o', out_file.name,
'-k', os.path.join(samples_dir, 'kpoints.hdf5'),
'-i', os.path.join(samples_dir, 'silicon_model.hdf5')
],
catch_exceptions=False
)
print(run.output)
res = bs.io.load(out_file.name)
reference = bs.io.load(os.path.join(samples_dir, 'silicon_bands.hdf5'))
assert bs.compare.difference(res, reference) == 0
| Python | 0.000116 |
8b4b5eb2506feed164b69efa66b4cdae159182c3 | Fix pre-commit issues in the cli_parse tests. | tests/test_cli_parse.py | tests/test_cli_parse.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2015-2018, ETH Zurich, Institut fuer Theoretische Physik
# Author: Dominik Gresch <greschd@gmx.ch>
"""Tests for the 'parse' CLI command."""
import tempfile
import pytest
from click.testing import CliRunner
import tbmodels
from tbmodels._cli import cli
@pytest.mark.parametrize('pos_kind', ['wannier', 'nearest_atom'])
@pytest.mark.parametrize('prefix', ['silicon', 'bi'])
def test_cli_parse(models_equal, prefix, sample, pos_kind):
"""Test the 'parse' command with different 'prefix' and 'pos_kind'."""
runner = CliRunner()
with tempfile.NamedTemporaryFile() as out_file:
run = runner.invoke(
cli, ['parse', '-o', out_file.name, '-f',
sample(''), '-p', prefix, '--pos-kind', pos_kind],
catch_exceptions=False
)
print(run.output)
model_res = tbmodels.Model.from_hdf5_file(out_file.name)
model_reference = tbmodels.Model.from_wannier_folder(folder=sample(''), prefix=prefix, pos_kind=pos_kind)
models_equal(model_res, model_reference)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2015-2018, ETH Zurich, Institut fuer Theoretische Physik
# Author: Dominik Gresch <greschd@gmx.ch>
import pytest
import tempfile
from click.testing import CliRunner
import tbmodels
from tbmodels._cli import cli
@pytest.mark.parametrize('pos_kind', ['wannier', 'nearest_atom'])
@pytest.mark.parametrize('prefix', ['silicon', 'bi'])
def test_cli_parse(models_equal, prefix, sample, pos_kind):
runner = CliRunner()
with tempfile.NamedTemporaryFile() as out_file:
run = runner.invoke(
cli, ['parse', '-o', out_file.name, '-f',
sample(''), '-p', prefix, '--pos-kind', pos_kind],
catch_exceptions=False
)
print(run.output)
model_res = tbmodels.Model.from_hdf5_file(out_file.name)
model_reference = tbmodels.Model.from_wannier_folder(folder=sample(''), prefix=prefix, pos_kind=pos_kind)
models_equal(model_res, model_reference)
| Python | 0 |
78434bafbcc60ba7207d63481d3179474ae939ed | change to using scontrol for getting job state by default | pipeline/pipeline/batch.py | pipeline/pipeline/batch.py | import os, re
import subprocess
import time
def write_slurm_script(filename, cmd, **batch_options):
with open(filename, 'w') as fout:
fout.write('#!/bin/bash\n')
for opts in batch_options.items():
fout.write('#SBATCH --{0}={1}\n'.format(*opts))
fout.write('\n')
fout.write('{0}\n'.format(cmd))
def get_job_status(jobid, wait=30):
"""Returns status of slurm job <jobid>
Currently parses output of `sacct`. Perhaps would
be a good idea to move this to pyslurm (though this would
add a dependency.)
"""
cmd = 'scontrol show job {0}'.format(jobid)
output = subprocess.check_output(cmd, shell=True)
m = re.search('JobState=(\w+)', output)
status = None
if m:
status = m.group(1)
else:
repeat = 0
while not m and repeat < wait:
cmd = 'sacct -b -j {0}'.format(jobid)
output = subprocess.check_output(cmd, shell=True)
m = re.search('{0}\s+([A-Z]+)'.format(jobid), output)
time.sleep(1)
repeat += 1
if m:
status = m.group(1)
if status is None:
raise ValueError('Job not found: {0}'.format(jobid))
else:
return status
| import os, re
import subprocess
import time
def write_slurm_script(filename, cmd, **batch_options):
with open(filename, 'w') as fout:
fout.write('#!/bin/bash\n')
for opts in batch_options.items():
fout.write('#SBATCH --{0}={1}\n'.format(*opts))
fout.write('\n')
fout.write('{0}\n'.format(cmd))
def get_job_status(jobid, wait=30):
"""Returns status of slurm job <jobid>
Currently parses output of `sacct`. Perhaps would
be a good idea to move this to pyslurm (though this would
add a dependency.)
"""
m = False
repeat = 0
while not m and repeat < wait:
cmd = 'sacct -b -j {0}'.format(jobid)
output = subprocess.check_output(cmd, shell=True)
m = re.search('{0}\s+([A-Z]+)'.format(jobid), output)
time.sleep(1)
repeat += 1
if not m:
raise ValueError('Job not found: {0}'.format(jobid))
return m.group(1)
| Python | 0 |
047a1a6072905e650d8a8c6dee3078a14b9df759 | Use Path instead of PosixPath | tests/test_corrector.py | tests/test_corrector.py | # -*- coding: utf-8 -*-
import pytest
from pathlib import Path
from thefuck import corrector, const
from tests.utils import Rule, Command, CorrectedCommand
from thefuck.corrector import get_corrected_commands, organize_commands
class TestGetRules(object):
@pytest.fixture
def glob(self, mocker):
results = {}
mocker.patch('pathlib.Path.glob',
new_callable=lambda: lambda *_: results.pop('value', []))
return lambda value: results.update({'value': value})
@pytest.fixture(autouse=True)
def load_source(self, monkeypatch):
monkeypatch.setattr('thefuck.types.load_source',
lambda x, _: Rule(x))
def _compare_names(self, rules, names):
assert {r.name for r in rules} == set(names)
@pytest.mark.parametrize('paths, conf_rules, exclude_rules, loaded_rules', [
(['git.py', 'bash.py'], const.DEFAULT_RULES, [], ['git', 'bash']),
(['git.py', 'bash.py'], ['git'], [], ['git']),
(['git.py', 'bash.py'], const.DEFAULT_RULES, ['git'], ['bash']),
(['git.py', 'bash.py'], ['git'], ['git'], [])])
def test_get_rules(self, glob, settings, paths, conf_rules, exclude_rules,
loaded_rules):
glob([Path(path) for path in paths])
settings.update(rules=conf_rules,
priority={},
exclude_rules=exclude_rules)
rules = corrector.get_rules()
self._compare_names(rules, loaded_rules)
def test_get_corrected_commands(mocker):
command = Command('test', 'test', 'test')
rules = [Rule(match=lambda _: False),
Rule(match=lambda _: True,
get_new_command=lambda x: x.script + '!', priority=100),
Rule(match=lambda _: True,
get_new_command=lambda x: [x.script + '@', x.script + ';'],
priority=60)]
mocker.patch('thefuck.corrector.get_rules', return_value=rules)
assert [cmd.script for cmd in get_corrected_commands(command)] \
== ['test!', 'test@', 'test;']
def test_organize_commands():
"""Ensures that the function removes duplicates and sorts commands."""
commands = [CorrectedCommand('ls'), CorrectedCommand('ls -la', priority=9000),
CorrectedCommand('ls -lh', priority=100),
CorrectedCommand(u'echo café', priority=200),
CorrectedCommand('ls -lh', priority=9999)]
assert list(organize_commands(iter(commands))) \
== [CorrectedCommand('ls'), CorrectedCommand('ls -lh', priority=100),
CorrectedCommand(u'echo café', priority=200),
CorrectedCommand('ls -la', priority=9000)]
| # -*- coding: utf-8 -*-
import pytest
from pathlib import PosixPath
from thefuck import corrector, const
from tests.utils import Rule, Command, CorrectedCommand
from thefuck.corrector import get_corrected_commands, organize_commands
class TestGetRules(object):
@pytest.fixture
def glob(self, mocker):
results = {}
mocker.patch('pathlib.Path.glob',
new_callable=lambda: lambda *_: results.pop('value', []))
return lambda value: results.update({'value': value})
@pytest.fixture(autouse=True)
def load_source(self, monkeypatch):
monkeypatch.setattr('thefuck.types.load_source',
lambda x, _: Rule(x))
def _compare_names(self, rules, names):
assert {r.name for r in rules} == set(names)
@pytest.mark.parametrize('paths, conf_rules, exclude_rules, loaded_rules', [
(['git.py', 'bash.py'], const.DEFAULT_RULES, [], ['git', 'bash']),
(['git.py', 'bash.py'], ['git'], [], ['git']),
(['git.py', 'bash.py'], const.DEFAULT_RULES, ['git'], ['bash']),
(['git.py', 'bash.py'], ['git'], ['git'], [])])
def test_get_rules(self, glob, settings, paths, conf_rules, exclude_rules,
loaded_rules):
glob([PosixPath(path) for path in paths])
settings.update(rules=conf_rules,
priority={},
exclude_rules=exclude_rules)
rules = corrector.get_rules()
self._compare_names(rules, loaded_rules)
def test_get_corrected_commands(mocker):
command = Command('test', 'test', 'test')
rules = [Rule(match=lambda _: False),
Rule(match=lambda _: True,
get_new_command=lambda x: x.script + '!', priority=100),
Rule(match=lambda _: True,
get_new_command=lambda x: [x.script + '@', x.script + ';'],
priority=60)]
mocker.patch('thefuck.corrector.get_rules', return_value=rules)
assert [cmd.script for cmd in get_corrected_commands(command)] \
== ['test!', 'test@', 'test;']
def test_organize_commands():
"""Ensures that the function removes duplicates and sorts commands."""
commands = [CorrectedCommand('ls'), CorrectedCommand('ls -la', priority=9000),
CorrectedCommand('ls -lh', priority=100),
CorrectedCommand(u'echo café', priority=200),
CorrectedCommand('ls -lh', priority=9999)]
assert list(organize_commands(iter(commands))) \
== [CorrectedCommand('ls'), CorrectedCommand('ls -lh', priority=100),
CorrectedCommand(u'echo café', priority=200),
CorrectedCommand('ls -la', priority=9000)]
| Python | 0.000001 |
4180085e3bf6d0dd1f28233d4ac62198ebeb9814 | Fix wrong assert | tests/test_histogram.py | tests/test_histogram.py | # vim: set fileencoding=utf-8 :
import unittest
import pyvips
from .helpers import PyvipsTester, JPEG_FILE
class TestHistogram(PyvipsTester):
def test_hist_cum(self):
im = pyvips.Image.identity()
sum = im.avg() * 256
cum = im.hist_cum()
p = cum(255, 0)
self.assertEqual(p[0], sum)
def test_hist_equal(self):
im = pyvips.Image.new_from_file(JPEG_FILE)
im2 = im.hist_equal()
self.assertEqual(im.width, im2.width)
self.assertEqual(im.height, im2.height)
self.assertTrue(im.avg() < im2.avg())
self.assertTrue(im.deviate() < im2.deviate())
def test_hist_ismonotonic(self):
im = pyvips.Image.identity()
self.assertTrue(im.hist_ismonotonic())
def test_hist_local(self):
im = pyvips.Image.new_from_file(JPEG_FILE)
im2 = im.hist_local(10, 10)
self.assertEqual(im.width, im2.width)
self.assertEqual(im.height, im2.height)
self.assertTrue(im.avg() < im2.avg())
self.assertTrue(im.deviate() < im2.deviate())
if pyvips.at_least_libvips(8, 5):
im3 = im.hist_local(10, 10, max_slope=3)
self.assertEqual(im.width, im3.width)
self.assertEqual(im.height, im3.height)
self.assertTrue(im3.deviate() < im2.deviate())
def test_hist_match(self):
im = pyvips.Image.identity()
im2 = pyvips.Image.identity()
matched = im.hist_match(im2)
self.assertEqual((im - matched).abs().max(), 0.0)
def test_hist_norm(self):
im = pyvips.Image.identity()
im2 = im.hist_norm()
self.assertEqual((im - im2).abs().max(), 0.0)
def test_hist_plot(self):
im = pyvips.Image.identity()
im2 = im.hist_plot()
self.assertEqual(im2.width, 256)
self.assertEqual(im2.height, 256)
self.assertEqual(im2.format, pyvips.BandFormat.UCHAR)
self.assertEqual(im2.bands, 1)
def test_hist_map(self):
im = pyvips.Image.identity()
im2 = im.maplut(im)
self.assertEqual((im - im2).abs().max(), 0.0)
def test_percent(self):
im = pyvips.Image.new_from_file(JPEG_FILE).extract_band(1)
pc = im.percent(90)
msk = im <= pc
n_set = (msk.avg() * msk.width * msk.height) / 255.0
pc_set = 100 * n_set / (msk.width * msk.height)
self.assertAlmostEqual(pc_set, 90, places=0)
def test_hist_entropy(self):
im = pyvips.Image.new_from_file(JPEG_FILE).extract_band(1)
ent = im.hist_find().hist_entropy()
self.assertAlmostEqual(ent, 4.37, places=2)
def test_stdif(self):
im = pyvips.Image.new_from_file(JPEG_FILE)
im2 = im.stdif(10, 10)
self.assertEqual(im.width, im2.width)
self.assertEqual(im.height, im2.height)
# new mean should be closer to target mean
self.assertTrue(abs(im.avg() - 128) > abs(im2.avg() - 128))
if __name__ == '__main__':
unittest.main()
| # vim: set fileencoding=utf-8 :
import unittest
import pyvips
from .helpers import PyvipsTester, JPEG_FILE
class TestHistogram(PyvipsTester):
def test_hist_cum(self):
im = pyvips.Image.identity()
sum = im.avg() * 256
cum = im.hist_cum()
p = cum(255, 0)
self.assertEqual(p[0], sum)
def test_hist_equal(self):
im = pyvips.Image.new_from_file(JPEG_FILE)
im2 = im.hist_equal()
self.assertEqual(im.width, im2.width)
self.assertEqual(im.height, im2.height)
self.assertTrue(im.avg() < im2.avg())
self.assertTrue(im.deviate() < im2.deviate())
def test_hist_ismonotonic(self):
im = pyvips.Image.identity()
self.assertTrue(im.hist_ismonotonic())
def test_hist_local(self):
im = pyvips.Image.new_from_file(JPEG_FILE)
im2 = im.hist_local(10, 10)
self.assertEqual(im.width, im2.width)
self.assertEqual(im.height, im2.height)
self.assertTrue(im.avg() < im2.avg())
self.assertTrue(im.deviate() < im2.deviate())
if pyvips.at_least_libvips(8, 5):
im3 = im.hist_local(10, 10, max_slope=3)
self.assertEqual(im.width, im2.width)
self.assertEqual(im.height, im2.height)
self.assertTrue(im3.deviate() < im2.deviate())
def test_hist_match(self):
im = pyvips.Image.identity()
im2 = pyvips.Image.identity()
matched = im.hist_match(im2)
self.assertEqual((im - matched).abs().max(), 0.0)
def test_hist_norm(self):
im = pyvips.Image.identity()
im2 = im.hist_norm()
self.assertEqual((im - im2).abs().max(), 0.0)
def test_hist_plot(self):
im = pyvips.Image.identity()
im2 = im.hist_plot()
self.assertEqual(im2.width, 256)
self.assertEqual(im2.height, 256)
self.assertEqual(im2.format, pyvips.BandFormat.UCHAR)
self.assertEqual(im2.bands, 1)
def test_hist_map(self):
im = pyvips.Image.identity()
im2 = im.maplut(im)
self.assertEqual((im - im2).abs().max(), 0.0)
def test_percent(self):
im = pyvips.Image.new_from_file(JPEG_FILE).extract_band(1)
pc = im.percent(90)
msk = im <= pc
n_set = (msk.avg() * msk.width * msk.height) / 255.0
pc_set = 100 * n_set / (msk.width * msk.height)
self.assertAlmostEqual(pc_set, 90, places=0)
def test_hist_entropy(self):
im = pyvips.Image.new_from_file(JPEG_FILE).extract_band(1)
ent = im.hist_find().hist_entropy()
self.assertAlmostEqual(ent, 4.37, places=2)
def test_stdif(self):
im = pyvips.Image.new_from_file(JPEG_FILE)
im2 = im.stdif(10, 10)
self.assertEqual(im.width, im2.width)
self.assertEqual(im.height, im2.height)
# new mean should be closer to target mean
self.assertTrue(abs(im.avg() - 128) > abs(im2.avg() - 128))
if __name__ == '__main__':
unittest.main()
| Python | 0.0022 |
eacf0414f3fed58c31f280e9ad02df7e610d422d | add exception handle for KeyControlInterrupt | pagrant/basecommand.py | pagrant/basecommand.py | #!/usr/bin/python
#coding:utf8
__author__ = ['markshao']
import sys
from pagrant.vendors.myoptparser import optparse
from pagrant import cmdoptions
from pagrant.cmdparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
from pagrant.util import get_prog, format_exc
from pagrant.exceptions import PagrantError, PagrantConfigError, VirtualBootstrapError
from pagrant.log import logger
__all__ = ['Command']
class Command(object):
name = None
usage = None
hidden = None
summary = ""
def __init__(self):
parser_kw = {
'usage': self.usage,
'prog': '%s %s' % (get_prog(), self.name),
'formatter': UpdatingDefaultsHelpFormatter(),
'add_help_option': False,
'name': self.name,
'description': self.__doc__,
}
self.parser = ConfigOptionParser(**parser_kw)
# Commands should add options to this option group
optgroup_name = '%s Options' % self.name.capitalize()
self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name)
# Add the general options
gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, self.parser)
self.parser.add_option_group(gen_opts)
self.logger = None
def setup_logging(self):
pass
def parse_args(self, args):
# factored out for testability
return self.parser.parse_args(args)
def run(self, args):
"""
The sub command class should overide this method
"""
NotImplemented
def execute(self, args=None):
"""
The main interface for exectute the command
"""
import copy
args_bk = copy.deepcopy(args)
try:
options, args = self.parse_args(args)
except (optparse.OptionError, optparse.BadOptionError), e:
options = None
level = 1 # Notify
level += getattr(options, "verbose", 0)
level -= getattr(options, "verbose", 0)
level = logger.level_for_integer(4 - level)
complete_log = []
logger.add_consumers(
(level, sys.stdout),
(logger.DEBUG, complete_log.append),
)
if getattr(options, "log_explicit_levels", False):
logger.explicit_levels = True
self.logger = logger # if the sub command does nothing , we just reuse this log
self.setup_logging()
try:
self.run(args_bk)
except VirtualBootstrapError:
self.logger.fatal("ERROR: %s" % str(sys.exc_info()[1]))
self.logger.error('Exception information:\n%s' % format_exc())
sys.exit(1)
except PagrantConfigError:
self.logger.fatal("ERROR: %s" % str(sys.exc_info()[1]))
self.logger.error('Exception information:\n%s' % format_exc())
sys.exit(1)
except PagrantError:
self.logger.fatal("ERROR: %s" % str(sys.exc_info()[1]))
self.logger.error('Exception information:\n%s' % format_exc())
sys.exit(1)
except KeyboardInterrupt:
self.logger.fatal("The user interrupt the test case execution")
self.logger.error("The user interrupt the test case execution")
sys.exit(1)
| #!/usr/bin/python
#coding:utf8
__author__ = ['markshao']
import sys
from pagrant.vendors.myoptparser import optparse
from pagrant import cmdoptions
from pagrant.cmdparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
from pagrant.util import get_prog, format_exc
from pagrant.exceptions import PagrantError, PagrantConfigError, VirtualBootstrapError
from pagrant.log import logger
__all__ = ['Command']
class Command(object):
name = None
usage = None
hidden = None
summary = ""
def __init__(self):
parser_kw = {
'usage': self.usage,
'prog': '%s %s' % (get_prog(), self.name),
'formatter': UpdatingDefaultsHelpFormatter(),
'add_help_option': False,
'name': self.name,
'description': self.__doc__,
}
self.parser = ConfigOptionParser(**parser_kw)
# Commands should add options to this option group
optgroup_name = '%s Options' % self.name.capitalize()
self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name)
# Add the general options
gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, self.parser)
self.parser.add_option_group(gen_opts)
self.logger = None
def setup_logging(self):
pass
def parse_args(self, args):
# factored out for testability
return self.parser.parse_args(args)
def run(self, args):
"""
The sub command class should overide this method
"""
NotImplemented
def execute(self, args=None):
"""
The main interface for exectute the command
"""
import copy
args_bk = copy.deepcopy(args)
try:
options, args = self.parse_args(args)
except (optparse.OptionError, optparse.BadOptionError), e:
options = None
level = 1 # Notify
level += getattr(options, "verbose", 0)
level -= getattr(options, "verbose", 0)
level = logger.level_for_integer(4 - level)
complete_log = []
logger.add_consumers(
(level, sys.stdout),
(logger.DEBUG, complete_log.append),
)
if getattr(options, "log_explicit_levels", False):
logger.explicit_levels = True
self.logger = logger # if the sub command does nothing , we just reuse this log
self.setup_logging()
try:
self.run(args_bk)
except VirtualBootstrapError:
self.logger.fatal("ERROR: %s" % str(sys.exc_info()[1]))
self.logger.error('Exception information:\n%s' % format_exc())
sys.exit(1)
except PagrantConfigError:
self.logger.fatal("ERROR: %s" % str(sys.exc_info()[1]))
self.logger.error('Exception information:\n%s' % format_exc())
sys.exit(1)
except PagrantError:
self.logger.fatal("ERROR: %s" % str(sys.exc_info()[1]))
self.logger.error('Exception information:\n%s' % format_exc())
sys.exit(1)
| Python | 0.000001 |
8e5ffc7ed1db1d17e55cf538fc9858705ecc9dd2 | Bump version to 1.20.4 | platformio_api/__init__.py | platformio_api/__init__.py | # Copyright 2014-present Ivan Kravets <me@ikravets.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging.config
import os
VERSION = (1, 20, 4)
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio-api"
__description__ = ("An API for PlatformIO")
__url__ = "https://github.com/ivankravets/platformio-api"
__author__ = "Ivan Kravets"
__email__ = "me@ikravets.com"
__license__ = "MIT License"
__copyright__ = "Copyright (C) 2014-2017 Ivan Kravets"
config = dict(
SQLALCHEMY_DATABASE_URI=None,
GITHUB_LOGIN=None,
GITHUB_PASSWORD=None,
DL_PIO_DIR=None,
DL_PIO_URL=None,
MAX_DLFILE_SIZE=1024 * 1024 * 150, # 150 Mb
# Fuzzy search will not be applied to words shorter than the value below
SOLR_FUZZY_MIN_WORD_LENGTH=3,
LOGGING=dict(version=1)
)
assert "PIOAPI_CONFIG_PATH" in os.environ
with open(os.environ.get("PIOAPI_CONFIG_PATH")) as f:
config.update(json.load(f))
# configure logging for packages
logging.basicConfig()
logging.config.dictConfig(config['LOGGING'])
# setup time zone to UTC globally
os.environ['TZ'] = "+00:00"
try:
from time import tzset
tzset()
except ImportError:
pass
| # Copyright 2014-present Ivan Kravets <me@ikravets.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging.config
import os
VERSION = (1, 20, 3)
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio-api"
__description__ = ("An API for PlatformIO")
__url__ = "https://github.com/ivankravets/platformio-api"
__author__ = "Ivan Kravets"
__email__ = "me@ikravets.com"
__license__ = "MIT License"
__copyright__ = "Copyright (C) 2014-2017 Ivan Kravets"
config = dict(
SQLALCHEMY_DATABASE_URI=None,
GITHUB_LOGIN=None,
GITHUB_PASSWORD=None,
DL_PIO_DIR=None,
DL_PIO_URL=None,
MAX_DLFILE_SIZE=1024 * 1024 * 150, # 150 Mb
# Fuzzy search will not be applied to words shorter than the value below
SOLR_FUZZY_MIN_WORD_LENGTH=3,
LOGGING=dict(version=1)
)
assert "PIOAPI_CONFIG_PATH" in os.environ
with open(os.environ.get("PIOAPI_CONFIG_PATH")) as f:
config.update(json.load(f))
# configure logging for packages
logging.basicConfig()
logging.config.dictConfig(config['LOGGING'])
# setup time zone to UTC globally
os.environ['TZ'] = "+00:00"
try:
from time import tzset
tzset()
except ImportError:
pass
| Python | 0 |
5d6fd6f627b6fe073d95499a58575532618ef484 | Add many=True to test_recursive | tests/test_relations.py | tests/test_relations.py | from rest_framework import serializers
from rest_framework.test import APISimpleTestCase
from drf_extra_fields.relations import (
PresentablePrimaryKeyRelatedField,
PresentableSlugRelatedField,
)
from .utils import MockObject, MockQueryset
class PresentationSerializer(serializers.Serializer):
def to_representation(self, instance):
return {"pk": instance.pk, "name": instance.name}
class RecursiveSerializer(serializers.Serializer):
pk = serializers.CharField()
recursive_field = PresentablePrimaryKeyRelatedField(
queryset=MockQueryset([]),
presentation_serializer="tests.test_relations.RecursiveSerializer",
)
recursive_fields = PresentablePrimaryKeyRelatedField(
queryset=MockQueryset([]),
presentation_serializer="tests.test_relations.RecursiveSerializer",
many=True
)
class TestPresentablePrimaryKeyRelatedField(APISimpleTestCase):
def setUp(self):
self.queryset = MockQueryset(
[
MockObject(pk=1, name="foo"),
MockObject(pk=2, name="bar"),
MockObject(pk=3, name="baz"),
]
)
self.instance = self.queryset.items[2]
self.field = PresentablePrimaryKeyRelatedField(
queryset=self.queryset, presentation_serializer=PresentationSerializer
)
def test_representation(self):
representation = self.field.to_representation(self.instance)
expected_representation = PresentationSerializer(self.instance).data
assert representation == expected_representation
class TestPresentableSlugRelatedField(APISimpleTestCase):
def setUp(self):
self.queryset = MockQueryset(
[
MockObject(pk=1, name="foo"),
MockObject(pk=2, name="bar"),
MockObject(pk=3, name="baz"),
]
)
self.instance = self.queryset.items[2]
self.field = PresentableSlugRelatedField(
slug_field="name",
queryset=self.queryset,
presentation_serializer=PresentationSerializer,
)
def test_representation(self):
representation = self.field.to_representation(self.instance)
expected_representation = PresentationSerializer(self.instance).data
assert representation == expected_representation
class TestRecursivePresentablePrimaryKeyRelatedField(APISimpleTestCase):
def setUp(self):
self.related_object = MockObject(
pk=3,
name="baz",
recursive_fields=[
MockObject(pk=6, name="foo", recursive_fields=[], recursive_field=None),
MockObject(pk=7, name="baz", recursive_fields=[], recursive_field=None)
],
recursive_field=MockObject(
pk=4,
name="foobar",
recursive_fields=[],
recursive_field=MockObject(
pk=5,
name="barbaz",
recursive_fields=[],
recursive_field=None
)
),
)
def test_recursive(self):
serializer = RecursiveSerializer(self.related_object)
assert serializer.data == {
'pk': '3',
'recursive_field': {
'pk': '4',
'recursive_field': {
'pk': '5',
'recursive_field': None,
'recursive_fields': []
},
'recursive_fields': []
},
'recursive_fields': [
{
'pk': '6',
'recursive_field': None,
'recursive_fields': []
},
{
'pk': '7',
'recursive_field': None,
'recursive_fields': []
}
]
}
| from rest_framework import serializers
from rest_framework.test import APISimpleTestCase
from drf_extra_fields.relations import (
PresentablePrimaryKeyRelatedField,
PresentableSlugRelatedField,
)
from .utils import MockObject, MockQueryset
class PresentationSerializer(serializers.Serializer):
def to_representation(self, instance):
return {"pk": instance.pk, "name": instance.name}
class RecursiveSerializer(serializers.Serializer):
pk = serializers.CharField()
recursive_field = PresentablePrimaryKeyRelatedField(
queryset=MockQueryset([]),
presentation_serializer="tests.test_relations.RecursiveSerializer",
)
class TestPresentablePrimaryKeyRelatedField(APISimpleTestCase):
def setUp(self):
self.queryset = MockQueryset(
[
MockObject(pk=1, name="foo"),
MockObject(pk=2, name="bar"),
MockObject(pk=3, name="baz"),
]
)
self.instance = self.queryset.items[2]
self.field = PresentablePrimaryKeyRelatedField(
queryset=self.queryset, presentation_serializer=PresentationSerializer
)
def test_representation(self):
representation = self.field.to_representation(self.instance)
expected_representation = PresentationSerializer(self.instance).data
assert representation == expected_representation
class TestPresentableSlugRelatedField(APISimpleTestCase):
def setUp(self):
self.queryset = MockQueryset(
[
MockObject(pk=1, name="foo"),
MockObject(pk=2, name="bar"),
MockObject(pk=3, name="baz"),
]
)
self.instance = self.queryset.items[2]
self.field = PresentableSlugRelatedField(
slug_field="name",
queryset=self.queryset,
presentation_serializer=PresentationSerializer,
)
def test_representation(self):
representation = self.field.to_representation(self.instance)
expected_representation = PresentationSerializer(self.instance).data
assert representation == expected_representation
class TestRecursivePresentablePrimaryKeyRelatedField(APISimpleTestCase):
def setUp(self):
self.related_object = MockObject(
pk=3,
name="baz",
recursive_field=MockObject(
pk=4,
name="foobar",
recursive_field=MockObject(
pk=5,
name="barbaz",
recursive_field=None)
),
)
def test_recursive(self):
serializer = RecursiveSerializer(self.related_object)
assert serializer.data == {
'pk': '3', 'recursive_field': {
'pk': '4', 'recursive_field': {
'pk': '5', 'recursive_field': None
}
}
}
| Python | 0.998278 |
14a2ad18e70b6bc35e8d64c56b37520ebdb9fa3c | Add tests for full resource name | tests/test_resources.py | tests/test_resources.py | # Copyright 2019 The resource-policy-evaluation-library Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from rpe.resources import Resource
from rpe.resources.gcp import GcpBigqueryDataset
from rpe.resources.gcp import GcpComputeInstance
from rpe.resources.gcp import GcpSqlInstance
from rpe.resources.gcp import GcpStorageBucket
from rpe.resources.gcp import GcpStorageBucketIamPolicy
test_project = "my_project"
test_resource_name = "my_resource"
test_cases = [
(
{
'resource_type': 'bigquery.datasets',
'resource_name': test_resource_name,
'project_id': test_project
},
GcpBigqueryDataset,
'gcp.bigquery.datasets',
'//bigquery.googleapis.com/projects/my_project/datasets/my_resource'
),
(
{
'resource_type': 'compute.instances',
'resource_name': test_resource_name,
'resource_location': 'us-central1-a',
'project_id': test_project
},
GcpComputeInstance,
'gcp.compute.instances',
'//compute.googleapis.com/projects/my_project/zones/us-central1-a/instances/my_resource'
),
(
{
'resource_type': 'sqladmin.instances',
'resource_name': test_resource_name,
'project_id': test_project
},
GcpSqlInstance,
'gcp.sqladmin.instances',
'//sql.googleapis.com/projects/my_project/instances/my_resource'
),
(
{
'resource_type': 'storage.buckets',
'resource_name': test_resource_name,
'project_id': test_project
},
GcpStorageBucket,
'gcp.storage.buckets',
'//storage.googleapis.com/buckets/my_resource'
),
(
{
'resource_type': 'storage.buckets.iam',
'resource_name': test_resource_name,
'project_id': test_project
},
GcpStorageBucketIamPolicy,
'gcp.storage.buckets.iam',
'//storage.googleapis.com/buckets/my_resource'
)
]
@pytest.mark.parametrize(
"input,cls,rtype",
[(c[0], c[1], c[2]) for c in test_cases],
ids=[c[1].__name__ for c in test_cases])
def test_gcp_resource_factory(input, cls, rtype):
r = Resource.factory("gcp", input)
assert r.__class__ == cls
assert r.type() == rtype
def test_gcp_resource_factory_invalid():
with pytest.raises(AssertionError):
Resource.factory('gcp', {})
@pytest.mark.parametrize(
"input,frn",
[(c[0], c[3]) for c in test_cases],
ids=[c[1].__name__ for c in test_cases])
def test_gcp_full_resource_name(input, frn):
r = Resource.factory("gcp", input)
assert r.full_resource_name() == frn
| # Copyright 2019 The resource-policy-evaluation-library Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from rpe.resources import Resource
from rpe.resources.gcp import GcpBigqueryDataset
from rpe.resources.gcp import GcpComputeInstance
from rpe.resources.gcp import GcpSqlInstance
from rpe.resources.gcp import GcpStorageBucket
from rpe.resources.gcp import GcpStorageBucketIamPolicy
test_project = "my_project"
test_resource_name = "my_resource"
test_cases = [
(
{
'resource_type': 'bigquery.datasets',
'resource_name': test_resource_name,
'project_id': test_project
},
GcpBigqueryDataset,
'gcp.bigquery.datasets'
),
(
{
'resource_type': 'compute.instances',
'resource_name': test_resource_name,
'resource_location': 'us-central1-a',
'project_id': test_project
},
GcpComputeInstance,
'gcp.compute.instances'
),
(
{
'resource_type': 'sqladmin.instances',
'resource_name': test_resource_name,
'project_id': test_project
},
GcpSqlInstance,
'gcp.sqladmin.instances'
),
(
{
'resource_type': 'storage.buckets',
'resource_name': test_resource_name,
'project_id': test_project
},
GcpStorageBucket,
'gcp.storage.buckets'
),
(
{
'resource_type': 'storage.buckets.iam',
'resource_name': test_resource_name,
'project_id': test_project
},
GcpStorageBucketIamPolicy,
'gcp.storage.buckets.iam'
)
]
@pytest.mark.parametrize(
"input,cls,rtype",
test_cases,
ids=[cls.__name__ for (_, cls, _) in test_cases])
def test_gcp_resource_factory(input, cls, rtype):
r = Resource.factory("gcp", input)
assert r.__class__ == cls
assert r.type() == rtype
def test_gcp_resource_factory_invalid():
with pytest.raises(AssertionError):
Resource.factory('gcp', {})
| Python | 0 |
b144eb21003fc3f2e13e3d88b93a947a458cae24 | test designed to fail confirmed - reverted | tests/test_simulator.py | tests/test_simulator.py | # test_simulator.py written by Duncan Murray 28/4/2015
import unittest
import os
import sys
root_folder = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + os.sep + ".." + os.sep + 'vais')
ref_folder = root_folder + os.sep + "data"
sys.path.append(root_folder)
import planet as planet
import battle as battle
import character as character
import simulator as simulator
test_folder = os.getcwd() + os.sep + 'test_results'
test_file = test_folder + os.sep + 'battle.txt'
#rules_file = ref_folder + os.sep + 'battle.rules'
class VaisSimulatorTest(unittest.TestCase):
def setup(self):
print('running simulator tests')
def test_01_instantiate_sim(self):
traits = character.CharacterCollection(ref_folder)
a1 = traits.generate_random_character()
a2 = traits.generate_random_character()
a3 = traits.generate_random_character()
world = planet.Planet('SimWorld', num_seeds=5, width=20, height=15, wind=0.3, rain=0.10, sun=0.3, lava=0.4)
actions = ['walk', 'run', 'fight', 'buy', 'sell', 'collect']
s = simulator.SimAdventureGame('Test of SimWorld', world, [a1, a2, a3], [(2,2), (3,4), (4,4)], actions)
s.run()
self.assertEqual(len(str(s)), 231)
def test_02_move_character(self):
"""
add a single character to a world and move them around
"""
traits = character.CharacterCollection(ref_folder)
a1 = traits.generate_random_character()
world = planet.Planet('SimWorld', num_seeds=5, width=20, height=15, wind=0.3, rain=0.10, sun=0.3, lava=0.4)
actions = ['walk']
s = simulator.SimAdventureGame('Test of SimWorld', world, [a1], [(2,2)], actions)
s.run()
self.assertEqual(len(str(s)), 143)
self.assertEqual(s.agent_locations[0]['x'], 2)
self.assertEqual(s.agent_locations[0]['y'], 2)
s.command({'name':'walk', 'type':'move', 'direction':[0,1]}, a1)
self.assertEqual(s.agent_locations[0]['x'], 2)
self.assertEqual(s.agent_locations[0]['y'], 2)
s.command({'name':'walk', 'type':'move', 'direction':[1,1]}, a1)
def test_03_SimGameOfLife(self):
traits = character.CharacterCollection(ref_folder)
a1 = traits.generate_random_character()
world = planet.Planet('SimWorld', num_seeds=5, width=20, height=15, wind=0.3, rain=0.10, sun=0.3, lava=0.4)
actions = ['walk']
s = simulator.SimAdventureGame('Test of SimWorld', world, [a1], [(2,2)], actions)
s.run()
print(s)
self.assertEqual(len(str(s)), 143)
def test_04_sim_fail(self):
self.assertNotEqual(1, 2)
if __name__ == '__main__':
unittest.main()
| # test_simulator.py written by Duncan Murray 28/4/2015
import unittest
import os
import sys
root_folder = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + os.sep + ".." + os.sep + 'vais')
ref_folder = root_folder + os.sep + "data"
sys.path.append(root_folder)
import planet as planet
import battle as battle
import character as character
import simulator as simulator
test_folder = os.getcwd() + os.sep + 'test_results'
test_file = test_folder + os.sep + 'battle.txt'
#rules_file = ref_folder + os.sep + 'battle.rules'
class VaisSimulatorTest(unittest.TestCase):
def setup(self):
print('running simulator tests')
def test_01_instantiate_sim(self):
traits = character.CharacterCollection(ref_folder)
a1 = traits.generate_random_character()
a2 = traits.generate_random_character()
a3 = traits.generate_random_character()
world = planet.Planet('SimWorld', num_seeds=5, width=20, height=15, wind=0.3, rain=0.10, sun=0.3, lava=0.4)
actions = ['walk', 'run', 'fight', 'buy', 'sell', 'collect']
s = simulator.SimAdventureGame('Test of SimWorld', world, [a1, a2, a3], [(2,2), (3,4), (4,4)], actions)
s.run()
self.assertEqual(len(str(s)), 231)
def test_02_move_character(self):
"""
add a single character to a world and move them around
"""
traits = character.CharacterCollection(ref_folder)
a1 = traits.generate_random_character()
world = planet.Planet('SimWorld', num_seeds=5, width=20, height=15, wind=0.3, rain=0.10, sun=0.3, lava=0.4)
actions = ['walk']
s = simulator.SimAdventureGame('Test of SimWorld', world, [a1], [(2,2)], actions)
s.run()
self.assertEqual(len(str(s)), 143)
self.assertEqual(s.agent_locations[0]['x'], 2)
self.assertEqual(s.agent_locations[0]['y'], 2)
s.command({'name':'walk', 'type':'move', 'direction':[0,1]}, a1)
self.assertEqual(s.agent_locations[0]['x'], 2)
self.assertEqual(s.agent_locations[0]['y'], 2)
s.command({'name':'walk', 'type':'move', 'direction':[1,1]}, a1)
def test_03_SimGameOfLife(self):
traits = character.CharacterCollection(ref_folder)
a1 = traits.generate_random_character()
world = planet.Planet('SimWorld', num_seeds=5, width=20, height=15, wind=0.3, rain=0.10, sun=0.3, lava=0.4)
actions = ['walk']
s = simulator.SimAdventureGame('Test of SimWorld', world, [a1], [(2,2)], actions)
s.run()
print(s)
self.assertEqual(len(str(s)), 143)
def test_04_sim_fail(self):
self.assertEqual(1, 2)
if __name__ == '__main__':
unittest.main()
| Python | 0 |
d81c6e4ce44b0ee63fa116cb69efce17b8bb2c3f | test getting message via POP | test/test_pop_connection.py | test/test_pop_connection.py | """Tests for POP connection handling."""
import os
import pathlib
import unittest
from maildaemon.config import load_config
from maildaemon.pop_connection import POPConnection
_HERE = pathlib.Path(__file__).parent
_TEST_CONFIG_PATH = _HERE.joinpath('maildaemon_test_config.json')
@unittest.skipUnless(os.environ.get('TEST_COMM') or os.environ.get('CI'),
'skipping tests that require server connection')
class Tests(unittest.TestCase):
config = load_config(_TEST_CONFIG_PATH)
def test_retrieve_message_ids(self):
for connection_name in ['test-pop', 'test-pop-ssl']:
with self.subTest(msg=connection_name):
connection = POPConnection.from_dict(self.config['connections'][connection_name])
connection.connect()
ids = connection.retrieve_message_ids()
alive = connection.is_alive()
connection.disconnect()
self.assertIsInstance(ids, list, msg=connection)
self.assertTrue(alive, msg=connection)
def test_retrieve_message_lines(self):
for connection_name in ['test-pop', 'test-pop-ssl']:
with self.subTest(msg=connection_name):
connection = POPConnection.from_dict(self.config['connections'][connection_name])
connection.connect()
lines = connection.retrieve_message_lines(1)
self.assertGreater(len(lines), 0, msg=connection)
| """Tests for POP connection handling."""
import os
import pathlib
import unittest
from maildaemon.config import load_config
from maildaemon.pop_connection import POPConnection
_HERE = pathlib.Path(__file__).parent
_TEST_CONFIG_PATH = _HERE.joinpath('maildaemon_test_config.json')
@unittest.skipUnless(os.environ.get('TEST_COMM') or os.environ.get('CI'),
'skipping tests that require server connection')
class Tests(unittest.TestCase):
config = load_config(_TEST_CONFIG_PATH)
def test_retrieve_message_ids(self):
for connection_name in ['test-pop', 'test-pop-ssl']:
with self.subTest(msg=connection_name):
connection = POPConnection.from_dict(self.config['connections'][connection_name])
connection.connect()
ids = connection.retrieve_message_ids()
alive = connection.is_alive()
connection.disconnect()
self.assertIsInstance(ids, list, msg=connection)
self.assertTrue(alive, msg=connection)
| Python | 0 |
39a1c6c8c3795775dc8811e8e195feaa4e973cd8 | remove comments | tests/test_validator.py | tests/test_validator.py | # from unittest.mock import patch
import json
import unittest
from dacsspace.validator import Validator
class TestValidator(unittest.TestCase):
def test_validator(self):
json_file = "/Users/aberish/Documents/GitHub/DACSspace/fixtures/resource.json"
with open(json_file, 'r') as f:
json_data = json.load(f)
result = Validator().validate_data(json_data)
self.assertTrue(isinstance(result, dict))
self.assertEqual(result["valid"], "True")
| # from unittest.mock import patch
import json
import unittest
from dacsspace.validator import Validator
class TestValidator(unittest.TestCase):
def test_validator(self):
json_file = "/Users/aberish/Documents/GitHub/DACSspace/fixtures/resource.json"
with open(json_file, 'r') as f:
json_data = json.load(f)
result = Validator().validate_data(json_data)
self.assertTrue(isinstance(result, dict))
self.assertEqual(result["valid"], "True")
# HELP: I don't know what this does, but I saw it used in other tests
# if __name__ == "__main__":
# unittest.main()
| Python | 0 |
c2b55844bff3de39ac9a0a4bd8860306da731662 | fix for testing 401 after redirection | testsuite/test_views.py | testsuite/test_views.py | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2013, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Tests for communities views."""
from flask import url_for
from invenio.testsuite import InvenioTestCase, make_test_suite, run_test_suite
class CommunitiesViewTest(InvenioTestCase):
"""Test communities view functions."""
def test_home_communities_page_availability(self):
"""communities - availability of main page"""
response = self.client.get(url_for('communities.index'))
self.assert200(response)
def test_new_community_page_availability(self):
"""communities - availability of new community page"""
self.login('admin', '')
response = self.client.get(url_for('communities.new'))
self.assert200(response)
self.logout()
def test_new_community_page_unauthorized(self):
"""communities - new communities restricted to logged in users"""
response = self.client.get(url_for('communities.new'),
follow_redirects=True)
self.assert401(response)
TEST_SUITE = make_test_suite(CommunitiesViewTest)
if __name__ == "__main__":
run_test_suite(TEST_SUITE)
| # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2013, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Tests for communities views."""
from flask import url_for, current_app
from invenio.testsuite import InvenioTestCase, make_test_suite, \
run_test_suite
class CommunitiesViewTest(InvenioTestCase):
""" Test communities view functions. """
def test_home_communities_page_availability(self):
"""communities - availability of main page"""
response = self.client.get(url_for('communities.index'))
self.assert200(response)
def test_new_community_page_availability(self):
"""communities - availability of new community page"""
self.login('admin', '')
response = self.client.get(url_for('communities.new'))
self.assert200(response)
self.logout()
def test_new_community_page_unauthorized(self):
"""communities - new communities restricted to logged in users"""
response = self.client.get(url_for('communities.new'))
self.assert401(response)
TEST_SUITE = make_test_suite(CommunitiesViewTest)
if __name__ == "__main__":
run_test_suite(TEST_SUITE) | Python | 0 |
77199b8c6b06054c7741433ec2fadd654a636677 | add hour var | tilejetlogs/tilelogs.py | tilejetlogs/tilelogs.py | def buildTileRequestDocument(tileorigin, tilesource, x, y, z, status, datetime, ip):
r = {
'ip': ip,
'origin': tileorigin if tileorigin else "",
'source': tilesource,
'location': z+'/'+x+'/'+y,
'z': z,
'status': status,
'year': datetime.strftime('%Y'),
'month': datetime.strftime('%Y-%m'),
'date': datetime.strftime('%Y-%m-%d'),
'hour': datetime.strftime('%Y-%m-%d-%H'),
'date_iso': datetime.isoformat()
}
return r
| def buildTileRequestDocument(tileorigin, tilesource, x, y, z, status, datetime, ip):
r = {
'ip': ip,
'origin': tileorigin if tileorigin else "",
'source': tilesource,
'location': z+'/'+x+'/'+y,
'z': z,
'status': status,
'year': datetime.strftime('%Y'),
'month': datetime.strftime('%Y-%m'),
'date': datetime.strftime('%Y-%m-%d'),
'date_iso': datetime.isoformat()
}
return r
| Python | 0.000016 |
b9faf604095e799b3c3cd1e6a98bb9a87c64340b | add unit tests for rds clone | test/unit/test_disco_rds.py | test/unit/test_disco_rds.py | """
Tests of disco_rds
"""
import unittest
from mock import MagicMock, patch
from disco_aws_automation.disco_rds import DiscoRDS
from disco_aws_automation.exceptions import RDSEnvironmentError
from test.helpers.patch_disco_aws import get_mock_config
TEST_ENV_NAME = 'unittestenv'
TEST_VPC_ID = 'vpc-56e10e3d' # the hard coded VPC Id that moto will always return
def _get_vpc_mock():
"""Nastily copied from test_disco_elb"""
vpc_mock = MagicMock()
vpc_mock.environment_name = TEST_ENV_NAME
vpc_mock.vpc = MagicMock()
vpc_mock.vpc.id = TEST_VPC_ID
return vpc_mock
def _get_key_mock(key_name):
if key_name == 'rds/db-name/master_user_password':
return 'database_name_key'
elif key_name == 'rds/unittestenv-db-id/master_user_password':
return 'database-id-key'
else:
raise KeyError("Key not found")
class DiscoRDSTests(unittest.TestCase):
"""Test DiscoRDS class"""
def setUp(self):
self.rds = DiscoRDS(_get_vpc_mock())
self.rds.client = MagicMock()
self.rds.get_rds_security_group_id = MagicMock(return_value='fake_security_id')
self.rds.config_rds = get_mock_config({
'some-env-db-name': {
'engine': 'oracle',
'allocated_storage': '100',
'db_instance_class': 'db.m4.2xlarge',
'engine_version': '12.1.0.2.v2',
'master_username': 'foo'
}
})
self.rds.domain_name = 'example.com'
def test_get_db_parameter_group_family(self):
"""Tests that get_db_parameter_group_family handles all the expected cases"""
self.assertEquals("postgresql9.3", self.rds.get_db_parameter_group_family("postgresql", "9.3.1"))
self.assertEquals("oracle-se2-12.1",
self.rds.get_db_parameter_group_family("oracle-se2", "12.1.0.2.v2"))
self.assertEquals("mysql123.5", self.rds.get_db_parameter_group_family("MySQL", "123.5"))
# pylint: disable=unused-argument
@patch('disco_aws_automation.DiscoS3Bucket.get_key', side_effect=_get_key_mock)
def test_get_master_password(self, get_key_mock):
"""test getting the master password for an instance using either the db name or id as the s3 key"""
self.assertEquals('database_name_key', self.rds.get_master_password(TEST_ENV_NAME, 'db-name'))
self.assertEquals('database-id-key', self.rds.get_master_password(TEST_ENV_NAME, 'db-id'))
# pylint: disable=unused-argument
@patch('disco_aws_automation.DiscoS3Bucket.get_key', side_effect=_get_key_mock)
def test_clone_existing_db(self, get_key_mock):
"""test that cloning throws an error when the destination db already exists"""
self.rds.client.describe_db_snapshots.return_value = {
'DBInstances': [{
'DBInstanceIdentifier': 'unittestenv-db-name'
}]
}
with(self.assertRaises(RDSEnvironmentError)):
self.rds.clone('some-env', 'db-name')
# pylint: disable=unused-argument
@patch('disco_aws_automation.disco_rds.DiscoRoute53')
@patch('disco_aws_automation.DiscoS3Bucket.get_key', side_effect=_get_key_mock)
def test_clone(self, get_key_mock, r53_mock):
"""test cloning a database"""
self.rds._get_db_instance = MagicMock(return_value=None)
self.rds.client.describe_db_snapshots.return_value = {
'DBSnapshots': [{
'DBSnapshotIdentifier': 'foo-snapshot'
}]
}
self.rds.client.describe_db_instances.return_value = {
'DBInstances': [{
'Endpoint': {
'Address': 'foo.example.com'
}
}]
}
self.rds.clone('some-env', 'db-name')
self.rds.client.restore_db_instance_from_db_snapshot.assert_called_once_with(
AutoMinorVersionUpgrade=True,
DBInstanceClass='db.m4.2xlarge',
DBInstanceIdentifier='unittestenv-db-name',
DBSnapshotIdentifier='foo-snapshot',
DBSubnetGroupName='unittestenv-db-name',
Engine='oracle',
Iops=0,
LicenseModel='bring-your-own-license',
MultiAZ=True,
Port=1521,
PubliclyAccessible=False)
r53_mock.return_value.create_record.assert_called_once_with('example.com',
'unittestenv-db-name.example.com.',
'CNAME',
'foo.example.com')
| """
Tests of disco_rds
"""
import unittest
from mock import MagicMock, patch
from disco_aws_automation.disco_rds import DiscoRDS
TEST_ENV_NAME = 'unittestenv'
TEST_VPC_ID = 'vpc-56e10e3d' # the hard coded VPC Id that moto will always return
def _get_vpc_mock():
"""Nastily copied from test_disco_elb"""
vpc_mock = MagicMock()
vpc_mock.environment_name = TEST_ENV_NAME
vpc_mock.vpc = MagicMock()
vpc_mock.vpc.id = TEST_VPC_ID
return vpc_mock
class DiscoRDSTests(unittest.TestCase):
"""Test DiscoRDS class"""
def test_get_db_parameter_group_family(self):
"""Tests that get_db_parameter_group_family handles all the expected cases"""
rds = DiscoRDS(_get_vpc_mock())
self.assertEquals("postgresql9.3", rds.get_db_parameter_group_family("postgresql", "9.3.1"))
self.assertEquals("oracle-se2-12.1", rds.get_db_parameter_group_family("oracle-se2", "12.1.0.2.v2"))
self.assertEquals("mysql123.5", rds.get_db_parameter_group_family("MySQL", "123.5"))
def test_get_master_password(self):
"""test getting the master password for an instance using either the db name or identifier"""
rds = DiscoRDS(_get_vpc_mock())
def _get_key_mock(key_name):
if key_name == 'rds/db-name/master_user_password':
return 'new-key'
elif key_name == 'rds/unittestenv-db-id/master_user_password':
return 'old-key'
else:
raise KeyError("Key not found")
with patch("disco_aws_automation.DiscoS3Bucket.get_key", side_effect=_get_key_mock):
self.assertEquals('new-key', rds.get_master_password(TEST_ENV_NAME, 'db-name'))
self.assertEquals('old-key', rds.get_master_password(TEST_ENV_NAME, 'db-id'))
if __name__ == '__main__':
unittest.main()
| Python | 0 |
d8556707aa3ab0bc89878e0b5daaaeb7b54616ae | Disable images | zephyr/lib/bugdown.py | zephyr/lib/bugdown.py | import re
import markdown
class Bugdown(markdown.Extension):
def extendMarkdown(self, md, md_globals):
del md.inlinePatterns['image_link']
del md.inlinePatterns['image_reference']
# We need to re-initialize the markdown engine every 30 messages
# due to some sort of performance leak in the markdown library.
MAX_MD_ENGINE_USES = 30
_md_engine = None
_use_count = 0
# A link starts after whitespace, and cannot contain spaces,
# end parentheses, or end brackets (which would confuse Markdown).
# FIXME: Use one of the actual linkification extensions.
_link_regex = re.compile(r'(\s|\A)(?P<url>https?://[^\s\])]+)')
# Pad heading markers to make Markdown ignore them
# FIXME: Write a real extension for the markdown library
_heading_regex = re.compile(r'^([#-=])', flags=re.MULTILINE)
def _linkify(match):
url = match.group('url')
return ' [%s](%s) ' % (url, url)
def convert(md):
"""Convert Markdown to HTML, with Humbug-specific settings and hacks."""
global _md_engine, _use_count
if _md_engine is None:
_md_engine = markdown.Markdown(
extensions = ['fenced_code', 'codehilite', 'nl2br', Bugdown()],
safe_mode = 'escape',
output_format = 'xhtml')
md = _heading_regex.sub(r' \1', md)
md = _link_regex.sub(_linkify, md)
try:
html = _md_engine.convert(md)
except:
# FIXME: Do something more reasonable here!
html = '<p>[Humbug note: Sorry, we could not understand the formatting of your message]</p>'
_use_count += 1
if _use_count >= MAX_MD_ENGINE_USES:
_md_engine = None
_use_count = 0
return html
| import re
import markdown
# We need to re-initialize the markdown engine every 30 messages
# due to some sort of performance leak in the markdown library.
MAX_MD_ENGINE_USES = 30
_md_engine = None
_use_count = 0
# A link starts after whitespace, and cannot contain spaces,
# end parentheses, or end brackets (which would confuse Markdown).
# FIXME: Use one of the actual linkification extensions.
_link_regex = re.compile(r'(\s|\A)(?P<url>https?://[^\s\])]+)')
# Pad heading markers to make Markdown ignore them
# FIXME: Write a real extension for the markdown library
_heading_regex = re.compile(r'^([#-=])', flags=re.MULTILINE)
def _linkify(match):
url = match.group('url')
return ' [%s](%s) ' % (url, url)
def convert(md):
"""Convert Markdown to HTML, with Humbug-specific settings and hacks."""
global _md_engine, _use_count
if _md_engine is None:
_md_engine = markdown.Markdown(
extensions = ['fenced_code', 'codehilite', 'nl2br'],
safe_mode = 'escape',
output_format = 'xhtml')
md = _heading_regex.sub(r' \1', md)
md = _link_regex.sub(_linkify, md)
try:
html = _md_engine.convert(md)
except:
# FIXME: Do something more reasonable here!
html = '<p>[Humbug note: Sorry, we could not understand the formatting of your message]</p>'
_use_count += 1
if _use_count >= MAX_MD_ENGINE_USES:
_md_engine = None
_use_count = 0
return html
| Python | 0 |
e4d32def2ef91518198e6a500908ea3839c43257 | Fix typo | cairis/data/DimensionDAO.py | cairis/data/DimensionDAO.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from cairis.core.ARM import *
from cairis.daemon.CairisHTTPError import CairisHTTPError, ARMHTTPError
from cairis.data.CairisDAO import CairisDAO
from cairis.core.MySQLDatabaseProxy import MySQLDatabaseProxy
from http.client import BAD_REQUEST, NOT_FOUND
__author__ = 'Shamal Faily'
class DimensionDAO(CairisDAO):
def __init__(self, session_id):
CairisDAO.__init__(self, session_id)
def getDimensions(self,table,id):
try:
permissableDimensions = ['access_right', 'architectural_pattern', 'asset', 'asset_reference', 'asset_type','attacker','attacker_reference', 'behavioural_variable', 'capability','characteristic_synopsis', 'component', 'concept_reference','connector', 'countermeasure','countermeasure_reference', 'countermeasure_value', 'datastore', 'detection_mechanism', 'dfd_filter', 'document_reference', 'domainproperty','domainproperty_reference', 'entity','environment', 'environment_reference','external_document', 'goal', 'goal_reference','goal_category_type','interface','likelihood','locations','misusability_case','misusecase','misusecase_reference','motivation','obstacle','obstacle_category_type','obstacle_reference','persona','persona_characteristic','persona_characteristic_synopsis','persona_implied_process','persona_reference','persona_type','priority_type', 'privilege', 'protocol', 'reference_synopsis','requirement', 'requirement_reference', 'requirement_type','response', 'response_reference', 'risk', 'risk_class','risk_reference','role', 'role_reference', 'role_type', 'securitypattern','severity', 'surface_type', 'task', 'task_characteristic', 'task_reference','template_asset', 'template_goal', 'template_requirement','trace_dimension','threat', 'threat_reference','threat_type', 'threat_value', 'usecase', 'vulnerability','vulnerability_reference', 'vulnerability_type']
if (table not in permissableDimensions):
raise CairisHTTPError(BAD_REQUEST,'Invalid dimension',table + ' is not a permissable dimension')
if (table == 'persona_characteristic_synopsis'):
return self.getDimensionNames(table,'')
else:
return sorted(self.db_proxy.getDimensions(table,id).keys())
except DatabaseProxyException as ex:
self.close()
raise ARMHTTPError(ex)
def getDimensionNames(self,table,environment):
try:
permissableDimensions = ['asset','asset_value','attacker','countermeasure','datastore','detection_mechanism','dfd_filter','entity','goal','misusecase','obstacle','persona', 'requirement','response','risk','role','task','threat','usecase', 'vulnerability']
if (table not in permissableDimensions):
raise CairisHTTPError(BAD_REQUEST,'Invalid dimension',table + ' is not a permissable dimension when specifying environment')
if (self.db_proxy.nameExists(environment,'environment') == False):
raise CairisHTTPError(NOT_FOUND,'Unknown environment',environment + ' does not exist')
return self.db_proxy.getDimensionNames(table,environment)
except DatabaseProxyException as ex:
self.close()
raise ARMHTTPError(ex)
| # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from cairis.core.ARM import *
from cairis.daemon.CairisHTTPError import CairisHTTPError, ARMHTTPError
from cairis.data.CairisDAO import CairisDAO
from cairis.core.MySQLDatabaseProxy import MySQLDatabaseProxy
from http.client import BAD_REQUEST, NOT_FOUND
__author__ = 'Shamal Faily'
class DimensionDAO(CairisDAO):
def __init__(self, session_id):
CairisDAO.__init__(self, session_id)
def getDimensions(self,table,id):
try:
permissableDimensions = ['access_right', 'architectural_pattern', 'asset', 'asset_reference', 'asset_type','attacker','attacker_reference', 'behavioural_variable', 'capability','characteristic_synopsis', 'component', 'concept_reference','connector', 'countermeasure' 'countermeasure_reference', 'countermeasure_value', 'datastore', 'detection_mechanism', 'dfd_filter', 'document_reference', 'domainproperty','domainproperty_reference', 'entity','environment', 'environment_reference','external_document', 'goal', 'goal_reference','goal_category_type','interface','likelihood','locations','misusability_case','misusecase','misusecase_reference','motivation','obstacle','obstacle_category_type','obstacle_reference','persona','persona_characteristic','persona_characteristic_synopsis','persona_implied_process','persona_reference','persona_type','priority_type', 'privilege', 'protocol', 'reference_synopsis','requirement', 'requirement_reference', 'requirement_type','response', 'response_reference', 'risk', 'risk_class','risk_reference','role', 'role_reference', 'role_type', 'securitypattern','severity', 'surface_type', 'task', 'task_characteristic', 'task_reference','template_asset', 'template_goal', 'template_requirement','trace_dimension','threat', 'threat_reference','threat_type', 'threat_value', 'usecase', 'vulnerability','vulnerability_reference', 'vulnerability_type']
if (table not in permissableDimensions):
raise CairisHTTPError(BAD_REQUEST,'Invalid dimension',table + ' is not a permissable dimension')
if (table == 'persona_characteristic_synopsis'):
return self.getDimensionNames(table,'')
else:
return sorted(self.db_proxy.getDimensions(table,id).keys())
except DatabaseProxyException as ex:
self.close()
raise ARMHTTPError(ex)
def getDimensionNames(self,table,environment):
try:
permissableDimensions = ['asset','asset_value','attacker','countermeasure','datastore','detection_mechanism','dfd_filter','entity','goal','misusecase','obstacle','persona', 'requirement','response','risk','role','task','threat','usecase', 'vulnerability']
if (table not in permissableDimensions):
raise CairisHTTPError(BAD_REQUEST,'Invalid dimension',table + ' is not a permissable dimension when specifying environment')
if (self.db_proxy.nameExists(environment,'environment') == False):
raise CairisHTTPError(NOT_FOUND,'Unknown environment',environment + ' does not exist')
return self.db_proxy.getDimensionNames(table,environment)
except DatabaseProxyException as ex:
self.close()
raise ARMHTTPError(ex)
| Python | 0.999999 |
4c1d0877fabf3f95c488e58d2460a9ca2330b3eb | Support --host for devserver | zeus/cli/devserver.py | zeus/cli/devserver.py | import click
import os
import socket
import sys
from subprocess import list2cmdline
from honcho.manager import Manager
from .base import cli
DEFAULT_HOST_NAME = socket.gethostname().split(".", 1)[0].lower()
@cli.command()
@click.option("--environment", default="development", help="The environment name.")
@click.option("--workers/--no-workers", default=False)
@click.option("--host", "-h", default="127.0.0.1")
@click.option("--port", "-p", default=8080)
@click.option("--ngrok/--no-ngrok", default=False)
@click.option("--ngrok-domain", default="zeus-{}".format(DEFAULT_HOST_NAME))
@click.option("--pubsub/--no-pubsub", default=True)
@click.option("--pubsub-port", default=8090)
def devserver(
environment, workers, host, port, ngrok, ngrok_domain, pubsub, pubsub_port
):
os.environ.setdefault("FLASK_DEBUG", "1")
os.environ["NODE_ENV"] = environment
if pubsub:
os.environ["PUBSUB_ENDPOINT"] = "http://localhost:{}".format(pubsub_port)
if ngrok:
root_url = "https://{}.ngrok.io".format(ngrok_domain)
os.environ["SSL"] = "1"
os.environ["SERVER_NAME"] = "{}.ngrok.io".format(ngrok_domain)
else:
root_url = "http://{}:{}".format(host, port)
click.echo("Launching Zeus on {}".format(root_url))
# TODO(dcramer): pass required attributes to 'run' directly instead
# of relying on FLASK_DEBUG
daemons = [
("web", ["zeus", "run", "--host={}".format(host), "--port={}".format(port)]),
(
"webpack",
[
"node_modules/.bin/webpack",
"--watch",
"--config=config/webpack.config.js",
],
),
]
if pubsub:
daemons.append(
(
"pubsub",
[
"zeus",
"pubsub",
"--host={}".format(host),
"--port={}".format(pubsub_port),
],
)
)
if workers:
daemons.append(("worker", ["zeus", "worker", "--cron", "--log-level=INFO"]))
if ngrok:
daemons.append(
(
"ngrok",
["ngrok", "http", "-subdomain={}".format(ngrok_domain), str(port)],
)
)
cwd = os.path.realpath(
os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)
)
manager = Manager()
for name, cmd in daemons:
manager.add_process(name, list2cmdline(cmd), quiet=False, cwd=cwd)
manager.loop()
sys.exit(manager.returncode)
| import click
import os
import socket
import sys
from subprocess import list2cmdline
from honcho.manager import Manager
from .base import cli
DEFAULT_HOST_NAME = socket.gethostname().split(".", 1)[0].lower()
@cli.command()
@click.option("--environment", default="development", help="The environment name.")
@click.option("--workers/--no-workers", default=False)
@click.option("--port", "-p", default=8080)
@click.option("--ngrok/--no-ngrok", default=False)
@click.option("--ngrok-domain", default="zeus-{}".format(DEFAULT_HOST_NAME))
@click.option("--pubsub/--no-pubsub", default=True)
@click.option("--pubsub-port", default=8090)
def devserver(environment, workers, port, ngrok, ngrok_domain, pubsub, pubsub_port):
os.environ.setdefault("FLASK_DEBUG", "1")
os.environ["NODE_ENV"] = environment
if pubsub:
os.environ["PUBSUB_ENDPOINT"] = "http://localhost:{}".format(pubsub_port)
if ngrok:
root_url = "https://{}.ngrok.io".format(ngrok_domain)
os.environ["SSL"] = "1"
os.environ["SERVER_NAME"] = "{}.ngrok.io".format(ngrok_domain)
else:
root_url = "http://localhost:{}".format(port)
click.echo("Launching Zeus on {}".format(root_url))
# TODO(dcramer): pass required attributes to 'run' directly instead
# of relying on FLASK_DEBUG
daemons = [
("web", ["zeus", "run", "--port={}".format(port)]),
(
"webpack",
[
"node_modules/.bin/webpack",
"--watch",
"--config=config/webpack.config.js",
],
),
]
if pubsub:
daemons.append(("pubsub", ["zeus", "pubsub", "--port={}".format(pubsub_port)]))
if workers:
daemons.append(("worker", ["zeus", "worker", "--cron", "--log-level=INFO"]))
if ngrok:
daemons.append(
(
"ngrok",
["ngrok", "http", "-subdomain={}".format(ngrok_domain), str(port)],
)
)
cwd = os.path.realpath(
os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)
)
manager = Manager()
for name, cmd in daemons:
manager.add_process(name, list2cmdline(cmd), quiet=False, cwd=cwd)
manager.loop()
sys.exit(manager.returncode)
| Python | 0 |
1748d039feb40ddb1ceef0cf2f7b49270d0aae6e | Change to support Python3 | carmen/resolvers/profile.py | carmen/resolvers/profile.py | """Resolvers based on Twitter user profile data."""
import re
import warnings
from ..names import *
from ..resolver import AbstractResolver, register
STATE_RE = re.compile(r'.+,\s*(\w+)')
NORMALIZATION_RE = re.compile(r'\s+|\W')
def normalize(location_name, preserve_commas=False):
"""Normalize *location_name* by stripping punctuation and collapsing
runs of whitespace, and return the normalized name."""
def replace(match):
if preserve_commas and ',' in match.group(0):
return ','
return ' '
return NORMALIZATION_RE.sub(replace, location_name).strip().lower()
@register('profile')
class ProfileResolver(AbstractResolver):
"""A resolver that locates a tweet by matching the tweet author's
profile location against known locations."""
name = 'profile'
def __init__(self):
self.location_name_to_location = {}
def add_location(self, location):
aliases = list(location.aliases)
aliases_already_added = set()
for alias in aliases:
if alias in aliases_already_added:
continue
if alias in self.location_name_to_location:
warnings.warn('Duplicate location name "%s"' % alias)
else:
self.location_name_to_location[alias] = location
# Additionally add a normalized version of the alias
# stripped of punctuation, and with runs of whitespace
# reduced to single spaces.
normalized = normalize(alias)
if normalized != alias:
aliases.append(normalized)
aliases_already_added.add(alias)
def resolve_tweet(self, tweet):
import sys
location_string = tweet.get('user', {}).get('location', '')
if sys.version_info[0] < 3:
location_string = location_string.encode('utf-8')
if not location_string:
return None
normalized = normalize(location_string)
if normalized in self.location_name_to_location:
return (False, self.location_name_to_location[normalized])
# Try again with commas.
normalized = normalize(location_string, preserve_commas=True)
match = STATE_RE.search(normalized)
if match:
after_comma = match.group(1)
location_name = None
if after_comma in US_STATES or after_comma in COUNTRIES:
location_name = after_comma
elif after_comma in US_STATE_ABBREVIATIONS:
location_name = US_STATE_ABBREVIATIONS[after_comma]
elif after_comma in COUNTRY_CODES:
location_name = COUNTRY_CODES[after_comma]
if location_name in self.location_name_to_location:
return (False, self.location_name_to_location[location_name])
return None
| """Resolvers based on Twitter user profile data."""
import re
import warnings
from ..names import *
from ..resolver import AbstractResolver, register
STATE_RE = re.compile(r'.+,\s*(\w+)')
NORMALIZATION_RE = re.compile(r'\s+|\W')
def normalize(location_name, preserve_commas=False):
"""Normalize *location_name* by stripping punctuation and collapsing
runs of whitespace, and return the normalized name."""
def replace(match):
if preserve_commas and ',' in match.group(0):
return ','
return ' '
return NORMALIZATION_RE.sub(replace, location_name).strip().lower()
@register('profile')
class ProfileResolver(AbstractResolver):
"""A resolver that locates a tweet by matching the tweet author's
profile location against known locations."""
name = 'profile'
def __init__(self):
self.location_name_to_location = {}
def add_location(self, location):
aliases = list(location.aliases)
aliases_already_added = set()
for alias in aliases:
if alias in aliases_already_added:
continue
if alias in self.location_name_to_location:
warnings.warn('Duplicate location name "%s"' % alias)
else:
self.location_name_to_location[alias] = location
# Additionally add a normalized version of the alias
# stripped of punctuation, and with runs of whitespace
# reduced to single spaces.
normalized = normalize(alias)
if normalized != alias:
aliases.append(normalized)
aliases_already_added.add(alias)
def resolve_tweet(self, tweet):
location_string = tweet.get('user', {}).get('location', '')
if not location_string:
return None
normalized = normalize(location_string)
if normalized in self.location_name_to_location:
return (False, self.location_name_to_location[normalized])
# Try again with commas.
normalized = normalize(location_string, preserve_commas=True)
match = STATE_RE.search(normalized)
if match:
after_comma = match.group(1)
location_name = None
if after_comma in US_STATES or after_comma in COUNTRIES:
location_name = after_comma
elif after_comma in US_STATE_ABBREVIATIONS:
location_name = US_STATE_ABBREVIATIONS[after_comma]
elif after_comma in COUNTRY_CODES:
location_name = COUNTRY_CODES[after_comma]
if location_name in self.location_name_to_location:
return (False, self.location_name_to_location[location_name])
return None
| Python | 0 |
d9b7be65aae78a76454cae4f1f75029f1fa5084b | rename mapper to mapfxn to avoid confusion with mrjob.MRJob.mapper() | specializers/ftdock/cloud_dock.py | specializers/ftdock/cloud_dock.py | from mrjob.protocol import PickleProtocol as protocol
from asp.jit import mapreduce_support as mr
import cPickle as pickle
class FtdockMRJob(mr.AspMRJob):
DEFAULT_INPUT_PROTOCOL = 'pickle'
DEFAULT_PROTOCOL = 'pickle'
def configure_options(self):
super(mr.AspMRJob, self).configure_options()
self.add_file_option('--ftdockargs')
def job_runner_kwargs(self):
config = super(mr.AspMRJob, self).job_runner_kwargs()
config['file_upload_args'] += [('--ftdockargs', "/Users/driscoll/sejits/cloud_ftdock/pickled_args")]
config['cmdenv']['PYTHONPATH'] = "/Users/driscoll/sejits/asp:/Users/driscoll/sejits/ftdock_v2.0"
return config
def mapper(self, key, value):
"""
Each mapper executes ftdock for a combination (qi, qj, qk)
"""
from ftdock_main import ftdock
arguments = pickle.load(open('pickled_args'))
geometry_res = ftdock(key[0], key[1], key[2], *arguments)
yield 1, geometry_res
def reducer(self, key, values):
"""
The reducer just emits the list of geometries
"""
result = []
for temp in values:
result.append(temp)
yield 1, result
class AllCombMap(object):
def __init__(self, lists_to_combine, *ftdock_args):
self._lists_to_combine = lists_to_combine
self._ftdock_args = ftdock_args
def execute(self, nproc=1):
cloud_flag = True
mapfxn = self.ftdock_using_mapreduce if cloud_flag else self.ftdock_classic
return mapfxn(self._lists_to_combine, self._ftdock_args)
def ftdock_using_mapreduce(self, lists_to_combine, ftdock_args):
"""
Perform docking experiment using MapReduce
"""
print "Map-Reduce execution"
# Dump the ftdock_args in a file
pickle.dump(ftdock_args, open('pickled_args','w'))
# Add a map task for each point in the search space
import itertools
task_args = [protocol.write(x, "") for x in itertools.product(*lists_to_combine)]
import asp.jit.asp_module as asp_module
mod = asp_module.ASPModule(use_mapreduce=True)
mod.add_mr_function("ftdock_mr", FtdockMRJob)
kv_pairs = mod.ftdock_mr(task_args)
return kv_pairs[0][1]
def ftdock_classic(self, lists_to_combine, ftdock_args):
"""
Perform docking experiment using AllCombMap
"""
raise NotImplementedError
"""
print "Classic execution"
geometry_list = AllCombMap(lists_to_combine, ftdock, *ftdock_args).execute(nproc=2)
return geometry_list
"""
# this appears to be necessary because this script will be called as __main__ on
# every worker node.
if __name__ == '__main__':
FtdockMRJob().run()
| from mrjob.protocol import PickleProtocol as protocol
from asp.jit import mapreduce_support as mr
import cPickle as pickle
class FtdockMRJob(mr.AspMRJob):
DEFAULT_INPUT_PROTOCOL = 'pickle'
DEFAULT_PROTOCOL = 'pickle'
def configure_options(self):
super(mr.AspMRJob, self).configure_options()
self.add_file_option('--ftdockargs')
def job_runner_kwargs(self):
config = super(mr.AspMRJob, self).job_runner_kwargs()
config['file_upload_args'] += [('--ftdockargs', "/Users/driscoll/sejits/cloud_ftdock/pickled_args")]
config['cmdenv']['PYTHONPATH'] = "/Users/driscoll/sejits/asp:/Users/driscoll/sejits/ftdock_v2.0"
return config
def mapper(self, key, value):
"""
Each mapper executes ftdock for a combination (qi, qj, qk)
"""
from ftdock_main import ftdock
arguments = pickle.load(open('pickled_args'))
geometry_res = ftdock(key[0], key[1], key[2], *arguments)
yield 1, geometry_res
def reducer(self, key, values):
"""
The reducer just emits the list of geometries
"""
result = []
for temp in values:
result.append(temp)
yield 1, result
class AllCombMap(object):
def __init__(self, lists_to_combine, *ftdock_args):
self._lists_to_combine = lists_to_combine
self._ftdock_args = ftdock_args
def execute(self, nproc=1):
cloud_flag = True
mapper = self.ftdock_using_mapreduce if cloud_flag else self.ftdock_classic
return mapper(self._lists_to_combine, self._ftdock_args)
def ftdock_using_mapreduce(self, lists_to_combine, ftdock_args):
"""
Perform docking experiment using MapReduce
"""
print "Map-Reduce execution"
# Dump the ftdock_args in a file
pickle.dump(ftdock_args, open('pickled_args','w'))
# Add a map task for each point in the search space
import itertools
task_args = [protocol.write(x, "") for x in itertools.product(*lists_to_combine)]
import asp.jit.asp_module as asp_module
mod = asp_module.ASPModule(use_mapreduce=True)
mod.add_mr_function("ftdock_mr", FtdockMRJob)
kv_pairs = mod.ftdock_mr(task_args)
return kv_pairs[0][1]
def ftdock_classic(self, lists_to_combine, ftdock_args):
"""
Perform docking experiment using AllCombMap
"""
raise NotImplementedError
"""
print "Classic execution"
geometry_list = AllCombMap(lists_to_combine, ftdock, *ftdock_args).execute(nproc=2)
return geometry_list
"""
# this appears to be necessary because this script will be called as __main__ on
# every worker node.
if __name__ == '__main__':
FtdockMRJob().run()
| Python | 0 |
c8a280d6466623b8d76fa01c12ebf295151d35d6 | remove primary key constraint | wim-adaptor/vtn-api/database/sqlalchemy_declaritive.py | wim-adaptor/vtn-api/database/sqlalchemy_declaritive.py | import os
import sys
from sqlalchemy import create_engine, Column, ForeignKey, Integer, String
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Connectivity(Base):
__tablename__ = 'connectivity'
# define the columns for the table
segment = Column(String(250),nullable=False)
bridge_name = Column(String(250), nullable=False)
port_id = Column(String(250))
location = Column(String(250))
# Create engine that stores data in the local directory's
engine = create_engine('sqlite:///wim_info.db')
# Create the Table
Base.metadata.create_all(engine)
| import os
import sys
from sqlalchemy import create_engine, Column, ForeignKey, Integer, String
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Connectivity(Base):
__tablename__ = 'connectivity'
# define the columns for the table
segment = Column(String(250),nullable=False)
bridge_name = Column(String(250), nullable=False)
port_id = Column(String(250),primary_key=True)
location = Column(String(250))
# Create engine that stores data in the local directory's
engine = create_engine('sqlite:///wim_info.db')
# Create the Table
Base.metadata.create_all(engine)
| Python | 0.000384 |
f984db30c4d4cab1377d21a73ec0b802590f8a51 | Update sqlalchemy migrate scripts for postgres | trove/db/sqlalchemy/migrate_repo/versions/014_update_instance_flavor_id.py | trove/db/sqlalchemy/migrate_repo/versions/014_update_instance_flavor_id.py | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy.schema import MetaData
from trove.db.sqlalchemy.migrate_repo.schema import Integer
from trove.db.sqlalchemy.migrate_repo.schema import String
from trove.db.sqlalchemy.migrate_repo.schema import Table
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# pgsql <= 8.3 was lax about char->other casting but this was tightened up
# in 8.4+. We now have to specify the USING clause for the cast to succeed.
# NB: The generated sqlalchemy query doesn't support this, so this override
# is needed.
if migrate_engine.name == 'postgresql':
migrate_engine.execute('ALTER TABLE instances ALTER COLUMN flavor_id '
'TYPE INTEGER USING flavor_id::integer')
else:
instances = Table('instances', meta, autoload=True)
#modify column
instances.c.flavor_id.alter(type=Integer())
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# int->char casts in pgsql still work fine without any USING clause,
# so downgrade is not affected.
# modify column:
instances = Table('instances', meta, autoload=True)
instances.c.flavor_id.alter(type=String(36))
| # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy.schema import MetaData
from trove.db.sqlalchemy.migrate_repo.schema import Integer
from trove.db.sqlalchemy.migrate_repo.schema import String
from trove.db.sqlalchemy.migrate_repo.schema import Table
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True)
#modify column
instances.c.flavor_id.alter(type=Integer())
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# modify column:
instances = Table('instances', meta, autoload=True)
instances.c.flavor_id.alter(type=String(36))
| Python | 0.000001 |
dbe1ac7fda9188e59479ff4716141651d627f76c | Fix cheroot.test.test_errors doc spelling | cheroot/test/test_errors.py | cheroot/test/test_errors.py | """Test suite for ``cheroot.errors``."""
import pytest
from cheroot import errors
from .._compat import IS_LINUX, IS_MACOS, IS_WINDOWS
@pytest.mark.parametrize(
'err_names,err_nums',
(
(('', 'some-nonsense-name'), []),
(
(
'EPROTOTYPE', 'EAGAIN', 'EWOULDBLOCK',
'WSAEWOULDBLOCK', 'EPIPE',
),
(91, 11, 32) if IS_LINUX else
(32, 35, 41) if IS_MACOS else
(32, 10041, 11, 10035) if IS_WINDOWS else
(),
),
),
)
def test_plat_specific_errors(err_names, err_nums):
"""Test that ``plat_specific_errors`` gets correct error numbers list."""
actual_err_nums = errors.plat_specific_errors(*err_names)
assert len(actual_err_nums) == len(err_nums)
assert sorted(actual_err_nums) == sorted(err_nums)
| """Test suite for ``cheroot.errors``."""
import pytest
from cheroot import errors
from .._compat import IS_LINUX, IS_MACOS, IS_WINDOWS
@pytest.mark.parametrize(
'err_names,err_nums',
(
(('', 'some-nonsense-name'), []),
(
(
'EPROTOTYPE', 'EAGAIN', 'EWOULDBLOCK',
'WSAEWOULDBLOCK', 'EPIPE',
),
(91, 11, 32) if IS_LINUX else
(32, 35, 41) if IS_MACOS else
(32, 10041, 11, 10035) if IS_WINDOWS else
(),
),
),
)
def test_plat_specific_errors(err_names, err_nums):
"""Test that plat_specific_errors retrieves correct err num list."""
actual_err_nums = errors.plat_specific_errors(*err_names)
assert len(actual_err_nums) == len(err_nums)
assert sorted(actual_err_nums) == sorted(err_nums)
| Python | 0.000442 |
423a15d7c8841b40bddbd129b2abfb1135f0b7c0 | fix date parsing in logsearch | scripts/logfetch/search.py | scripts/logfetch/search.py | import os
import re
import sys
import fnmatch
import logfetch_base
from termcolor import colored
def find_cached_logs(args):
matching_logs = []
log_fn_match = get_matcher(args)
for filename in os.listdir(args.dest):
if fnmatch.fnmatch(filename, log_fn_match) and in_date_range(args, filename):
if args.verbose:
sys.stderr.write(colored('Including log {0}\n'.format(filename), 'magenta'))
matching_logs.append('{0}/{1}'.format(args.dest, filename))
else:
if args.verbose:
sys.stderr.write(colored('Excluding log {0}, not in date range\n'.format(filename), 'magenta'))
return matching_logs
def in_date_range(args, filename):
timestamps = re.findall(r"-\d{13}-", filename)
if timestamps:
return logfetch_base.is_in_date_range(args, int(str(timestamps[-1]).replace("-", "")[0:-3]))
else:
return True
def get_matcher(args):
if args.taskId:
if 'filename' in args.file_pattern and args.logtype:
return '{0}*{1}*'.format(args.taskId, args.logtype)
else:
return '{0}*'.format(args.taskId)
elif args.deployId and args.requestId:
if 'filename' in args.file_pattern and args.logtype:
return '{0}-{1}*{2}*'.format(args.requestId, args.deployId, args.logtype)
else:
return '{0}-{1}*'.format(args.requestId, args.deployId)
else:
if 'filename' in args.file_pattern and args.logtype:
return '{0}*{1}*'.format(args.requestId, args.logtype)
else:
return '{0}*'.format(args.requestId) | import os
import re
import sys
import fnmatch
import logfetch_base
from termcolor import colored
def find_cached_logs(args):
matching_logs = []
log_fn_match = get_matcher(args)
for filename in os.listdir(args.dest):
if fnmatch.fnmatch(filename, log_fn_match) and in_date_range(args, filename):
if args.verbose:
sys.stderr.write(colored('Including log {0}\n'.format(filename), 'magenta'))
matching_logs.append('{0}/{1}'.format(args.dest, filename))
else:
if args.verbose:
sys.stderr.write(colored('Excluding log {0}, not in date range\n'.format(filename), 'magenta'))
return matching_logs
def in_date_range(args, filename):
timestamps = re.findall(r"\d{13}", filename)
if timestamps:
return logfetch_base.is_in_date_range(args, int(str(timestamps[-1])[0:-3]))
else:
return True
def get_matcher(args):
if args.taskId:
if 'filename' in args.file_pattern and args.logtype:
return '{0}*{1}*'.format(args.taskId, args.logtype)
else:
return '{0}*'.format(args.taskId)
elif args.deployId and args.requestId:
if 'filename' in args.file_pattern and args.logtype:
return '{0}-{1}*{2}*'.format(args.requestId, args.deployId, args.logtype)
else:
return '{0}-{1}*'.format(args.requestId, args.deployId)
else:
if 'filename' in args.file_pattern and args.logtype:
return '{0}*{1}*'.format(args.requestId, args.logtype)
else:
return '{0}*'.format(args.requestId) | Python | 0.000005 |
fc1d468d6602022405d4959ea8d12c825a1916f0 | Add AuthToken model | passwordless/models.py | passwordless/models.py | from datetime import timedelta
import uuid
from django.db import models
from django.utils import timezone
# Create your models here.
class User(models.Model):
"""
User model
This User model eschews passwords, relying instead on emailed OTP tokens.
"""
username = models.CharField(max_length=30, unique=True)
email = models.EmailField(null=True)
is_active = models.BooleanField(default=False)
is_superuser = models.BooleanField(default=False)
date_joined = models.DateTimeField(auto_now_add=True)
@property
def is_authenticated(self):
#Used to tell authenticated Users from anonymous ones
return True
@property
def is_anonymous(self):
#This is not an anonymous user
return False
def __str__(self):
return self.username
class AnonymousUser:
"""
An object to represent an anonymous/unauthenticated user
"""
username = ''
email = None
is_active = False
is_superuser = False
date_joined = None
@property
def is_authenticated(self):
#Anonymous sessions are not authenticated
return False
@property
def is_anonymous(self):
return True
def __str__(self):
return "Anonymous User"
def make_token():
"""
Generate a random token suitable for activation/confirmation via email
A hex-encoded random UUID has plenty of entropy to be secure enough for our
needs.
"""
return uuid.uuid4().hex
class AuthToken(models.Model):
"""
OTP Token for passwordless authentication
"""
user = models.OneToOneField(User, primary_key=True)
token = models.CharField(max_length=40, default=make_token)
date_sent = models.DateTimeField(default=timezone.now)
_expiration_hours = 24
@property
def expiration_date(self):
return self.date_sent + timedelta(hours=self._expiration_hours)
@property
def is_valid(self):
return self.expiration_date >= timezone.now()
| from django.db import models
# Create your models here.
class User(models.Model):
"""
User model
This User model eschews passwords, relying instead on emailed OTP tokens.
"""
username = models.CharField(max_length=30, unique=True)
email = models.EmailField(null=True)
is_active = models.BooleanField(default=False)
is_superuser = models.BooleanField(default=False)
date_joined = models.DateTimeField(auto_now_add=True)
@property
def is_authenticated(self):
#Used to tell authenticated Users from anonymous ones
return True
@property
def is_anonymous(self):
#This is not an anonymous user
return False
def __str__(self):
return self.username
class AnonymousUser:
"""
An object to represent an anonymous/unauthenticated user
"""
username = ''
email = None
is_active = False
is_superuser = False
date_joined = None
@property
def is_authenticated(self):
#Anonymous sessions are not authenticated
return False
@property
def is_anonymous(self):
return True
def __str__(self):
return "Anonymous User"
| Python | 0 |
d9e9e4e9cce22c608ed39b0db1d5edc7ae277332 | Correct metaclass implementation | patchboard/resource.py | patchboard/resource.py | # resource.py
#
# Copyright 2014 BitVault.
from __future__ import print_function
import json
from exception import PatchboardError
class ResourceType(type):
"""A metaclass for resource classes."""
# Must override to supply default arguments
def __new__(cls, name, patchboard, definition, schema, mapping):
return type.__new__(cls, name, (Resource,), {})
def __init__(cls, name, patchboard, definition, schema, mapping):
setattr(cls, 'api', classmethod(lambda(self_): patchboard.api))
setattr(cls, 'schema', classmethod(lambda(self_): mapping))
if schema:
if u'properties' in schema:
for name, schema_def in schema[u'properties'].iteritems():
setattr(
cls,
name,
lambda(self): self.attributes[name])
if schema.get(u'additionalProperties', False) is not False:
# FIXME: doesn't take the block the ruby code does
def fn(self, name, *args):
if len(args) == 0:
return self.attributes[name]
else:
return super(cls, self).method_missing(name, *args)
setattr(cls, 'method_missing', fn)
setattr(
cls,
'generate_url',
classmethod(
lambda(self_, params): mapping.generate_url(params)))
for name, action in definition[u'actions'].iteritems():
# FIXME: create actions
# FIXME: implement correctly
setattr(cls, name, lambda(self): False)
# Must be called last
super(ResourceType, cls).__init__(name, (Resource,), {})
class Resource(object):
"""Base class for resources"""
@classmethod
def decorate(cls, instance, attributes):
# TODO: non destructive decoration
# TODO: add some sort of validation for the input attributes.
if cls.schema and u'properties' in cls.schema:
context = instance.context
properties = cls.schema[u'properties']
for key, sub_schema in properties.iteritems():
if key not in attributes:
next
value = attributes[key]
mapping = cls.api.find_mapping(sub_schema)
if mapping:
if mapping.query:
# TODO: find a way to define this at runtime,
# not once for every instance.
def fn(self, params={}):
params[u'url'] = value[u'url']
url = mapping.generate_url(params)
return mapping.cls(context, {u'url': url})
setattr(instance, key, fn)
else:
attributes[key] = mapping.cls(context, value)
else:
attributes[key] = cls.api.decorate(
context,
sub_schema,
value)
return attributes
def __init__(self, context, attributes={}):
self.context = context
self.attributes = Resource.decorate(self, attributes)
self.url = self.attributes[u'url']
# TODO: implement
#def __str__(self):
def __len__(self):
return len(self.attributes)
def __getitem__(self, key):
return self.attributes[key]
def __setitem__(self, key, value):
self.attributes[key] = value
#def __delitem__(self, key):
# del self.attributes[key]
def __contains__(self, obj):
return (obj in self.attributes)
def curl(self):
raise PatchboardError(u"Resource.curl() not implemented")
def to_hash(self):
return self.attributes
def to_json(self):
return json.generate(self.attributes)
| # resource.py
#
# Copyright 2014 BitVault.
from __future__ import print_function
import json
from exception import PatchboardError
class ResourceType(type):
"""A metaclass for resource classes."""
# Must override to supply default arguments
def __new__(cls, name, patchboard, definition, schema, mapping):
return type.__new__(cls, name, (Resource,), {})
def __init__(cls, name, patchboard, definition, schema, mapping):
super(ResourceType, cls).__init__(name, (Resource,), {})
setattr(cls, 'api', classmethod(lambda(self_): patchboard.api))
setattr(cls, 'schema', classmethod(lambda(self_): mapping))
if schema:
if u'properties' in schema:
for name, schema_def in schema[u'properties'].iteritems():
setattr(
cls,
name,
lambda(self): self.attributes[name])
if schema.get(u'additionalProperties', False) is not False:
# FIXME: doesn't take the block the ruby code does
def fn(self, name, *args):
if len(args) == 0:
return self.attributes[name]
else:
return super(cls, self).method_missing(name, *args)
setattr(cls, 'method_missing', fn)
setattr(
cls,
'generate_url',
classmethod(
lambda(self_, params): mapping.generate_url(params)))
for name, action in definition[u'actions'].iteritems():
# FIXME: create actions
# FIXME: implement correctly
setattr(cls, name, lambda(self): False)
class Resource(object):
"""Base class for resources"""
@classmethod
def decorate(cls, instance, attributes):
# TODO: non destructive decoration
# TODO: add some sort of validation for the input attributes.
if cls.schema and u'properties' in cls.schema:
context = instance.context
properties = cls.schema[u'properties']
for key, sub_schema in properties.iteritems():
if key not in attributes:
next
value = attributes[key]
mapping = cls.api.find_mapping(sub_schema)
if mapping:
if mapping.query:
# TODO: find a way to define this at runtime,
# not once for every instance.
def fn(self, params={}):
params[u'url'] = value[u'url']
url = mapping.generate_url(params)
return mapping.cls(context, {u'url': url})
setattr(instance, key, fn)
else:
attributes[key] = mapping.cls(context, value)
else:
attributes[key] = cls.api.decorate(
context,
sub_schema,
value)
return attributes
def __init__(self, context, attributes={}):
self.context = context
self.attributes = Resource.decorate(self, attributes)
self.url = self.attributes[u'url']
# TODO: implement
#def __str__(self):
def __len__(self):
return len(self.attributes)
def __getitem__(self, key):
return self.attributes[key]
def __setitem__(self, key, value):
self.attributes[key] = value
#def __delitem__(self, key):
# del self.attributes[key]
def __contains__(self, obj):
return (obj in self.attributes)
def curl(self):
raise PatchboardError(u"Resource.curl() not implemented")
def to_hash(self):
return self.attributes
def to_json(self):
return json.generate(self.attributes)
| Python | 0.000002 |
5114bf3960b944c193c37ef8ecbcac50ae098d02 | Add InvalidLengthError class | pathvalidate/_error.py | pathvalidate/_error.py | # encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <gogogo.vm@gmail.com>
"""
from __future__ import absolute_import
from __future__ import unicode_literals
class NullNameError(ValueError):
"""
Raised when a name is empty.
"""
class InvalidCharError(ValueError):
"""
Raised when includes invalid character(s) within a string.
"""
class InvalidLengthError(ValueError):
"""
Raised when a string too long/short.
"""
| # encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <gogogo.vm@gmail.com>
"""
from __future__ import absolute_import
from __future__ import unicode_literals
class NullNameError(ValueError):
"""
Raised when a name is empty.
"""
class InvalidCharError(ValueError):
"""
Raised when includes invalid character(s) within a string.
"""
| Python | 0 |
b479491e914c271a41ba92c958c6e3d42ccdb799 | add get_followers to twitter api | polbotcheck/twitter_api.py | polbotcheck/twitter_api.py | import tweepy
import json
from keys import myauth
import pprint
import time
import db
auth = tweepy.OAuthHandler(myauth['consumer_key'], myauth['consumer_secret'])
auth.set_access_token(myauth['access_token'], myauth['access_token_secret'] )
api = tweepy.API(auth,wait_on_rate_limit=True, wait_on_rate_limit_notify=True,\
retry_count=3, retry_delay=5, retry_errors=set([401, 404, 500, 503]))
def limit_handled(cursor):
while True:
try:
yield cursor.next()
except tweepy.RateLimitError:
timestamp = time.strftime("%d.%m.%Y %H:%M:%S", time.localtime())
print('Warning: Rate limit reached!' + timestamp)
time.sleep(15 * 60)
def get_tweets(screen_name):
timestamp = time.strftime("%d.%m.%Y %H:%M:%S", time.localtime())
print(timestamp)
content = []
for tweet in limit_handled(tweepy.Cursor(api.user_timeline, id=screen_name, count=200).items()):
content.append(tweet.text)
retweets = get_retweets(tweet.id)
db.saveRetweets(tweet, retweets)
return content
# def get_all_retweeters(screen_name):
# timestamp = time.strftime("%d.%m.%Y %H:%M:%S", time.localtime())
# print(timestamp)
# all_retweeters = []
# for tweet in limit_handled(tweepy.Cursor(api.user_timeline, id=screen_name, count=200).items()):
# print(tweet.id)
# retweeters = get_retweets(tweet.id)
# # somehow get to retweeters
# # all_retweeters.append(retweeters_per_tweet)
# return all_retweeters
def get_retweets(tweet_id):
timestamp = time.strftime("%d.%m.%Y %H:%M:%S", time.localtime())
print(timestamp)
content = []
for tweet in api.retweets(id=tweet_id, count=200):
content.append(tweet)
return content
def get_followers(screen_name):
timestamp = time.strftime("%d.%m.%Y %H:%M:%S", time.localtime())
print(timestamp)
followers =[]
for user in limit_handled(tweepy.Cursor(twitter_api.followers, screen_name=screen_name, count=200).items()):
followers.append(user)
return followers
if __name__ == "__main__":
# example to get list all tweets (text)
#tweets_save = True
name = '@malechanissen'
content = get_tweets(name)
# if tweets_save == True:
# with open('sample_tweets.json', 'w') as json_out:
# json.dump(content, json_out)
# print('samples have been saved')
# example get user_ids of who retweeted tweet with specific id
#retweeters_save = False
#status_id = '837968136074891264'
#retweets = get_retweets(status_id)
#print(retweets)
#
#db.saveRetweets()
#if retweeters_save == True:
# with open('retweeters.json', 'w') as json_out:
# json.dump(retweeters, json_out)
# example to get all retweeters associated with an user
# print(get_all_retweeters(screen_name=name))
| import tweepy
import json
from keys import myauth
import pprint
import time
import db
auth = tweepy.OAuthHandler(myauth['consumer_key'], myauth['consumer_secret'])
auth.set_access_token(myauth['access_token'], myauth['access_token_secret'] )
api = tweepy.API(auth,wait_on_rate_limit=True, wait_on_rate_limit_notify=True,\
retry_count=3, retry_delay=5, retry_errors=set([401, 404, 500, 503]))
def limit_handled(cursor):
while True:
try:
yield cursor.next()
except tweepy.RateLimitError:
timestamp = time.strftime("%d.%m.%Y %H:%M:%S", time.localtime())
print('Warning: Rate limit reached!' + timestamp)
time.sleep(15 * 60)
def get_tweets(screen_name):
timestamp = time.strftime("%d.%m.%Y %H:%M:%S", time.localtime())
print(timestamp)
content = []
for tweet in limit_handled(tweepy.Cursor(api.user_timeline, id=screen_name, count=200).items()):
content.append(tweet.text)
retweets = get_retweets(tweet.id)
db.saveRetweets(tweet, retweets)
return content
# def get_all_retweeters(screen_name):
# timestamp = time.strftime("%d.%m.%Y %H:%M:%S", time.localtime())
# print(timestamp)
# all_retweeters = []
# for tweet in limit_handled(tweepy.Cursor(api.user_timeline, id=screen_name, count=200).items()):
# print(tweet.id)
# retweeters = get_retweets(tweet.id)
# # somehow get to retweeters
# # all_retweeters.append(retweeters_per_tweet)
# return all_retweeters
def get_retweets(tweet_id):
timestamp = time.strftime("%d.%m.%Y %H:%M:%S", time.localtime())
print(timestamp)
content = []
for tweet in api.retweets(id=tweet_id, count=200):
content.append(tweet)
return content
if __name__ == "__main__":
# example to get list all tweets (text)
#tweets_save = True
name = '@malechanissen'
content = get_tweets(name)
# if tweets_save == True:
# with open('sample_tweets.json', 'w') as json_out:
# json.dump(content, json_out)
# print('samples have been saved')
# example get user_ids of who retweeted tweet with specific id
#retweeters_save = False
#status_id = '837968136074891264'
#retweets = get_retweets(status_id)
#print(retweets)
#
#db.saveRetweets()
#if retweeters_save == True:
# with open('retweeters.json', 'w') as json_out:
# json.dump(retweeters, json_out)
# example to get all retweeters associated with an user
# print(get_all_retweeters(screen_name=name))
| Python | 0 |
41df2254187bd895e1884563ac0cc3a4353ced5b | use string types instead of unicode in web.querystring | circuits/web/querystring.py | circuits/web/querystring.py | # -*- coding: utf-8 -*-
try:
from urlparse import parse_qsl
except ImportError:
from urllib.parse import parse_qsl # NOQA
from circuits.six import iteritems, string_types
def parse(data):
obj = QueryStringParser(data)
return obj.result
class QueryStringToken(object):
ARRAY = "ARRAY"
OBJECT = "OBJECT"
KEY = "KEY"
class QueryStringParser(object):
def __init__(self, data):
self.result = {}
if isinstance(data, string_types):
sorted_pairs = self._sorted_from_string(data)
else:
sorted_pairs = self._sorted_from_obj(data)
[self.process(x) for x in sorted_pairs]
def _sorted_from_string(self, data):
stage1 = parse_qsl(data)
stage2 = [(x[0].strip(), x[1].strip()) for x in stage1]
return sorted(stage2, key=lambda p: p[0])
def _sorted_from_obj(self, data):
# data is a list of the type generated by parse_qsl
if isinstance(data, list):
items = data
else:
# complex objects:
try:
# django.http.QueryDict,
items = [(i[0], j) for i in data.lists() for j in i[1]]
except AttributeError:
# webob.multidict.MultiDict
# werkzeug.datastructures.MultiDict
items = iteritems(data)
return sorted(items, key=lambda p: p[0])
def process(self, pair):
key = pair[0]
value = pair[1]
#faster than invoking a regex
try:
key.index("[")
self.parse(key, value)
return
except ValueError:
pass
try:
key.index(".")
self.parse(key, value)
return
except ValueError:
pass
self.result[key] = value
def parse(self, key, value):
ref = self.result
tokens = self.tokens(key)
for token in tokens:
token_type, key = token
if token_type == QueryStringToken.ARRAY:
if key not in ref:
ref[key] = []
ref = ref[key]
elif token_type == QueryStringToken.OBJECT:
if key not in ref:
ref[key] = {}
ref = ref[key]
elif token_type == QueryStringToken.KEY:
try:
ref = ref[key]
next(tokens)
# TypeError is for pet[]=lucy&pet[]=ollie
# if the array key is empty a type error will be raised
except (IndexError, KeyError, TypeError):
# the index didn't exist
# so we look ahead to see what we are setting
# there is not a next token
# set the value
try:
next_token = next(tokens)
if next_token[0] == QueryStringToken.ARRAY:
ref.append([])
ref = ref[key]
elif next_token[0] == QueryStringToken.OBJECT:
try:
ref[key] = {}
except IndexError:
ref.append({})
ref = ref[key]
except StopIteration:
try:
ref.append(value)
except AttributeError:
ref[key] = value
return
def tokens(self, key):
buf = ""
for char in key:
if char == "[":
yield QueryStringToken.ARRAY, buf
buf = ""
elif char == ".":
yield QueryStringToken.OBJECT, buf
buf = ""
elif char == "]":
try:
yield QueryStringToken.KEY, int(buf)
buf = ""
except ValueError:
yield QueryStringToken.KEY, None
else:
buf = buf + char
if len(buf) > 0:
yield QueryStringToken.KEY, buf
else:
raise StopIteration()
| # -*- coding: utf-8 -*-
try:
from urlparse import parse_qsl
except ImportError:
from urllib.parse import parse_qsl # NOQA
from circuits.six import iteritems, text_type
def parse(data):
obj = QueryStringParser(data)
return obj.result
class QueryStringToken(object):
ARRAY = "ARRAY"
OBJECT = "OBJECT"
KEY = "KEY"
class QueryStringParser(object):
def __init__(self, data):
self.result = {}
if isinstance(data, text_type):
sorted_pairs = self._sorted_from_string(data)
else:
sorted_pairs = self._sorted_from_obj(data)
[self.process(x) for x in sorted_pairs]
def _sorted_from_string(self, data):
stage1 = parse_qsl(data)
stage2 = [(x[0].strip(), x[1].strip()) for x in stage1]
return sorted(stage2, key=lambda p: p[0])
def _sorted_from_obj(self, data):
# data is a list of the type generated by parse_qsl
if isinstance(data, list):
items = data
else:
# complex objects:
try:
# django.http.QueryDict,
items = [(i[0], j) for i in data.lists() for j in i[1]]
except AttributeError:
# webob.multidict.MultiDict
# werkzeug.datastructures.MultiDict
items = iteritems(data)
return sorted(items, key=lambda p: p[0])
def process(self, pair):
key = pair[0]
value = pair[1]
#faster than invoking a regex
try:
key.index("[")
self.parse(key, value)
return
except ValueError:
pass
try:
key.index(".")
self.parse(key, value)
return
except ValueError:
pass
self.result[key] = value
def parse(self, key, value):
ref = self.result
tokens = self.tokens(key)
for token in tokens:
token_type, key = token
if token_type == QueryStringToken.ARRAY:
if key not in ref:
ref[key] = []
ref = ref[key]
elif token_type == QueryStringToken.OBJECT:
if key not in ref:
ref[key] = {}
ref = ref[key]
elif token_type == QueryStringToken.KEY:
try:
ref = ref[key]
next(tokens)
# TypeError is for pet[]=lucy&pet[]=ollie
# if the array key is empty a type error will be raised
except (IndexError, KeyError, TypeError):
# the index didn't exist
# so we look ahead to see what we are setting
# there is not a next token
# set the value
try:
next_token = next(tokens)
if next_token[0] == QueryStringToken.ARRAY:
ref.append([])
ref = ref[key]
elif next_token[0] == QueryStringToken.OBJECT:
try:
ref[key] = {}
except IndexError:
ref.append({})
ref = ref[key]
except StopIteration:
try:
ref.append(value)
except AttributeError:
ref[key] = value
return
def tokens(self, key):
buf = ""
for char in key:
if char == "[":
yield QueryStringToken.ARRAY, buf
buf = ""
elif char == ".":
yield QueryStringToken.OBJECT, buf
buf = ""
elif char == "]":
try:
yield QueryStringToken.KEY, int(buf)
buf = ""
except ValueError:
yield QueryStringToken.KEY, None
else:
buf = buf + char
if len(buf) > 0:
yield QueryStringToken.KEY, buf
else:
raise StopIteration()
| Python | 0.000005 |
042edce052d5307fff8dfbce8c08b72fb72af7f1 | Remove some noise | ckanext/groupadmin/authz.py | ckanext/groupadmin/authz.py | '''This module monkey patches functions in ckan/authz.py and replaces the
default roles with custom roles and decorates
has_user_permission_for_group_org_org to allow a GroupAdmin to admin groups.
GroupAdmins can manage all organizations/groups, but have no other sysadmin
powers.
'''
from ckan import authz, model
from ckan.plugins import toolkit
from ckanext.groupadmin.model import GroupAdmin
authz.ROLE_PERMISSIONS.update({'group_admin': ['read', 'manage_group']})
def _trans_role_group_admin():
return toolkit._('Group Admin')
authz._trans_role_group_admin = _trans_role_group_admin
def is_group_admin_decorator(method):
def decorate_has_user_permission_for_group_or_org(group_id, user_name,
permission):
user_id = authz.get_user_id_for_username(user_name, allow_none=True)
if not user_id:
return False
if GroupAdmin.is_user_group_admin(model.Session, user_id):
return True
return method(group_id, user_name, permission)
return decorate_has_user_permission_for_group_or_org
authz.has_user_permission_for_group_or_org = is_group_admin_decorator(
authz.has_user_permission_for_group_or_org)
| '''This module monkey patches functions in ckan/authz.py and replaces the
default roles with custom roles and decorates
has_user_permission_for_group_org_org to allow a GroupAdmin to admin groups.
GroupAdmins can manage all organizations/groups, but have no other sysadmin
powers.
'''
from ckan import authz, model
from ckan.common import OrderedDict
from ckan.plugins import toolkit
from ckanext.groupadmin.model import GroupAdmin
import logging
log = logging.getLogger(__name__)
old_auth_roles = authz.ROLE_PERMISSIONS
authz.ROLE_PERMISSIONS.update({'group_admin': ['read', 'manage_group']})
log.info(authz.ROLE_PERMISSIONS)
def _trans_role_group_admin():
return toolkit._('Group Admin')
authz._trans_role_group_admin = _trans_role_group_admin
def is_group_admin_decorator(method):
def decorate_has_user_permission_for_group_or_org(group_id, user_name,
permission):
user_id = authz.get_user_id_for_username(user_name, allow_none=True)
if not user_id:
return False
if GroupAdmin.is_user_group_admin(model.Session, user_id):
return True
return method(group_id, user_name, permission)
return decorate_has_user_permission_for_group_or_org
authz.has_user_permission_for_group_or_org = is_group_admin_decorator(
authz.has_user_permission_for_group_or_org)
| Python | 0.000199 |
afb400e16c1335531f259218a8b9937de48644e9 | Update stream health health api url | polyaxon/checks/streams.py | polyaxon/checks/streams.py | from checks.base import Check
from checks.results import Result
from libs.api import get_settings_ws_api_url
from libs.http import safe_request
class StreamsCheck(Check):
@classmethod
def run(cls):
response = safe_request('{}/_health'.format(get_settings_ws_api_url()), 'GET')
status_code = response.status_code
if status_code == 200:
result = Result()
else:
result = Result(message='Service is not healthy, response {}'.format(status_code),
severity=Result.ERROR)
return {'STREAMS': result}
| from checks.base import Check
from checks.results import Result
from libs.api import get_settings_ws_api_url
from libs.http import safe_request
class StreamsCheck(Check):
@classmethod
def run(cls):
response = safe_request(get_settings_ws_api_url(), 'GET')
status_code = response.status_code
if status_code == 200:
result = Result()
else:
result = Result(message='Service is not healthy, response {}'.format(status_code),
severity=Result.ERROR)
return {'STREAMS': result}
| Python | 0.000001 |
35f9f3b3a1ca9174194975e5281682c2712b653f | add get_absolute_url to article categories too | project/articles/models.py | project/articles/models.py | from django.db import models
from django.utils.translation import ugettext as _
from django.utils.timezone import now
from markitup.fields import MarkupField
from autoslug import AutoSlugField
from sorl.thumbnail import ImageField
class Category(models.Model):
class Meta:
verbose_name = _('Category')
verbose_name_plural = _('Categories')
name = models.CharField(max_length=25, verbose_name=_('name'))
slug = AutoSlugField(populate_from='name', unique=True)
def __unicode__(self):
return self.name
@models.permalink
def get_absolute_url(self):
return 'articles:category', (), {'slug': self.slug}
class ArticleManager(models.Manager):
def published(self):
q = self.get_query_set()
return q.filter(pub_date__lte=now())
class Article(models.Model):
objects = ArticleManager()
class Meta:
ordering = ('-pub_date',)
verbose_name = _('Article')
verbose_name_plural = _('Articles')
title = models.CharField(max_length=100, verbose_name=_('title'))
description = MarkupField(blank=True, verbose_name=_('description'),
help_text=_('populated from body if not given'))
body = MarkupField(verbose_name=_('body'))
image = ImageField(blank=True, upload_to='images',
verbose_name=_('image'))
pub_date = models.DateTimeField(default=now,
verbose_name=_('publication date'))
categories = models.ManyToManyField(Category, blank=True, null=True,
verbose_name=_('categories'))
slug = AutoSlugField(populate_from='title', unique=True)
def __unicode__(self):
return self.title
@models.permalink
def get_absolute_url(self):
return 'articles:detail', (), {'slug': self.slug}
| from django.db import models
from django.utils.translation import ugettext as _
from django.utils.timezone import now
from markitup.fields import MarkupField
from autoslug import AutoSlugField
from sorl.thumbnail import ImageField
class Category(models.Model):
class Meta:
verbose_name = _('Category')
verbose_name_plural = _('Categories')
name = models.CharField(max_length=25, verbose_name=_('name'))
slug = AutoSlugField(populate_from='name', unique=True)
def __unicode__(self):
return self.name
class ArticleManager(models.Manager):
def published(self):
q = self.get_query_set()
return q.filter(pub_date__lte=now())
class Article(models.Model):
objects = ArticleManager()
class Meta:
ordering = ('-pub_date',)
verbose_name = _('Article')
verbose_name_plural = _('Articles')
title = models.CharField(max_length=100, verbose_name=_('title'))
description = MarkupField(blank=True, verbose_name=_('description'),
help_text=_('populated from body if not given'))
body = MarkupField(verbose_name=_('body'))
image = ImageField(blank=True, upload_to='images',
verbose_name=_('image'))
pub_date = models.DateTimeField(default=now,
verbose_name=_('publication date'))
categories = models.ManyToManyField(Category, blank=True, null=True,
verbose_name=_('categories'))
slug = AutoSlugField(populate_from='title', unique=True)
def __unicode__(self):
return self.title
@models.permalink
def get_absolute_url(self):
return 'articles:detail', (), {'slug': self.slug}
| Python | 0 |
0623212baaccb938e19891a50cca58b33b339f9c | Improve version handling | oddt/__init__.py | oddt/__init__.py | """Open Drug Discovery Toolkit
==============================
Universal and easy to use resource for various drug discovery tasks,
ie docking, virutal screening, rescoring.
Attributes
----------
toolkit : module,
Toolkits backend module, currenlty OpenBabel [ob] and RDKit [rdk].
This setting is toolkit-wide, and sets given toolkit as default
"""
from __future__ import absolute_import
import os
import subprocess
import warnings
import six
try:
from oddt.toolkits import ob
except ImportError as e:
ob = None
try:
from oddt.toolkits import rdk
except ImportError as e:
rdk = None
toolkit = None
if 'ODDT_TOOLKIT' in os.environ:
if os.environ['ODDT_TOOLKIT'] in ['ob', 'openbabel']:
if ob is None:
warnings.warn('OpenBabel toolkit is forced by ODDT_TOOLKIT, '
'but can\'t be imported')
toolkit = ob
elif os.environ['ODDT_TOOLKIT'] in ['rdk', 'rdkit']:
if rdk is None:
warnings.warn('RDKit toolkit is forced by ODDT_TOOLKIT, '
'but can\'t be imported')
toolkit = rdk
else:
raise EnvironmentError('ODDT_TOOLKIT is set to invalid value: "%s". '
'Use one of ["ob", "openbabel"] for OpenBabel '
'or ["rdk", "rdkit"] for RDKit' % os.environ['ODDT_TOOLKIT'])
elif ob:
toolkit = ob
elif rdk:
toolkit = rdk
else:
warnings.warn('No toolkit is present. Install OpenBabel or RDKit')
def get_version():
home = os.path.dirname(__file__)
git_v = None
v = '0.4.1'
if os.path.isdir(home + '/../.git'):
try:
git_v = subprocess.check_output(['git',
'describe',
'--tags'], cwd=home).strip()
if git_v and six.PY3:
git_v = git_v.decode('latin-1')
except subprocess.CalledProcessError: # catch errors, eg. no git installed
pass
if git_v:
v = git_v
return v
__version__ = get_version()
__all__ = ['toolkit']
def random_seed(i):
"""
Set global random seed for all underlying components.
Use 'brute-force' approach, by setting undelying libraries' seeds.
Parameters
----------
i: int
integer used as seed for random number generators
"""
from numpy.random import seed as np_seed
from random import seed as python_seed
# python's random module
python_seed(i)
# numpy random module
np_seed(i)
| """Open Drug Discovery Toolkit
==============================
Universal and easy to use resource for various drug discovery tasks,
ie docking, virutal screening, rescoring.
Attributes
----------
toolkit : module,
Toolkits backend module, currenlty OpenBabel [ob] and RDKit [rdk].
This setting is toolkit-wide, and sets given toolkit as default
"""
from __future__ import absolute_import
import os
import subprocess
import warnings
try:
from oddt.toolkits import ob
except ImportError as e:
ob = None
try:
from oddt.toolkits import rdk
except ImportError as e:
rdk = None
toolkit = None
if 'ODDT_TOOLKIT' in os.environ:
if os.environ['ODDT_TOOLKIT'] in ['ob', 'openbabel']:
if ob is None:
warnings.warn('OpenBabel toolkit is forced by ODDT_TOOLKIT, '
'but can\'t be imported')
toolkit = ob
elif os.environ['ODDT_TOOLKIT'] in ['rdk', 'rdkit']:
if rdk is None:
warnings.warn('RDKit toolkit is forced by ODDT_TOOLKIT, '
'but can\'t be imported')
toolkit = rdk
else:
raise EnvironmentError('ODDT_TOOLKIT is set to invalid value: "%s". '
'Use one of ["ob", "openbabel"] for OpenBabel '
'or ["rdk", "rdkit"] for RDKit' % os.environ['ODDT_TOOLKIT'])
elif ob:
toolkit = ob
elif rdk:
toolkit = rdk
else:
warnings.warn('No toolkit is present. Install OpenBabel or RDKit')
def get_version():
home = os.path.dirname(__file__)
git_v = None
v = '0.4.1'
if os.path.isdir(home + '/../.git'):
try:
git_v = str(subprocess.check_output(['git',
'describe',
'--tags'], cwd=home).strip())
except subprocess.CalledProcessError: # catch errors, eg. no git installed
pass
if git_v:
v = git_v
return v
__version__ = get_version()
__all__ = ['toolkit']
def random_seed(i):
"""
Set global random seed for all underlying components.
Use 'brute-force' approach, by setting undelying libraries' seeds.
Parameters
----------
i: int
integer used as seed for random number generators
"""
from numpy.random import seed as np_seed
from random import seed as python_seed
# python's random module
python_seed(i)
# numpy random module
np_seed(i)
| Python | 0.000002 |
f4d41f9a75f464dcf2dca2953536ed28c6221b33 | numPages should be an int | server/central_psparser.py | server/central_psparser.py | import logging
import json
import re
class PSParser():
def __init__(self):
self.logger = logging.getLogger("PSParser")
self.logger.info("Loaded PostScript Parser")
def __getPSFromJID(self, jid):
jobFile = open(jid, 'r')
job = json.load(jobFile)
jobFile.close()
return job["postscript"]
def isDuplex(self, jid):
ps = self.__getPSFromJID(jid)
if("/Duplex true" in ps):
self.logger.debug("job %s is duplex enabled", jid)
return True
else:
self.logger.debug("job %s is not duplex enabled", jid)
return False
def pageCount(self, jid):
ps = self.__getPSFromJID(jid)
numPages = None
self.logger.debug("Computing page count for %s", jid)
rgxresult = re.search('%%Pages: [0-9]*', ps)
logging.debug("rgxresult: {0}".format(rgxresult.group(0)))
if(rgxresult != None) :
numPages = int(re.search('%%Pages: [0-9]*', ps).group(0).split(" ")[1])
self.logger.debug("File is adobe compliant, suspect to be {0} pages".format(numPages))
else:
self.logger.error("File is not adobe compliant, page count indeterminate.")
return numPages
| import logging
import json
import re
class PSParser():
def __init__(self):
self.logger = logging.getLogger("PSParser")
self.logger.info("Loaded PostScript Parser")
def __getPSFromJID(self, jid):
jobFile = open(jid, 'r')
job = json.load(jobFile)
jobFile.close()
return job["postscript"]
def isDuplex(self, jid):
ps = self.__getPSFromJID(jid)
if("/Duplex true" in ps):
self.logger.debug("job %s is duplex enabled", jid)
return True
else:
self.logger.debug("job %s is not duplex enabled", jid)
return False
def pageCount(self, jid):
ps = self.__getPSFromJID(jid)
numPages = None
self.logger.debug("Computing page count for %s", jid)
rgxresult = re.search('%%Pages: [0-9]*', ps)
logging.debug("rgxresult: {0}".format(rgxresult.group(0)))
if(rgxresult != None) :
numPages = re.search('%%Pages: [0-9]*', ps).group(0).split(" ")[1]
self.logger.debug("File is adobe compliant, suspect to be {0} pages".format(numPages))
else:
self.logger.error("File is not adobe compliant, page count indeterminate.")
return numPages
| Python | 0.999947 |
edb04d8e0ae03c9244b7d934fd713efbb94d5a58 | Add api url to album and link | opps/api/urls.py | opps/api/urls.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, url, include
from tastypie.api import Api
from opps.containers.api import Container
from opps.articles.api import Post, Album, Link
from .conf import settings
_api = Api(api_name=settings.OPPS_API_NAME)
_api.register(Container())
_api.register(Post())
_api.register(Album())
_api.register(Link())
urlpatterns = patterns(
'',
url(r'^', include(_api.urls)),
)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, url, include
from tastypie.api import Api
from opps.containers.api import Container
from opps.articles.api import Post
from .conf import settings
_api = Api(api_name=settings.OPPS_API_NAME)
_api.register(Container())
_api.register(Post())
urlpatterns = patterns(
'',
url(r'^', include(_api.urls)),
)
| Python | 0 |
3df0f14a9e2625081a1b1f51aef997d18b4a9b50 | Remove Logging | pabiana/brain.py | pabiana/brain.py | import importlib
import logging
import multiprocessing as mp
import os
import signal
from os import path
import pip
from . import _default_clock, load_interfaces, repo
def main(*args):
args = list(args)
stop_pip = False
if '-X' in args:
stop_pip = True
args.remove('-X')
if '-C' in args:
args.append('clock:clock')
args.remove('-C')
if len(args) > 1:
logging.info('Starting %s processes', len(args))
signal.signal(signal.SIGINT, lambda *args, **kwargs: None)
mp.set_start_method('spawn')
for module_area_name in args:
process = mp.Process(target=run, args=(module_area_name, stop_pip))
process.start()
else:
run(*args, stop_pip=stop_pip)
def run(module_area_name, stop_pip=False):
module_name, area_name = module_area_name.split(':')
repo['base-path'] = os.getcwd()
repo['module-name'] = module_name
repo['area-name'] = area_name
intf_path = path.join(repo['base-path'], 'interfaces.json')
if path.isfile(intf_path):
load_interfaces(intf_path)
req_path = path.join(repo['base-path'], module_name, 'requirements.txt')
if not stop_pip and path.isfile(req_path):
pip.main(['install', '--upgrade', '-r', req_path])
try:
mod = importlib.import_module(module_name)
except ImportError:
if module_name == 'clock' and area_name == 'clock':
mod = _default_clock
else:
raise
if hasattr(mod, 'setup'):
mod.setup()
if hasattr(mod, 'area'):
if hasattr(mod, 'config'):
params = {'clock_name': mod.config['clock-name']}
if 'clock-slot' in mod.config:
if mod.config['clock-slot'] is not None:
params['clock_slot'] = mod.config['clock-slot']
if 'subscriptions' in mod.config:
if mod.config['subscriptions'] is not None:
params['subscriptions'] = mod.config['subscriptions']
mod.area.setup(**params)
if 'context-values' in mod.config:
mod.area.context.update(mod.config['context-values'])
mod.area.run()
elif hasattr(mod, 'clock'):
if hasattr(mod, 'config'):
params = {}
if 'timeout' in mod.config:
if mod.config['timeout'] is not None:
params['timeout'] = mod.config['timeout']
if 'use-template' in mod.config:
if mod.config['use-template'] is not None:
params['use_template'] = mod.config['use-template']
mod.clock.setup(**params)
mod.clock.run()
elif hasattr(mod, 'runner'):
if hasattr(mod.runner, 'setup'):
params = {}
if hasattr(mod, 'config'):
params.update(mod.config)
mod.runner.setup(**params)
mod.runner.run()
| import importlib
import logging
import multiprocessing as mp
import os
import signal
from os import path
import pip
from . import _default_clock, load_interfaces, repo
def main(*args):
args = list(args)
stop_pip = False
if '-X' in args:
stop_pip = True
args.remove('-X')
if '-C' in args:
args.append('clock:clock')
args.remove('-C')
if len(args) > 1:
logging.info('Starting %s processes', len(args))
signal.signal(signal.SIGINT, lambda *args, **kwargs: None)
mp.set_start_method('spawn')
for module_area_name in args:
process = mp.Process(target=run, args=(module_area_name, stop_pip))
process.start()
else:
run(*args, stop_pip=stop_pip)
def run(module_area_name, stop_pip=False):
module_name, area_name = module_area_name.split(':')
repo['base-path'] = os.getcwd()
repo['module-name'] = module_name
repo['area-name'] = area_name
intf_path = path.join(repo['base-path'], 'interfaces.json')
if path.isfile(intf_path):
load_interfaces(intf_path)
req_path = path.join(repo['base-path'], module_name, 'requirements.txt')
if not stop_pip and path.isfile(req_path):
pip.main(['install', '--upgrade', '-r', req_path])
try:
mod = importlib.import_module(module_name)
except ImportError:
logging.info('Import Error %s:%s', module_name, area_name)
if module_name is 'clock' and area_name is 'clock':
mod = _default_clock
logging.info('Module %s', mod)
else:
raise
if hasattr(mod, 'setup'):
mod.setup()
if hasattr(mod, 'area'):
if hasattr(mod, 'config'):
params = {'clock_name': mod.config['clock-name']}
if 'clock-slot' in mod.config:
if mod.config['clock-slot'] is not None:
params['clock_slot'] = mod.config['clock-slot']
if 'subscriptions' in mod.config:
if mod.config['subscriptions'] is not None:
params['subscriptions'] = mod.config['subscriptions']
mod.area.setup(**params)
if 'context-values' in mod.config:
mod.area.context.update(mod.config['context-values'])
mod.area.run()
elif hasattr(mod, 'clock'):
if hasattr(mod, 'config'):
params = {}
if 'timeout' in mod.config:
if mod.config['timeout'] is not None:
params['timeout'] = mod.config['timeout']
if 'use-template' in mod.config:
if mod.config['use-template'] is not None:
params['use_template'] = mod.config['use-template']
mod.clock.setup(**params)
mod.clock.run()
elif hasattr(mod, 'runner'):
if hasattr(mod.runner, 'setup'):
params = {}
if hasattr(mod, 'config'):
params.update(mod.config)
mod.runner.setup(**params)
mod.runner.run()
| Python | 0.000002 |
e82d477194393ff3142f6c25c5db4c7b7f2a98a5 | Call ConsoleViewer init | simpleai/search/viewers.py | simpleai/search/viewers.py | # coding: utf-8
from os import path
from threading import Thread
from time import sleep
class DummyViewer(object):
def start(self):
pass
def new_iteration(self, fringe):
pass
def chosen_node(self, node, is_goal):
pass
def expanded(self, node, successors):
pass
class ConsoleViewer(DummyViewer):
def __init__(self, interactive=True):
self.interactive = interactive
def pause(self):
if self.interactive:
raw_input('> press Enter ')
def output(self, *args):
print ' '.join(map(str, args))
def new_iteration(self, fringe):
self.output(' **** New iteration ****')
self.output(len(fringe), 'elements in fringe:', fringe)
self.pause()
def chosen_node(self, node, is_goal):
self.output('Chosen node:', node)
if is_goal:
self.output('Is goal!')
else:
self.output('Not goal')
self.pause()
def expanded(self, node, successors):
self.output('Expand:', node)
self.output(len(successors), 'successors:', successors)
self.pause()
class WebViewer(ConsoleViewer):
def __init__(self, host='127.0.0.1', port=8000):
super(WebViewer, self).__init__(interactive=True)
self.host = host
self.port = port
self.paused = True
self.events = []
web_template_path = path.join(path.dirname(__file__), 'web_viewer.html')
self.web_template = open(web_template_path).read()
def start(self):
from bottle import route, run
route('/')(self.web_status)
route('/next')(self.web_next)
t = Thread(target=run, kwargs=dict(host=self.host, port=self.port))
t.daemon = True
t.start()
self.pause()
def web_status(self):
from bottle import template
return template(self.web_template, events=self.events)
def web_next(self):
from bottle import redirect
self.paused = False
while not self.paused:
sleep(0.1)
redirect('/')
def pause(self):
self.paused = True
while self.paused:
sleep(0.1)
def output(self, *args):
self.events.append(' '.join(map(str, args)))
| # coding: utf-8
from os import path
from threading import Thread
from time import sleep
class DummyViewer(object):
def start(self):
pass
def new_iteration(self, fringe):
pass
def chosen_node(self, node, is_goal):
pass
def expanded(self, node, successors):
pass
class ConsoleViewer(DummyViewer):
def __init__(self, interactive=True):
self.interactive = interactive
def pause(self):
if self.interactive:
raw_input('> press Enter ')
def output(self, *args):
print ' '.join(map(str, args))
def new_iteration(self, fringe):
self.output(' **** New iteration ****')
self.output(len(fringe), 'elements in fringe:', fringe)
self.pause()
def chosen_node(self, node, is_goal):
self.output('Chosen node:', node)
if is_goal:
self.output('Is goal!')
else:
self.output('Not goal')
self.pause()
def expanded(self, node, successors):
self.output('Expand:', node)
self.output(len(successors), 'successors:', successors)
self.pause()
class WebViewer(ConsoleViewer):
def __init__(self, host='127.0.0.1', port=8000):
self.host = host
self.port = port
self.paused = True
self.events = []
web_template_path = path.join(path.dirname(__file__), 'web_viewer.html')
self.web_template = open(web_template_path).read()
def start(self):
from bottle import route, run
route('/')(self.web_status)
route('/next')(self.web_next)
t = Thread(target=run, kwargs=dict(host=self.host, port=self.port))
t.daemon = True
t.start()
self.pause()
def web_status(self):
from bottle import template
return template(self.web_template, events=self.events)
def web_next(self):
from bottle import redirect
self.paused = False
while not self.paused:
sleep(0.1)
redirect('/')
def pause(self):
self.paused = True
while self.paused:
sleep(0.1)
def output(self, *args):
self.events.append(' '.join(map(str, args)))
| Python | 0 |
e165d16660cb10a7008be2b7566a6db471dafde0 | Fixing a typo that handles generating new mailbox names. | django_mailbox/management/commands/processincomingmessage.py | django_mailbox/management/commands/processincomingmessage.py | import email
import logging
import rfc822
import sys
from django.core.management.base import BaseCommand
from django_mailbox.models import Mailbox
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class Command(BaseCommand):
def handle(self, *args, **options):
message = email.message_from_string(sys.stdin.read())
if message:
mailbox = self.get_mailbox_for_message(message)
mailbox.process_incoming_message(message)
logger.info("Message received from %s" % message['from'])
else:
logger.warning("Message not processable.")
def get_mailbox_for_message(self, message):
email_address = rfc822.parseaddr(message['from'])[1][0:255]
mailbox, created = Mailbox.objects.get_or_create(
name=email_address,
)
return mailbox
| import email
import logging
import rfc822
import sys
from django.core.management.base import BaseCommand
from django_mailbox.models import Mailbox
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class Command(BaseCommand):
def handle(self, *args, **options):
message = email.message_from_string(sys.stdin.read())
if message:
mailbox = self.get_mailbox_for_message(message)
mailbox.process_incoming_message(message)
logger.info("Message received from %s" % message['from'])
else:
logger.warning("Message not processable.")
def get_mailbox_for_message(self, message):
email_address = rfc822.parseaddr(message['from'][1][0:255])
mailbox, created = Mailbox.objects.get_or_create(
name=email_address,
)
return mailbox
| Python | 0.999963 |
cb117d449c3e5d611951a5d1a9efbafd34525238 | fix finding files | chandra_suli/make_lightcurve.py | chandra_suli/make_lightcurve.py | #!/usr/bin/env python
"""
Generate lightcurves for each candidate given a list of candidates
"""
import argparse
import os
import sys
import numpy as np
import astropy.io.fits as pyfits
import matplotlib.pyplot as plt
import seaborn as sbs
from chandra_suli import find_files
from chandra_suli import logging_system
from chandra_suli.run_command import CommandRunner
from chandra_suli.sanitize_filename import sanitize_filename
if __name__=="__main__":
parser = argparse.ArgumentParser(description='Generate light curves for transients listed in a'
'master list')
parser.add_argument("--masterfile", help="Path to file containing list of transients",
required=True, type=str)
parser.add_argument("--data_path", help="Path to directory containing data of all obsids", required = True,
type=str)
# Get the logger
logger = logging_system.get_logger(os.path.basename(sys.argv[0]))
# Get the command runner
runner = CommandRunner(logger)
args = parser.parse_args()
# Get data from masterfile
data_path = sanitize_filename(args.data_path)
masterfile = sanitize_filename(args.masterfile)
transient_data = np.array(np.recfromtxt(masterfile, names=True), ndmin=1)
for transient in transient_data:
obsid = transient['Obsid']
ccd = transient['CCD']
candidate = transient['Candidate']
tstart = transient['Tstart']
tstop = transient['Tstop']
# use region file from xtdac and cut region
regions = find_files.find_files(os.path.join(data_path, str(obsid)), "ccd_%s_%s_filtered_candidate_%s.reg" %(ccd, obsid, candidate))
event_file = find_files.find_files(os.path.join(data_path, str(obsid)), "ccd_%s_%s_filtered.fits" %(ccd, obsid))[0]
if len(regions) != 1:
raise IOError("More than one region file found")
else:
region = regions[0]
evt_reg = "ccd_%s_%s_filtered_candidate_%s_reg.fits" %(ccd, obsid, candidate)
cmd_line = "ftcopy \'%s[EVENTS][regfilter(\"%s\")]\' %s clobber=yes " %(event_file, region, evt_reg)
runner.run(cmd_line)
data = pyfits.getdata(evt_reg)
sbs.set(font_scale=2)
sbs.set_style('white')
fig = plt.figure(figsize=(15, 15 / 1.33333))
duration = tstop - tstart
bins = np.arange(-10 * duration, 10 * duration, duration)
time = data.field("TIME")
rate, obins, _ = plt.hist(time - tstart, bins, weights=np.ones(time.shape[0]) * 1.0 / duration,
color='white')
# Centers of the bins
tc = (bins[:-1] + bins[1:]) / 2.0
plt.errorbar(tc, rate, yerr=np.sqrt(rate * duration) / duration, fmt='.')
plt.axvline(0, linestyle=':')
plt.axvline(duration, linestyle=':')
plt.xlabel("Time since trigger (s)")
plt.ylabel("Count rate (cts/s)")
plt.title("Transient Lightcurve\nObsID = %s, CCD ID = %s, Candidate=%s\n" %(obsid, ccd, candidate))
plot_file = "ccd_%s_%s_candidate_%s_lightcurve.png" %(ccd, obsid, candidate)
plt.savefig(plot_file)
os.rename(plot_file, os.path.join(data_path, str(obsid), plot_file))
os.rename(evt_reg,os.path.join(data_path, str(obsid), evt_reg))
plt.close()
| #!/usr/bin/env python
"""
Generate lightcurves for each candidate given a list of candidates
"""
import argparse
import os
import sys
import numpy as np
import astropy.io.fits as pyfits
import matplotlib.pyplot as plt
import seaborn as sbs
from chandra_suli import find_files
from chandra_suli import logging_system
from chandra_suli.run_command import CommandRunner
from chandra_suli.sanitize_filename import sanitize_filename
if __name__=="__main__":
parser = argparse.ArgumentParser(description='Generate light curves for transients listed in a'
'master list')
parser.add_argument("--masterfile", help="Path to file containing list of transients",
required=True, type=str)
parser.add_argument("--data_path", help="Path to directory containing data of all obsids", required = True,
type=str)
# Get the logger
logger = logging_system.get_logger(os.path.basename(sys.argv[0]))
# Get the command runner
runner = CommandRunner(logger)
args = parser.parse_args()
# Get data from masterfile
data_path = sanitize_filename(args.data_path)
masterfile = sanitize_filename(args.masterfile)
transient_data = np.array(np.recfromtxt(masterfile, names=True), ndmin=1)
for transient in transient_data:
obsid = transient['Obsid']
ccd = transient['CCD']
candidate = transient['Candidate']
tstart = transient['Tstart']
tstop = transient['Tstop']
# use region file from xtdac and cut region
regions = find_files.find_files(str(obsid), "ccd_%s_%s_filtered_candidate_%s.reg" %(ccd, obsid, candidate))
event_file = find_files.find_files(str(obsid), "ccd_%s_%s_filtered.fits" %(ccd, obsid))[0]
if len(regions) != 1:
raise IOError("More than one region file found")
else:
region = regions[0]
evt_reg = "ccd_%s_%s_filtered_candidate_%s_reg.fits" %(ccd, obsid, candidate)
cmd_line = "ftcopy \'%s[EVENTS][regfilter(\"%s\")]\' %s clobber=yes " %(event_file, region, evt_reg)
runner.run(cmd_line)
data = pyfits.getdata(evt_reg)
sbs.set(font_scale=2)
sbs.set_style('white')
fig = plt.figure(figsize=(15, 15 / 1.33333))
duration = tstop - tstart
bins = np.arange(-10 * duration, 10 * duration, duration)
time = data.field("TIME")
rate, obins, _ = plt.hist(time - tstart, bins, weights=np.ones(time.shape[0]) * 1.0 / duration,
color='white')
# Centers of the bins
tc = (bins[:-1] + bins[1:]) / 2.0
plt.errorbar(tc, rate, yerr=np.sqrt(rate * duration) / duration, fmt='.')
plt.axvline(0, linestyle=':')
plt.axvline(duration, linestyle=':')
plt.xlabel("Time since trigger (s)")
plt.ylabel("Count rate (cts/s)")
plt.title("Transient Lightcurve\nObsID = %s, CCD ID = %s, Candidate=%s\n" %(obsid, ccd, candidate))
plot_file = "ccd_%s_%s_candidate_%s_lightcurve.png" %(ccd, obsid, candidate)
plt.savefig(plot_file)
os.rename(plot_file, os.path.join(data_path, str(obsid), plot_file))
os.rename(evt_reg,os.path.join(data_path, str(obsid), evt_reg))
plt.close()
| Python | 0.000366 |
cfa8b88e3d86e560415260eb596dd3bbdab52736 | Fix test of auto_backup_download | auto_backup_download/tests/test_auto_backup_download.py | auto_backup_download/tests/test_auto_backup_download.py | # -*- coding: utf-8 -*-
# Copyright 2017 Onestein (<http://www.onestein.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo.tests import common
from odoo.exceptions import Warning
class TestAutoBackupDownload(common.TransactionCase):
def test_01_create_not_existing(self):
backup_dir = self.env.ref(
'auto_backup_download.default_backup_directory')
# test method get_dir()
with self.assertRaises(Warning):
backup_dir.get_dir()
# test reload list of directory
with self.assertRaises(Warning):
backup_dir.reload()
def test_02_create_existing(self):
backup_dir = self.env.ref(
'auto_backup_download.default_backup_directory')
self.env['db.backup'].create({
'name': 'Test Backup 1',
'folder': '/tmp'
})
# test method get_dir()
full_dir = backup_dir.get_dir()
self.assertEqual(full_dir[-1], '/')
# test computed field file_ids
self.assertGreaterEqual(len(backup_dir.file_ids), 0)
# test count list of directory
self.assertEqual(len(full_dir.file_ids), full_dir.file_count)
# test reload list of directory
full_dir.reload()
self.assertEqual(len(full_dir.file_ids), full_dir.file_count)
| # -*- coding: utf-8 -*-
# Copyright 2017 Onestein (<http://www.onestein.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo.tests import common
from odoo.exceptions import Warning
class TestAutoBackupDownload(common.TransactionCase):
def test_01_create_not_existing(self):
backup_dir = self.env.ref(
'auto_backup_download.default_backup_directory')
# test method get_dir()
with self.assertRaises(Warning):
backup_dir.get_dir()
# test computed field file_ids
self.assertEqual(len(backup_dir.file_ids), 0)
# test count list of directory
self.assertEqual(len(backup_dir.file_ids), backup_dir.file_count)
# test reload list of directory
with self.assertRaises(Warning):
backup_dir.reload()
self.assertEqual(len(backup_dir.file_ids), 0)
self.assertEqual(len(backup_dir.file_ids), backup_dir.file_count)
def test_02_create_existing(self):
backup_dir = self.env.ref(
'auto_backup_download.default_backup_directory')
self.env['db.backup'].create({
'name': 'Test Backup 1',
'folder': '/tmp'
})
# test method get_dir()
full_dir = backup_dir.get_dir()
self.assertEqual(full_dir[-1], '/')
# test computed field file_ids
self.assertGreaterEqual(len(backup_dir.file_ids), 0)
# test count list of directory
self.assertEqual(len(full_dir.file_ids), full_dir.file_count)
# test reload list of directory
full_dir.reload()
self.assertEqual(len(full_dir.file_ids), full_dir.file_count)
| Python | 0.000003 |
79ec3f3ebabd1625830952ecdec1b0761c2b5324 | Rewrite serialization of Attempt submit response | web/attempts/rest.py | web/attempts/rest.py | import json
from django.db import transaction
from rest_framework import validators, decorators, status
from rest_framework.authentication import TokenAuthentication
from rest_framework.response import Response
from rest_framework.serializers import ModelSerializer, Field
from rest_framework.viewsets import ModelViewSet
from .models import Attempt
class WritableJSONField(Field):
def to_internal_value(self, data):
return data
class JSONStringField(Field):
"""
Store a JSON object in a TextField.
When object is received store its json dump.
When object is retrieved load JSON object from string representation.
"""
def to_internal_value(self, data):
return json.dumps(data)
def to_representation(self, value):
return json.loads(value)
class AttemptSerializer(ModelSerializer):
"""
Serialize an Attempt object.
"""
secret = WritableJSONField(write_only=True, required=False)
feedback = JSONStringField()
class Meta:
model = Attempt
@staticmethod
def check_secret(validated_data):
# Check and remove secret from the validated_data dictionary
user_secret = validated_data.pop('secret', '[]')
secret_matches = validated_data['part'].check_secret(user_secret)[0]
if not secret_matches:
validated_data['valid'] = False
def create(self, validated_data):
self.check_secret(validated_data)
return super(AttemptSerializer, self).create(validated_data)
def update(self, instance, validated_data):
self.check_secret(validated_data)
return super(AttemptSerializer, self).update(instance, validated_data)
class AttemptViewSet(ModelViewSet):
"""
A viewset for viewing and editing Attempt instances.
"""
serializer_class = AttemptSerializer
queryset = Attempt.objects.all()
@decorators.list_route(methods=['post'], authentication_classes=[TokenAuthentication])
@transaction.atomic
def submit(self, request):
serializer = AttemptSerializer(data=request.data, many=True, partial=True)
def _f(validator):
return not isinstance(validator, validators.UniqueTogetherValidator)
serializer.child.validators = filter(_f, serializer.child.validators)
if serializer.is_valid():
attempts = []
for attempt_data in serializer.validated_data:
AttemptSerializer.check_secret(attempt_data)
attempts.append(
Attempt.objects.update_or_create(
user=request.user,
part=attempt_data['part'],
defaults=attempt_data)[0])
data = AttemptSerializer(attempts, many=True).data
return Response(data, status=status.HTTP_200_OK)
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
| from django.db import transaction
from rest_framework import validators, decorators, status
from rest_framework.authentication import TokenAuthentication
from rest_framework.response import Response
from rest_framework.serializers import ModelSerializer, Field
from rest_framework.viewsets import ModelViewSet
from .models import Attempt
class WritableJSONField(Field):
def to_internal_value(self, data):
return data
class AttemptSerializer(ModelSerializer):
"""
Serialize an Attempt object.
"""
secret = WritableJSONField(write_only=True, required=False)
class Meta:
model = Attempt
@staticmethod
def check_secret(validated_data):
# Check and remove secret from the validated_data dictionary
user_secret = validated_data.pop('secret', '[]')
secret_matches = validated_data['part'].check_secret(user_secret)[0]
if not secret_matches:
validated_data['valid'] = False
def create(self, validated_data):
self.check_secret(validated_data)
return super(AttemptSerializer, self).create(validated_data)
def update(self, instance, validated_data):
self.check_secret(validated_data)
return super(AttemptSerializer, self).update(instance, validated_data)
class AttemptViewSet(ModelViewSet):
"""
A viewset for viewing and editing Attempt instances.
"""
serializer_class = AttemptSerializer
queryset = Attempt.objects.all()
@decorators.list_route(methods=['post'], authentication_classes=[TokenAuthentication])
@transaction.atomic
def submit(self, request):
serializer = AttemptSerializer(data=request.data, many=True, partial=True)
def _f(validator):
return not isinstance(validator, validators.UniqueTogetherValidator)
serializer.child.validators = filter(_f, serializer.child.validators)
if serializer.is_valid():
attempts = []
for attempt_data in serializer.validated_data:
AttemptSerializer.check_secret(attempt_data)
attempt, _ = Attempt.objects.update_or_create(
user=request.user,
part=attempt_data['part'],
defaults=attempt_data)
attempts.append({
'part': attempt.part.pk,
'solution': attempt.solution,
'valid': attempt.valid,
'feedback': attempt.feedback,
})
response = {'attempts': attempts}
return Response(response, status=status.HTTP_200_OK)
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
| Python | 0 |
c3996af1f7b201355d1cbcd6ef4c8fe420c8b67e | Fix lint | solutions/uri/1028/1028.py | solutions/uri/1028/1028.py | def gcd(a, b):
while b > 0:
a, b = b, a % b
return a
n = int(input())
for line in range(n):
a, b = map(int, input().split())
print(gcd(a, b))
| import sys
def gcd(a, b):
while b > 0:
a, b = b, a % b
return a
n = int(input())
for line in range(n):
a, b = map(int, input().split())
print(gcd(a, b))
| Python | 0.000032 |
5fd1f7cbe9534a47c4dc837773f22f6f177fdcf5 | Update affineHacker: fixed imports and typo | books/CrackingCodesWithPython/Chapter15/affineHacker.py | books/CrackingCodesWithPython/Chapter15/affineHacker.py | # Affine Cipher Hacker
# https://www.nostarch.com/crackingcodes/ (BSD Licensed)
from books.CrackingCodesWithPython.pyperclip import copy
from books.CrackingCodesWithPython.Chapter14.affineCipher import decryptMessage, SYMBOLS, getKeyParts
from books.CrackingCodesWithPython.Chapter13.cryptomath import gcd
from books.CrackingCodesWithPython.Chapter11.detectEnglish import isEnglish
SILENT_MODE = False
def main():
# You might want to copy & paste this text from the source code at
# https://www.nostarch.com/crackingcodes/.
myMessage = """5QG9ol3La6QI93!xQxaia6faQL9QdaQG1!!axQARLa!!A
uaRLQADQALQG93!xQxaGaAfaQ1QX3o1RQARL9Qda!AafARuQLX1LQALQI1
iQX3o1RN"Q-5!1RQP36ARu"""
hackedMessage = hackAffine(myMessage)
if hackedMessage != None:
# The plaintext is displayed on the screen. For the convenience of
# the user, we copy the text of the code to the clipboard:
print('Copying hacked message to clipboard:')
print(hackedMessage)
copy(hackedMessage)
else:
print('Failed to hack encryption.')
def hackAffine(message):
print('Hacking...')
# Python programs can be stopped at any time by pressing Ctrl-C (on
# Windows) or Ctrl-D (on macOS and Linux):
print('(Press Ctrl-C or Ctrl-D to quit at any time.)')
# Brute-force by looping through every possible key:
for key in range(len(SYMBOLS) ** 2):
keyA = getKeyParts(key)[0]
if gcd(keyA, len(SYMBOLS)) != 1:
continue
decryptedText = decryptMessage(key, message)
if not SILENT_MODE:
print('Tried Key %s... (%s)' % (key, decryptedText[:40]))
if isEnglish(decryptedText):
# Check with the user if the decrypted key has been found:
print()
print('Possible encryption hack:')
print('Key: %s' % (key))
print('Decrypted message: ' + decryptedText[:200])
print()
print('Enter D for done, or just press Enter to continue hacking:')
response = input('> ')
if response.strip().upper().startswith('D'):
return decryptedText
return None
# If affineHacker.py is run (instead of imported as a module), call
# the main() function:
if __name__ == '__main__':
main() | # Affine Cipher Hacker
# https://www.nostarch.com/crackingcodes/ (BSD Licensed)
import pyperclip, affineCipher, detectEnglish, cryptomath
SILENT_MODE = False
def main():
# You might want to copy & paste this text from the source code at
# https://www.nostarch.com/crackingcodes/.
myMessage = """5QG9ol3La6QI93!xQxaia6faQL9QdaQG1!!axQARLa!!A
uaRLQADQALQG93!xQxaGaAfaQ1QX3o1RQARL9Qda!AafARuQLX1LQALQI1
iQX3o1RN"Q-5!1RQP36ARu"""
hackedMessage = hackAffine(myMessage)
if hackedMessage != None:
# The plaintext is displayed on the screen. For the convenience of
# the user, we copy the text of the code to the clipboard:
print('Copying hacked message to clipboard:')
print(hackedMessage)
pyperclip.copy(hackedMessage)
else:
print('Failed to hack encryption.')
def hackAffine(message):
print('Hacking...')
# Python programs can be stopped at any time by pressing Ctrl-C (on
# Windows) or Ctrl-D (on macOS and Linux):
print('(Press Ctrl-C or Ctrl-D to quit at any time.)')
# Brute-force by looping through every possible key:
for key in range(len(affineCipher.SYMBOLS) ** 2):
keyA = affineCipher.getKeyParts(key)[0]
if cryptomath.gcd(keyA, len(affineCipher.SYMBOLS)) ! = 1:
continue
decryptedText = affineCipher.decryptMessage(key, message)
if not SILENT_MODE:
print('Tried Key %s... (%s)' % (key, decryptedText[:40]))
if detectEnglish.isEnglish(decryptedText):
# Check with the user if the decrypted key has been found:
print()
print('Possible encryption hack:')
print('Key: %s' % (key))
print('Decrypted message: ' + decryptedText[:200])
print()
print('Enter D for done, or just press Enter to continue hacking:')
response = input('> ')
if response.strip().upper().startswith('D'):
return decryptedText
return None
# If affineHacker.py is run (instead of imported as a module), call
# the main() function:
if __name__ == '__main__':
main() | Python | 0 |
b6727f6bd9f3d8ffe59b17f157180a8fa5e61467 | Fix typo. | py/vttest/fakezk_config.py | py/vttest/fakezk_config.py | # Copyright 2013 Google Inc. All Rights Reserved.
"""Generate a config file for fakezk topology."""
__author__ = 'enisoc@google.com (Anthony Yeh)'
import base64
import codecs
import json
class FakeZkConfig(object):
"""Create fakezk config for use as static topology for vtgate."""
def __init__(self, mysql_port, cell='test_cell', host='127.0.0.1'):
self.keyspaces = {}
self.served_from = {}
self.host = host
self.cell = cell
self.mysql_port = mysql_port
def add_shard(self, keyspace, shard, vt_port):
"""Add a shard to the config."""
# compute the start and end
start = ''
end = ''
if '-' in shard:
parts = shard.split('-', 2)
start = parts[0]
end = parts[1]
if keyspace not in self.keyspaces:
self.keyspaces[keyspace] = []
self.keyspaces[keyspace].append({
'shard': shard,
'vt_port': vt_port,
'start': start,
'end': end,
})
def add_redirect(self, from_keyspace, to_keyspace):
"""Set a keyspace to be ServedFrom another."""
self.served_from[from_keyspace] = to_keyspace
def keyspace_id_as_base64(self, s):
raw = codecs.decode(s, 'hex')
return base64.b64encode(raw)
def as_json(self):
"""Return the config as JSON. This is a proto3 version of SrvKeyspace."""
result = {}
tablet_types_str = ['master', 'replica', 'rdonly']
tablet_types_int = [2, 3, 4]
sharding_colname = 'keyspace_id'
sharding_coltype = 1
for keyspace, shards in self.keyspaces.iteritems():
shard_references = []
for shard in shards:
key_range = {}
if shard['start']:
key_range['start'] = self.keyspace_id_as_base64(shard['start'])
if shard['end']:
key_range['end'] = self.keyspace_id_as_base64(shard['end'])
shard_references.append({
'name': shard['shard'],
'key_range': key_range,
})
for dbtype in tablet_types_str:
path = '/zk/%s/vt/ns/%s/%s/%s' % (self.cell, keyspace,
shard['shard'], dbtype)
result[path] = {
'entries': [
{
'uid': 0,
'host': self.host,
'port_map': {
'mysql': self.mysql_port,
'vt': shard['vt_port'],
},
},
],
}
path = '/zk/%s/vt/ns/%s' % (self.cell, keyspace)
partitions = []
for tablet_type in tablet_types_int:
partitions.append({
'served_type': tablet_type,
'shard_references': shard_references,
})
result[path] = {
'partitions': partitions,
'sharding_column_name': sharding_colname,
'sharding_column_type': sharding_coltype,
}
for from_keyspace, to_keyspace in self.served_from.iteritems():
path = '/zk/%s/vt/ns/%s' % (self.cell, from_keyspace)
served_from = []
for dbtype in tablet_types_int:
served_from.append({
'tablet_type': dbtype,
'keyspace': to_keyspace,
})
result[path] = {
'served_from': served_from,
}
return json.dumps(result)
| # Copyright 2013 Google Inc. All Rights Reserved.
"""Generate a config file for fakezk topology."""
__author__ = 'enisoc@google.com (Anthony Yeh)'
import base64
import codecs
import json
class FakeZkConfig(object):
"""Create fakezk config for use as static topology for vtgate."""
def __init__(self, mysql_port, cell='test_cell', host='127.0.0.1'):
self.keyspaces = {}
self.served_from = {}
self.host = host
self.cell = cell
self.mysql_port = mysql_port
def add_shard(self, keyspace, shard, vt_port):
"""Add a shard to the config."""
# compute the start and end
start = ''
end = ''
if '-' in shard:
parts = shard.split('-', 2)
start = parts[0]
end = parts[1]
if keyspace not in self.keyspaces:
self.keyspaces[keyspace] = []
self.keyspaces[keyspace].append({
'shard': shard,
'vt_port': vt_port,
'start': start,
'end': end,
})
def add_redirect(self, from_keyspace, to_keyspace):
"""Set a keyspace to be ServedFrom another."""
self.served_from[from_keyspace] = to_keyspace
def keyspace_id_as_base64(self, s):
raw = codecs.decode(s, 'hex')
return base64.b64encode(raw)
def as_json(self):
"""Return the config as JSON. This is a proto3 version of SrkKeyspace."""
result = {}
tablet_types_str = ['master', 'replica', 'rdonly']
tablet_types_int = [2, 3, 4]
sharding_colname = 'keyspace_id'
sharding_coltype = 1
for keyspace, shards in self.keyspaces.iteritems():
shard_references = []
for shard in shards:
key_range = {}
if shard['start']:
key_range['start'] = self.keyspace_id_as_base64(shard['start'])
if shard['end']:
key_range['end'] = self.keyspace_id_as_base64(shard['end'])
shard_references.append({
'name': shard['shard'],
'key_range': key_range,
})
for dbtype in tablet_types_str:
path = '/zk/%s/vt/ns/%s/%s/%s' % (self.cell, keyspace,
shard['shard'], dbtype)
result[path] = {
'entries': [
{
'uid': 0,
'host': self.host,
'port_map': {
'mysql': self.mysql_port,
'vt': shard['vt_port'],
},
},
],
}
path = '/zk/%s/vt/ns/%s' % (self.cell, keyspace)
partitions = []
for tablet_type in tablet_types_int:
partitions.append({
'served_type': tablet_type,
'shard_references': shard_references,
})
result[path] = {
'partitions': partitions,
'sharding_column_name': sharding_colname,
'sharding_column_type': sharding_coltype,
}
for from_keyspace, to_keyspace in self.served_from.iteritems():
path = '/zk/%s/vt/ns/%s' % (self.cell, from_keyspace)
served_from = []
for dbtype in tablet_types_int:
served_from.append({
'tablet_type': dbtype,
'keyspace': to_keyspace,
})
result[path] = {
'served_from': served_from,
}
return json.dumps(result)
| Python | 0.001604 |
d607de07ae3aaa2a245b8eb90cb42ca3e29f6e33 | add lambda sample | 05.Function.py | 05.Function.py | #-*- encoding: utf-8 -*-
# Error
#def func():
def func():
pass
def func(num, num1=1, num2=2):
print(num, num1, num2)
func(1, 3, 4) # 1 3 4
func(5) # 5 1 2
# Error
#func()
def func(**args):
for k, v in args.items():
print('key: ' + k, 'value: ' + v)
for k in args.keys():
print('key: ' + k, 'value: ' + args[k])
func(name = "rxb", age = "24")
def func(name, age):
print('name: ' + name, 'age: ' + age)
people = {"name": "rxb", "age": "24"}
func(**people) # name: rxb age: 24
def func(num, *args):
print(num)
for a in args:
print(a)
func(1, 2, 3, 4, 5, 6)
def func(num, num1):
print(num, num1)
func(num1 = 2, num = 1) # 1 2
d = {
"num": 3,
"num1": 4
}
func(**d) # 3 4
t = (4, 5)
func(*t) # 4 5
def func():
'''
The documentation of the func
'''
print("func")
print(func.__doc__)
l = lambda num1, num2: num1 + num2
print(l(2, 3)) # 5
def func2(func, num1, num2):
return func(num1, num2)
def func(num1, num2):
return num1 + num2
print(func2(func, 3, 4)) # 7
print(func2(lambda a, b: a - b, 7, 4)) # 3 | #-*- encoding: utf-8 -*-
# Error
#def func():
def func():
pass
def func(num, num1=1, num2=2):
print(num, num1, num2)
func(1, 3, 4) # 1 3 4
func(5) # 5 1 2
# Error
#func()
def func(**args):
for k, v in args.items():
print('key: ' + k, 'value: ' + v)
for k in args.keys():
print('key: ' + k, 'value: ' + args[k])
func(name = "rxb", age = "24")
def func(name, age):
print('name: ' + name, 'age: ' + age)
people = {"name": "rxb", "age": "24"}
func(**people) # name: rxb age: 24
def func(num, *args):
print(num)
for a in args:
print(a)
func(1, 2, 3, 4, 5, 6)
def func(num, num1):
print(num, num1)
func(num1 = 2, num = 1) # 1 2
d = {
"num": 3,
"num1": 4
}
func(**d) # 3 4
t = (4, 5)
func(*t) # 4 5
def func():
'''
The documentation of the func
'''
print("func")
print(func.__doc__)
l = lambda num1, num2: num1 + num2
print(l(2, 3)) # 5
| Python | 0.000001 |
b439017a21ac01ee7fda275753effaf5d103a120 | Change IP. | pybossa/signer/__init__.py | pybossa/signer/__init__.py | # -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2015 SciFabric LTD.
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
from itsdangerous import URLSafeTimedSerializer
from werkzeug import generate_password_hash, check_password_hash
class Signer(object):
def __init__(self, app=None):
self.app = app
if app is not None: # pragma: no cover
self.init_app(app)
def init_app(self, app):
key = app.config['ITSDANGEROUSKEY']
self.signer = URLSafeTimedSerializer(key)
def loads(self, string, **kwargs):
return self.signer.loads(string, **kwargs)
def dumps(self, key, **kwargs):
return self.signer.dumps(key, **kwargs)
def generate_password_hash(self, password):
return generate_password_hash(password)
def check_password_hash(self, passwd_hash, password):
return check_password_hash(passwd_hash, password)
| # -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2013 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
from itsdangerous import URLSafeTimedSerializer
from werkzeug import generate_password_hash, check_password_hash
class Signer(object):
def __init__(self, app=None):
self.app = app
if app is not None: # pragma: no cover
self.init_app(app)
def init_app(self, app):
key = app.config['ITSDANGEROUSKEY']
self.signer = URLSafeTimedSerializer(key)
def loads(self, string, **kwargs):
return self.signer.loads(string, **kwargs)
def dumps(self, key, **kwargs):
return self.signer.dumps(key, **kwargs)
def generate_password_hash(self, password):
return generate_password_hash(password)
def check_password_hash(self, passwd_hash, password):
return check_password_hash(passwd_hash, password)
| Python | 0 |
54d39aaf8c31a5827ae7338fefe7a1d6a19d52cf | Add missing docstring. | pyslvs_ui/info/__init__.py | pyslvs_ui/info/__init__.py | # -*- coding: utf-8 -*-
"""'info' module contains Pyslvs program information."""
__all__ = [
'KERNELS',
'SYS_INFO',
'ARGUMENTS',
'HAS_SLVS',
'Kernel',
'check_update',
'PyslvsAbout',
'html',
'logger',
'XStream',
'size_format',
]
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2016-2020"
__license__ = "AGPL"
__email__ = "pyslvs@gmail.com"
from .info import KERNELS, SYS_INFO, ARGUMENTS, HAS_SLVS, Kernel
from .about import PyslvsAbout, html, check_update
from .logging_handler import logger, XStream
def size_format(num: float) -> str:
"""Calculate file size."""
if num <= 0:
return "0 B"
for u in ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z'):
if abs(num) < 1024:
return f"{num:3.1f} {u}B"
num /= 1024
return f"{num:.1f} YB"
| # -*- coding: utf-8 -*-
"""'info' module contains Pyslvs program information."""
__all__ = [
'KERNELS',
'SYS_INFO',
'ARGUMENTS',
'HAS_SLVS',
'Kernel',
'check_update',
'PyslvsAbout',
'html',
'logger',
'XStream',
'size_format',
]
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2016-2020"
__license__ = "AGPL"
__email__ = "pyslvs@gmail.com"
from .info import KERNELS, SYS_INFO, ARGUMENTS, HAS_SLVS, Kernel
from .about import PyslvsAbout, html, check_update
from .logging_handler import logger, XStream
def size_format(num: float) -> str:
if num <= 0:
return "0 B"
for u in ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z'):
if abs(num) < 1024:
return f"{num:3.1f} {u}B"
num /= 1024
return f"{num:.1f} YB"
| Python | 0.000005 |
7b19611d30dfc9091823ae3d960ab2790dfe9cfc | Apply a blur filter automatically for each detected face | python/blur_human_faces.py | python/blur_human_faces.py | import requests
import json
imgUrl = 'https://pixlab.io/images/m3.jpg' # Target picture we want to blur any face on
# Detect all human faces in a given image via /facedetect first and blur all of them later via /mogrify.
# https://pixlab.io/cmd?id=facedetect and https://pixlab.io/cmd?id=mogrify for additional information.
req = requests.get('https://api.pixlab.io/facedetect',params={
'img': imgUrl,
'key':'PIXLAB_API_KEY',
})
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
exit();
total = len(reply['faces']) # Total detected faces
print(str(total)+" faces were detected")
if total < 1:
# No faces were detected, exit immediately
exit()
# Pass the facial coordinates for each detected face untouched to mogrify
coordinates = reply['faces']
# Call mogrify & blur the face(s)
req = requests.post('https://api.pixlab.io/mogrify',headers={'Content-Type':'application/json'},data=json.dumps({
'img': imgUrl,
'key':'PIXLAB_API_KEY',
'cord': coordinates # The field of interest
}))
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
else:
print ("Blurred Picture URL: "+ reply['ssl_link'])
| import requests
import json
imgUrl = 'https://pixlab.io/images/m3.jpg' # Target picture we want to blur any face on
# Detect all human faces in a given image via /facedetect first and blur all of them later via /mogrify.
# https://pixlab.io/cmd?id=facedetect and https://pixlab.io/cmd?id=mogrify for additional information.
req = requests.get('https://api.pixlab.io/facedetect',params={
'img': imgUrl,
'key':'PIXLAB_API_KEY',
})
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
exit();
total = len(reply['faces']) # Total detected faces
print(str(total)+" faces were detected")
if total < 1:
# No faces were detected, exit immediately
exit()
# Pass the detected faces coordinates untouched to mogrify
coordinates = reply['faces']
# Call mogrify & blur the face(s)
req = requests.post('https://api.pixlab.io/mogrify',headers={'Content-Type':'application/json'},data=json.dumps({
'img': imgUrl,
'key':'PIXLAB_API_KEY',
'cord': coordinates # The field of interest
}))
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
else:
print ("Blurred Picture URL: "+ reply['ssl_link'])
| Python | 0 |
8a950dbfb1281216ed270bf6363c7a71d857133f | Make datetime and time +00:00 handling behavior consistent. Fix #3. | webhooks/encoders.py | webhooks/encoders.py | """
Serialize data to/from JSON
Inspired by https://github.com/django/django/blob/master/django/core/serializers/json.py
"""
# Avoid shadowing the standard library json module
from __future__ import absolute_import
from __future__ import unicode_literals
import datetime
import decimal
import json
class WebHooksJSONEncoder(json.JSONEncoder):
"""
A JSONEncoder that can encode date/time and decimal types.
"""
def default(self, o):
# See "Date Time String Format" in the ECMA-262 specification.
if isinstance(o, datetime.datetime):
r = o.isoformat()
if o.microsecond:
r = r[:23] + r[26:]
if r.endswith('+00:00'):
r = r[:-6] + 'Z'
return r
elif isinstance(o, datetime.date):
return o.isoformat()
elif isinstance(o, datetime.time):
r = o.isoformat()
if o.microsecond:
r = r[:12]
if r.endswith('+00:00'):
r = r[:-6] + 'Z'
return r
elif isinstance(o, decimal.Decimal):
return str(o)
else:
return super(WebHooksJSONEncoder, self).default(o)
| """
Serialize data to/from JSON
Inspired by https://github.com/django/django/blob/master/django/core/serializers/json.py
"""
# Avoid shadowing the standard library json module
from __future__ import absolute_import
from __future__ import unicode_literals
import datetime
import decimal
import json
class WebHooksJSONEncoder(json.JSONEncoder):
"""
A JSONEncoder that can encode date/time and decimal types.
"""
def default(self, o):
# See "Date Time String Format" in the ECMA-262 specification.
if isinstance(o, datetime.datetime):
r = o.isoformat()
if o.microsecond:
r = r[:23] + r[26:]
if r.endswith('+00:00'):
r = r[:-6] + 'Z'
return r
elif isinstance(o, datetime.date):
return o.isoformat()
elif isinstance(o, datetime.time):
r = o.isoformat()
if o.microsecond:
r = r[:12]
return r
elif isinstance(o, decimal.Decimal):
return str(o)
else:
return super(WebHooksJSONEncoder, self).default(o)
| Python | 0.000001 |
8120b641ccb66b088fa70c028e5be542bf561dfd | Update lex_attrs.py (#5608) | spacy/lang/hy/lex_attrs.py | spacy/lang/hy/lex_attrs.py | # coding: utf8
from __future__ import unicode_literals
from ...attrs import LIKE_NUM
_num_words = [
"զրո",
"մեկ",
"երկու",
"երեք",
"չորս",
"հինգ",
"վեց",
"յոթ",
"ութ",
"ինը",
"տասը",
"տասնմեկ",
"տասներկու",
"տասներեք",
"տասնչորս",
"տասնհինգ",
"տասնվեց",
"տասնյոթ",
"տասնութ",
"տասնինը",
"քսան" "երեսուն",
"քառասուն",
"հիսուն",
"վաթսուն",
"յոթանասուն",
"ութսուն",
"իննսուն",
"հարյուր",
"հազար",
"միլիոն",
"միլիարդ",
"տրիլիոն",
"քվինտիլիոն",
]
def like_num(text):
if text.startswith(("+", "-", "±", "~")):
text = text[1:]
text = text.replace(",", "").replace(".", "")
if text.isdigit():
return True
if text.count("/") == 1:
num, denom = text.split("/")
if num.isdigit() and denom.isdigit():
return True
if text.lower() in _num_words:
return True
return False
LEX_ATTRS = {LIKE_NUM: like_num}
| # coding: utf8
from __future__ import unicode_literals
from ...attrs import LIKE_NUM
_num_words = [
"զրօ",
"մէկ",
"երկու",
"երեք",
"չորս",
"հինգ",
"վեց",
"յոթ",
"ութ",
"ինը",
"տասը",
"տասնմեկ",
"տասներկու",
"տասներեք",
"տասնչորս",
"տասնհինգ",
"տասնվեց",
"տասնյոթ",
"տասնութ",
"տասնինը",
"քսան" "երեսուն",
"քառասուն",
"հիսուն",
"վաթցսուն",
"յոթանասուն",
"ութսուն",
"ինիսուն",
"հարյուր",
"հազար",
"միլիոն",
"միլիարդ",
"տրիլիոն",
"քվինտիլիոն",
]
def like_num(text):
if text.startswith(("+", "-", "±", "~")):
text = text[1:]
text = text.replace(",", "").replace(".", "")
if text.isdigit():
return True
if text.count("/") == 1:
num, denom = text.split("/")
if num.isdigit() and denom.isdigit():
return True
if text.lower() in _num_words:
return True
return False
LEX_ATTRS = {LIKE_NUM: like_num}
| Python | 0 |
cd8024c762bf5bae8caf210b9224548bee55ee04 | Bump version to 6.1.5a3 | platformio/__init__.py | platformio/__init__.py | # Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
VERSION = (6, 1, "5a3")
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio"
__description__ = (
"A professional collaborative platform for embedded development. "
"Cross-platform IDE and Unified Debugger. "
"Static Code Analyzer and Remote Unit Testing. "
"Multi-platform and Multi-architecture Build System. "
"Firmware File Explorer and Memory Inspection. "
"IoT, Arduino, CMSIS, ESP-IDF, FreeRTOS, libOpenCM3, mbedOS, Pulp OS, SPL, "
"STM32Cube, Zephyr RTOS, ARM, AVR, Espressif (ESP8266/ESP32), FPGA, "
"MCS-51 (8051), MSP430, Nordic (nRF51/nRF52), NXP i.MX RT, PIC32, RISC-V, "
"STMicroelectronics (STM8/STM32), Teensy"
)
__url__ = "https://platformio.org"
__author__ = "PlatformIO Labs"
__email__ = "contact@piolabs.com"
__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO Labs"
__accounts_api__ = "https://api.accounts.platformio.org"
__registry_mirror_hosts__ = [
"registry.platformio.org",
"registry.nm1.platformio.org",
]
__pioremote_endpoint__ = "ssl:host=remote.platformio.org:port=4413"
__default_requests_timeout__ = (10, None) # (connect, read)
__core_packages__ = {
"contrib-piohome": "~3.4.2",
"contrib-pysite": "~2.%d%d.0" % (sys.version_info.major, sys.version_info.minor),
"tool-scons": "~4.40400.0",
"tool-cppcheck": "~1.270.0",
"tool-clangtidy": "~1.120001.0",
"tool-pvs-studio": "~7.18.0",
}
__check_internet_hosts__ = [
"185.199.110.153", # Github.com
"88.198.170.159", # platformio.org
"github.com",
] + __registry_mirror_hosts__
| # Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
VERSION = (6, 1, "5a2")
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio"
__description__ = (
"A professional collaborative platform for embedded development. "
"Cross-platform IDE and Unified Debugger. "
"Static Code Analyzer and Remote Unit Testing. "
"Multi-platform and Multi-architecture Build System. "
"Firmware File Explorer and Memory Inspection. "
"IoT, Arduino, CMSIS, ESP-IDF, FreeRTOS, libOpenCM3, mbedOS, Pulp OS, SPL, "
"STM32Cube, Zephyr RTOS, ARM, AVR, Espressif (ESP8266/ESP32), FPGA, "
"MCS-51 (8051), MSP430, Nordic (nRF51/nRF52), NXP i.MX RT, PIC32, RISC-V, "
"STMicroelectronics (STM8/STM32), Teensy"
)
__url__ = "https://platformio.org"
__author__ = "PlatformIO Labs"
__email__ = "contact@piolabs.com"
__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO Labs"
__accounts_api__ = "https://api.accounts.platformio.org"
__registry_mirror_hosts__ = [
"registry.platformio.org",
"registry.nm1.platformio.org",
]
__pioremote_endpoint__ = "ssl:host=remote.platformio.org:port=4413"
__default_requests_timeout__ = (10, None) # (connect, read)
__core_packages__ = {
"contrib-piohome": "~3.4.2",
"contrib-pysite": "~2.%d%d.0" % (sys.version_info.major, sys.version_info.minor),
"tool-scons": "~4.40400.0",
"tool-cppcheck": "~1.270.0",
"tool-clangtidy": "~1.120001.0",
"tool-pvs-studio": "~7.18.0",
}
__check_internet_hosts__ = [
"185.199.110.153", # Github.com
"88.198.170.159", # platformio.org
"github.com",
] + __registry_mirror_hosts__
| Python | 0 |
12c22ebdf3c7e84f5f9c6b32329f343c8317f11b | Correct comments | python/dbtools/__init__.py | python/dbtools/__init__.py | '''
This library provides database access routines.
It's based on the re-usable parts of tailoredstats.
Owain Kenway
'''
'''
Generally abstract away DB queries, such that all complexity is replaced with:
dbtools.dbquery(db, query)
'''
def dbquery(db, query, mysqlhost="mysql.external.legion.ucl.ac.uk", mysqlport = 3306 ):
from auth.secrets import Secrets
import MySQLdb # Note need mysqlclient package from pypi
# Set up our authentication.
s = Secrets()
# Connect to database.
conn = MySQLdb.Connect(host=mysqlhost,
port=mysqlport,
user=s.dbuser,
passwd=s.dbpasswd,
db=db)
# Set up cursor.
cursor = conn.cursor(MySQLdb.cursors.DictCursor)
print(">>> DEBUG SQL query: " + query)
# Run query.
cursor.execute(query)
# Dump output.
output = cursor.fetchall()
# Tidy up.
cursor.close()
conn.close()
return output
# Generate a valid SQL list from a python one.
def sqllist(pylist):
sqlstr="("
if type(pylist) == str:
sqlstr = sqlstr + "'" + pylist + "')"
else:
for a in pylist:
if sqlstr!= "(":
sqlstr = sqlstr + ", "
sqlstr = sqlstr + "'" + a + "'"
sqlstr = sqlstr + ")"
return sqlstr
# Build owner limit string for queries.
def onlimits(users="*"):
query = ""
# if users != * then construct a node list.
if users != "*":
userlist = sqllist(users)
query = query + " and owner in " + userlist
return query
| '''
This library provides database access routines.
It's based on the re-usable parts of tailoredstats.
Owain Kenway
'''
'''
Generally abstract away DB queries, such that all complexity is replaced with:
dbtools.dbquery(db, query)
'''
def dbquery(db, query, mysqlhost="mysql.external.legion.ucl.ac.uk", mysqlport = 3306 ):
from auth.secrets import Secrets
import MySQLdb # Note need mysqlclient package from pypi
# Set up our authentication.
s = Secrets()
# Connect to database.
conn = MySQLdb.Connect(host=mysqlhost,
port=mysqlport,
user=s.dbuser,
passwd=s.dbpasswd,
db=db)
# Set up cursor.
cursor = conn.cursor(MySQLdb.cursors.DictCursor)
print(">>> DEBUG SQL query: " + query)
# Run query.
cursor.execute(query)
# Dump output.
output = cursor.fetchall()
# Tidy up.
cursor.close()
conn.close()
return output
# Generate a valid SQL list from a python one.
def sqllist(pylist):
sqlstr="("
if type(pylist) == str:
sqlstr = sqlstr + "'" + pylist + "')"
else:
for a in pylist:
if sqlstr!= "(":
sqlstr = sqlstr + ", "
sqlstr = sqlstr + "'" + a + "'"
sqlstr = sqlstr + ")"
return sqlstr
# Build owner/node limit string for queries.
def onlimits(users="*"):
query = ""
# if users != * then construct a node list.
if users != "*":
userlist = sqllist(users)
query = query + " and owner in " + userlist
return query
| Python | 0.000023 |
8d459d86d33992129726ef177ed24fe8a00e9b75 | Bump version to 4.0.0rc1 | platformio/__init__.py | platformio/__init__.py | # Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
VERSION = (4, 0, "0rc1")
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio"
__description__ = (
"An open source ecosystem for IoT development. "
"Cross-platform IDE and unified debugger. "
"Remote unit testing and firmware updates. "
"Arduino, ARM mbed, Espressif (ESP8266/ESP32), STM32, PIC32, nRF51/nRF52, "
"FPGA, CMSIS, SPL, AVR, Samsung ARTIK, libOpenCM3")
__url__ = "https://platformio.org"
__author__ = "PlatformIO"
__email__ = "contact@platformio.org"
__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO"
__apiurl__ = "https://api.platformio.org"
| # Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
VERSION = (4, 0, "0b3")
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio"
__description__ = (
"An open source ecosystem for IoT development. "
"Cross-platform IDE and unified debugger. "
"Remote unit testing and firmware updates. "
"Arduino, ARM mbed, Espressif (ESP8266/ESP32), STM32, PIC32, nRF51/nRF52, "
"FPGA, CMSIS, SPL, AVR, Samsung ARTIK, libOpenCM3")
__url__ = "https://platformio.org"
__author__ = "PlatformIO"
__email__ = "contact@platformio.org"
__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO"
__apiurl__ = "https://api.platformio.org"
| Python | 0 |
5f917746e86c733d37c56e15a97f7aecb73fa75f | fix bug comparing string with int. int(games) | python/guess_the_player.py | python/guess_the_player.py | # importing modules
import os
import csv
import time
import random
import tweepy
import player
# secrets
consumer_key = os.getenv('c_key')
consumer_secret = os.getenv('c_secret')
access_token = os.getenv('a_token')
access_token_secret = os.getenv('a_secret')
# authentication
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# processing
with open('player_history.csv') as csvfile:
row = random.choice([a for a in list(csv.DictReader(csvfile)) if int(a['Games']) > 1])
po = player.player(
row['Player Name'],
row['Goals'],
row['Games'],
row['Starter'],
row['Sub'],
row['Active'],
row['Debut']
)
api.update_status(status=po.get_guess_player_string())
time.sleep(10*60)
api.update_status(status=f"#GuessThePlayer Well done if you got it, the answer was: {po.name} #CFC #Chelsea")
| # importing modules
import os
import csv
import time
import random
import tweepy
import player
# secrets
consumer_key = os.getenv('c_key')
consumer_secret = os.getenv('c_secret')
access_token = os.getenv('a_token')
access_token_secret = os.getenv('a_secret')
# authentication
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# processing
with open('player_history.csv') as csvfile:
row = random.choice([a for a in list(csv.DictReader(csvfile)) if a['Games'] > 1])
po = player.player(
row['Player Name'],
row['Goals'],
row['Games'],
row['Starter'],
row['Sub'],
row['Active'],
row['Debut']
)
api.update_status(status=po.get_guess_player_string())
time.sleep(10*60)
api.update_status(status=f"#GuessThePlayer Well done if you got it, the answer was: {po.name} #CFC #Chelsea")
| Python | 0 |
a172a17c815e8fcbe0f8473c6bac1ea1d9714817 | Bump version to 4.4.0a4 | platformio/__init__.py | platformio/__init__.py | # Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
VERSION = (4, 4, "0a4")
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio"
__description__ = (
"A new generation collaborative platform for embedded development. "
"Cross-platform IDE and Unified Debugger. "
"Static Code Analyzer and Remote Unit Testing. "
"Multi-platform and Multi-architecture Build System. "
"Firmware File Explorer and Memory Inspection. "
"Professional development environment for Embedded, IoT, Arduino, CMSIS, ESP-IDF, "
"FreeRTOS, libOpenCM3, mbedOS, Pulp OS, SPL, STM32Cube, Zephyr RTOS, ARM, AVR, "
"Espressif (ESP8266/ESP32), FPGA, MCS-51 (8051), MSP430, Nordic (nRF51/nRF52), "
"NXP i.MX RT, PIC32, RISC-V, STMicroelectronics (STM8/STM32), Teensy"
)
__url__ = "https://platformio.org"
__author__ = "PlatformIO"
__email__ = "contact@platformio.org"
__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO"
__apiurl__ = "https://api.platformio.org"
__accounts_api__ = "https://api.accounts.platformio.org"
__registry_api__ = "https://api.registry.platformio.org"
__pioremote_endpoint__ = "ssl:host=remote.platformio.org:port=4413"
| # Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
VERSION = (4, 4, "0a3")
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio"
__description__ = (
"A new generation collaborative platform for embedded development. "
"Cross-platform IDE and Unified Debugger. "
"Static Code Analyzer and Remote Unit Testing. "
"Multi-platform and Multi-architecture Build System. "
"Firmware File Explorer and Memory Inspection. "
"Professional development environment for Embedded, IoT, Arduino, CMSIS, ESP-IDF, "
"FreeRTOS, libOpenCM3, mbedOS, Pulp OS, SPL, STM32Cube, Zephyr RTOS, ARM, AVR, "
"Espressif (ESP8266/ESP32), FPGA, MCS-51 (8051), MSP430, Nordic (nRF51/nRF52), "
"NXP i.MX RT, PIC32, RISC-V, STMicroelectronics (STM8/STM32), Teensy"
)
__url__ = "https://platformio.org"
__author__ = "PlatformIO"
__email__ = "contact@platformio.org"
__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO"
__apiurl__ = "https://api.platformio.org"
__accounts_api__ = "https://api.accounts.platformio.org"
__registry_api__ = "https://api.registry.platformio.org"
__pioremote_endpoint__ = "ssl:host=remote.platformio.org:port=4413"
| Python | 0 |
54921c5dbdc68893fe45649d07d067818c36889b | Bump version to 4.0.0b3 | platformio/__init__.py | platformio/__init__.py | # Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
VERSION = (4, 0, "0b3")
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio"
__description__ = (
"An open source ecosystem for IoT development. "
"Cross-platform IDE and unified debugger. "
"Remote unit testing and firmware updates. "
"Arduino, ARM mbed, Espressif (ESP8266/ESP32), STM32, PIC32, nRF51/nRF52, "
"FPGA, CMSIS, SPL, AVR, Samsung ARTIK, libOpenCM3")
__url__ = "https://platformio.org"
__author__ = "PlatformIO"
__email__ = "contact@platformio.org"
__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO"
__apiurl__ = "https://api.platformio.org"
| # Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
VERSION = (4, 0, "0b2")
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio"
__description__ = (
"An open source ecosystem for IoT development. "
"Cross-platform IDE and unified debugger. "
"Remote unit testing and firmware updates. "
"Arduino, ARM mbed, Espressif (ESP8266/ESP32), STM32, PIC32, nRF51/nRF52, "
"FPGA, CMSIS, SPL, AVR, Samsung ARTIK, libOpenCM3")
__url__ = "https://platformio.org"
__author__ = "PlatformIO"
__email__ = "contact@platformio.org"
__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO"
__apiurl__ = "https://api.platformio.org"
| Python | 0 |
302a102a3c72224d2039df35fe4b292e9dd540d3 | fix typo in docstring | client/libs/logdog/bootstrap.py | client/libs/logdog/bootstrap.py | # Copyright 2016 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import collections
import os
from . import stream, streamname
class NotBootstrappedError(RuntimeError):
"""Raised when the current environment is missing Butler bootstrap variables.
"""
_ButlerBootstrapBase = collections.namedtuple('_ButlerBootstrapBase',
('project', 'prefix', 'streamserver_uri', 'coordinator_host',
'namespace'))
class ButlerBootstrap(_ButlerBootstrapBase):
"""Loads LogDog Butler bootstrap parameters from the environment.
LogDog Butler adds variables describing the LogDog stream parameters to the
environment when it bootstraps an application. This class probes the
environment and identifies those parameters.
"""
# TODO(iannucci): move all of these to LUCI_CONTEXT
_ENV_PROJECT = 'LOGDOG_STREAM_PROJECT'
_ENV_PREFIX = 'LOGDOG_STREAM_PREFIX'
_ENV_STREAM_SERVER_PATH = 'LOGDOG_STREAM_SERVER_PATH'
_ENV_COORDINATOR_HOST = 'LOGDOG_COORDINATOR_HOST'
_ENV_NAMESPACE = 'LOGDOG_NAMESPACE'
@classmethod
def probe(cls, env=None):
"""Returns (ButlerBootstrap): The probed bootstrap environment.
Args:
env (dict): The environment to probe. If None, `os.getenv` will be used.
Raises:
NotBootstrappedError if the current environment is not bootstrapped.
"""
if env is None:
env = os.environ
def _check(kind, val):
if not val:
return val
try:
streamname.validate_stream_name(val)
return val
except ValueError as exp:
raise NotBootstrappedError('%s (%s) is invalid: %s' % (kind, val, exp))
streamserver_uri = env.get(cls._ENV_STREAM_SERVER_PATH)
if not streamserver_uri:
raise NotBootstrappedError('No streamserver in bootstrap environment.')
return cls(
project=env.get(cls._ENV_PROJECT, ''),
prefix=_check("Prefix", env.get(cls._ENV_PREFIX, '')),
streamserver_uri=streamserver_uri,
coordinator_host=env.get(cls._ENV_COORDINATOR_HOST, ''),
namespace=_check("Namespace", env.get(cls._ENV_NAMESPACE, '')))
def stream_client(self, reg=None):
"""Returns: (StreamClient) stream client for the bootstrap streamserver URI.
If the Butler accepts external stream connections, it will export a
streamserver URI in the environment. This will create a StreamClient
instance to operate on the streamserver if one is defined.
Args:
reg (stream.StreamProtocolRegistry or None): The stream protocol registry
to use to create the stream. If None, the default global registry will
be used (recommended).
Raises:
ValueError: If no streamserver URI is present in the environment.
"""
reg = reg or stream._default_registry
return reg.create(
self.streamserver_uri,
project=self.project,
prefix=self.prefix,
coordinator_host=self.coordinator_host,
namespace=self.namespace)
| # Copyright 2016 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import collections
import os
from . import stream, streamname
class NotBootstrappedError(RuntimeError):
"""Raised when the current environment is missing Butler bootstrap variables.
"""
_ButlerBootstrapBase = collections.namedtuple('_ButlerBootstrapBase',
('project', 'prefix', 'streamserver_uri', 'coordinator_host',
'namespace'))
class ButlerBootstrap(_ButlerBootstrapBase):
"""Loads LogDog Butler bootstrap parameters from the environment.
LogDog Butler adds variables describing the LogDog stream parameters to the
environment when it bootstraps an application. This class probes the
environment and identifies those parameters.
"""
# TODO(iannucci): move all of these to LUCI_CONTEXT
_ENV_PROJECT = 'LOGDOG_STREAM_PROJECT'
_ENV_PREFIX = 'LOGDOG_STREAM_PREFIX'
_ENV_STREAM_SERVER_PATH = 'LOGDOG_STREAM_SERVER_PATH'
_ENV_COORDINATOR_HOST = 'LOGDOG_COORDINATOR_HOST'
_ENV_NAMESPACE = 'LOGDOG_NAMESPACE'
@classmethod
def probe(cls, env=None):
"""Returns (ButlerBootstrap): The probed bootstrap environment.
Args:
env (dict): The environment to probe. If None, `os.getenv` will be used.
Raises:
NotBootstrappedError if the current environment is not boostrapped.
"""
if env is None:
env = os.environ
def _check(kind, val):
if not val:
return val
try:
streamname.validate_stream_name(val)
return val
except ValueError as exp:
raise NotBootstrappedError('%s (%s) is invalid: %s' % (kind, val, exp))
streamserver_uri = env.get(cls._ENV_STREAM_SERVER_PATH)
if not streamserver_uri:
raise NotBootstrappedError('No streamserver in bootstrap environment.')
return cls(
project=env.get(cls._ENV_PROJECT, ''),
prefix=_check("Prefix", env.get(cls._ENV_PREFIX, '')),
streamserver_uri=streamserver_uri,
coordinator_host=env.get(cls._ENV_COORDINATOR_HOST, ''),
namespace=_check("Namespace", env.get(cls._ENV_NAMESPACE, '')))
def stream_client(self, reg=None):
"""Returns: (StreamClient) stream client for the bootstrap streamserver URI.
If the Butler accepts external stream connections, it will export a
streamserver URI in the environment. This will create a StreamClient
instance to operate on the streamserver if one is defined.
Args:
reg (stream.StreamProtocolRegistry or None): The stream protocol registry
to use to create the stream. If None, the default global registry will
be used (recommended).
Raises:
ValueError: If no streamserver URI is present in the environment.
"""
reg = reg or stream._default_registry
return reg.create(
self.streamserver_uri,
project=self.project,
prefix=self.prefix,
coordinator_host=self.coordinator_host,
namespace=self.namespace)
| Python | 0.020727 |
74ecf023ef13fdba6378d6b50b3eaeb06b9e0c97 | Rename env vars & modify query | rebuild_dependant_repos.py | rebuild_dependant_repos.py | import os, sys, re
import requests
from github import Github
CIRCLECI_BASEURL = "https://circleci.com/api/v2"
CIRCLECI_ACCESS_TOKEN = os.environ["AVATAO_CIRCLECI_TOKEN"]
GITHUB_ACCESS_TOKEN = os.environ["AVATAO_GITHUB_TOKEN"]
g = Github(GITHUB_ACCESS_TOKEN)
if len(sys.argv) < 2:
raise AttributeError("The image name is required as the first argument.")
image_name = sys.argv[1]
image_name = re.sub(r"[^a-zA-Z0-9-]", " ", image_name)
query = "org:avatao-content language:Dockerfile " + image_name
print("Searching GitHub with query: '%s'" % query)
code_search = g.search_code(query)
circleci_project_slugs = set()
for result in code_search:
circleci_project_slugs.add(f"gh/{result.repository.organization.login}/{result.repository.name}")
print("Found %d candidate repositories." % len(circleci_project_slugs))
current_item = 1
for slug in circleci_project_slugs:
print("[%d/%d] Triggering CI pipeline for: %s" % (current_item, len(circleci_project_slugs), slug))
requests.post(f"{CIRCLECI_BASEURL}/project/{slug}/pipeline", headers={"Circle-Token": CIRCLECI_ACCESS_TOKEN})
current_item += 1
| import os, sys, re, logging
import requests
from github import Github
logging.basicConfig(level=logging.DEBUG)
CIRCLECI_BASEURL = "https://circleci.com/api/v2"
CIRCLECI_ACCESS_TOKEN = os.environ["TAO_CIRCLECI_TOKEN"]
GITHUB_ACCESS_TOKEN = os.environ["TAO_GITHUB_TOKEN"]
g = Github(GITHUB_ACCESS_TOKEN)
if len(sys.argv) < 2:
raise AttributeError("The image name is required as the first argument.")
image_name = sys.argv[1]
image_name = re.sub(r"[^a-zA-Z0-9-]", " ", image_name)
query = "org:avatao-content language:Dockerfile FROM " + image_name
logging.debug("Searching GitHub with query: '%s'", query)
code_search = g.search_code(query)
circleci_project_slugs = set()
for result in code_search:
circleci_project_slugs.add(f"gh/{result.repository.organization.login}/{result.repository.name}")
logging.debug("Found %d candidate repositories.", len(circleci_project_slugs))
current_item = 1
for slug in circleci_project_slugs:
logging.debug("[%d/%d] Triggering CI pipeline for: %s", current_item, len(circleci_project_slugs), slug)
requests.post(f"{CIRCLECI_BASEURL}/project/{slug}/pipeline", headers={"Circle-Token": CIRCLECI_ACCESS_TOKEN})
current_item += 1
| Python | 0 |
b43b34418d244acee363485d42a23694ed9d654f | Add fields to work serializer for GET methods | works/serializers.py | works/serializers.py | from rest_framework import serializers
from . import models
from clients import serializers as client_serializers
from users import serializers as user_serializers
class WorkTypeSerializer(serializers.ModelSerializer):
name = serializers.CharField(read_only=True)
class Meta:
model = models.WorkType
fields = ('id', 'work_type_id', 'name',)
class ArtTypeSerializer(serializers.ModelSerializer):
class Meta:
model = models.ArtType
fields = ('id', 'work_type', 'name',)
class ArtIgualaSerializer(serializers.ModelSerializer):
art_type_name = serializers.CharField(source='art_type.name', read_only=True)
class Meta:
model = models.ArtIguala
fields = ('id', 'iguala', 'art_type', 'quantity', 'art_type_name')
class IgualaSerializer(serializers.ModelSerializer):
client_complete = client_serializers.ClientSerializer(source='client', read_only=True)
art_iguala = ArtIgualaSerializer(many=True, read_only=True)
class Meta:
model = models.Iguala
fields = ('id', 'client', 'client_complete', 'name', 'start_date', 'end_date',
'art_iguala',)
class StatusSerializer(serializers.ModelSerializer):
name = serializers.CharField(source='__str__', read_only=True)
class Meta:
model = models.Status
fields = ('id', 'status_id', 'name',)
class ArtWorkSerializer(serializers.ModelSerializer):
class Meta:
model = models.ArtWork
fields = ('id', 'work', 'art_type', 'quantity',)
class FileSerializer(serializers.ModelSerializer):
class Meta:
model = models.File
fields = ('id', 'work', 'upload',)
class WorkDesignerSerializer(serializers.ModelSerializer):
start_date = serializers.DateTimeField(read_only=True)
end_date = serializers.DateTimeField(read_only=True)
class Meta:
model = models.WorkDesigner
fields = ('id', 'designer', 'work', 'start_date', 'end_date', 'active_work',)
class StatusChangeSerializer(serializers.ModelSerializer):
date = serializers.DateTimeField(read_only=True)
class Meta:
model = models.StatusChange
fields = ('id', 'work', 'status', 'user', 'date',)
class WorkSerializer(serializers.ModelSerializer):
creation_date = serializers.DateField(read_only=True)
executive_complete = user_serializers.UserSerializer(source='executive', read_only=True)
contact_complete = client_serializers.ContactSerializer(source='contact', read_only=True)
current_status_complete = StatusSerializer(source='current_status', read_only=True)
work_type_complete = WorkTypeSerializer(source='work_type', read_only=True)
iguala_complete = IgualaSerializer(source='iguala', read_only=True)
art_works = ArtWorkSerializer(many=True, read_only=True)
files = FileSerializer(many=True, read_only=True)
work_designers = WorkDesignerSerializer(many=True, read_only=True)
status_changes = StatusChangeSerializer(many=True, read_only=True)
class Meta:
model = models.Work
fields = ('id',
'executive',
'executive_complete',
'contact',
'contact_complete',
'current_status',
'current_status_complete',
'work_type',
'work_type_complete',
'iguala',
'iguala_complete',
'creation_date',
'name',
'expected_delivery_date',
'brief',
'final_link',
'art_works',
'files',
'work_designers',
'status_changes'
)
| from rest_framework import serializers
from . import models
from clients import serializers as client_serializers
from users import serializers as user_serializers
class WorkTypeSerializer(serializers.ModelSerializer):
name = serializers.CharField(read_only=True)
class Meta:
model = models.WorkType
fields = ('id', 'work_type_id', 'name',)
class ArtTypeSerializer(serializers.ModelSerializer):
class Meta:
model = models.ArtType
fields = ('id', 'work_type', 'name',)
class ArtIgualaSerializer(serializers.ModelSerializer):
art_type_name = serializers.CharField(source='art_type.name', read_only=True)
class Meta:
model = models.ArtIguala
fields = ('id', 'iguala', 'art_type', 'quantity', 'art_type_name')
class IgualaSerializer(serializers.ModelSerializer):
client_complete = client_serializers.ClientSerializer(source='client', read_only=True)
art_iguala = ArtIgualaSerializer(many=True, read_only=True)
class Meta:
model = models.Iguala
fields = ('id', 'client', 'client_complete', 'name', 'start_date', 'end_date',
'art_iguala',)
class StatusSerializer(serializers.ModelSerializer):
name = serializers.CharField(source='__str__', read_only=True)
class Meta:
model = models.Status
fields = ('id', 'status_id', 'name',)
class WorkSerializer(serializers.ModelSerializer):
creation_date = serializers.DateField(read_only=True)
executive_complete = user_serializers.UserSerializer(source='executive', read_only=True)
contact_complete = client_serializers.ContactSerializer(source='contact', read_only=True)
current_status_complete = StatusSerializer(source='current_status', read_only=True)
work_type_complete = WorkTypeSerializer(source='work_type', read_only=True)
iguala_complete = IgualaSerializer(source='iguala', read_only=True)
class Meta:
model = models.Work
fields = ('id',
'executive',
'executive_complete',
'contact',
'contact_complete',
'current_status',
'current_status_complete',
'work_type',
'work_type_complete',
'iguala',
'iguala_complete',
'creation_date',
'name',
'expected_delivery_date',
'brief',
'final_link',
)
class ArtWorkSerializer(serializers.ModelSerializer):
class Meta:
model = models.ArtWork
fields = ('id', 'work', 'art_type', 'quantity',)
class FileSerializer(serializers.ModelSerializer):
class Meta:
model = models.File
fields = ('id', 'work', 'upload',)
class WorkDesignerSerializer(serializers.ModelSerializer):
start_date = serializers.DateTimeField(read_only=True)
end_date = serializers.DateTimeField(read_only=True)
class Meta:
model = models.WorkDesigner
fields = ('id', 'designer', 'work', 'start_date', 'end_date', 'active_work',)
class StatusChangeSerializer(serializers.ModelSerializer):
date = serializers.DateTimeField(read_only=True)
class Meta:
model = models.StatusChange
fields = ('id', 'work', 'status', 'user', 'date',)
| Python | 0 |
a6fbb077eb3067cedc501cc8b0a9e99594cef9ed | Use memoize for compliance cache | recruit_app/hr/managers.py | recruit_app/hr/managers.py | # -*- coding: utf-8 -*-
from recruit_app.user.models import EveCharacter
from recruit_app.extensions import cache_extension
from flask import current_app
import requests
from bs4 import BeautifulSoup
class HrManager:
def __init__(self):
pass
@staticmethod
@cache_extension.memoize(timeout=3600)
def get_compliance(corp_id):
url = 'https://goonfleet.com'
s = requests.session()
r = s.get(url, verify=True)
soup = BeautifulSoup(r.text, 'html.parser')
token = soup.find('input', {'name':'auth_key'})['value']
payload = {
'ips_username' : current_app.config['GSF_USERNAME'],
'ips_password' : current_app.config['GSF_PASSWORD'],
'auth_key' : token,
'referer' : 'https://goonfleet.com/',
'rememberMe' : 1,
}
url = 'https://goonfleet.com/index.php?app=core&module=global§ion=login&do=process'
r = s.post(url, data=payload, verify=True)
url = 'https://goonfleet.com/corps/checkMembers.php'
r = s.get(url, verify=True)
payload = {
'corpID' : str(corp_id)
}
r = s.post(url, data=payload, verify=True)
soup = BeautifulSoup(r.text, 'html.parser')
output = "<table id='compliance' class='table tablesorter'><thead><th>Character Name</th><th>Forum Name/Main</th><th>Primary Group</th><th>Status</th></thead><tbody>\n"
for row in soup.findAll('tr'):
alert = None
if row.get('class'):
alert = row.get('class')[1]
cols = row.findAll('td')
charname = cols[1].get_text()
forumname = cols[2].get_text()
group = cols[3].get_text()
# Look for an API for character
if not alert and not EveCharacter.query.filter_by(character_name=charname).first():
alert = 'alert-warning'
# Set status
if alert == 'alert-warning':
status = 'No KF API'
elif alert == 'alert-success':
status = 'Director'
elif alert == 'alert-error':
status = 'No Goon Auth'
else:
status = 'OK'
if alert:
output = output + '<tr class="alert {0}"><td>{1}</td><td>{2}</td><td>{3}</td><td>{4}</td></tr>\n'.format(alert, charname, forumname, group, status)
else:
output = output + '<tr><td>{0}</td><td>{1}</td><td>{2}</td><td>{3}</td></tr>\n'.format(charname, forumname, group, status)
output = output + '</tbody></table>'
return output
| # -*- coding: utf-8 -*-
from recruit_app.user.models import EveCharacter
from recruit_app.extensions import cache_extension
from flask import current_app
import requests
from bs4 import BeautifulSoup
class HrManager:
def __init__(self):
pass
@staticmethod
@cache_extension.cached(timeout=3600, key_prefix='get_compliance')
def get_compliance(corp_id):
url = 'https://goonfleet.com'
s = requests.session()
r = s.get(url, verify=True)
soup = BeautifulSoup(r.text, 'html.parser')
token = soup.find('input', {'name':'auth_key'})['value']
payload = {
'ips_username' : current_app.config['GSF_USERNAME'],
'ips_password' : current_app.config['GSF_PASSWORD'],
'auth_key' : token,
'referer' : 'https://goonfleet.com/',
'rememberMe' : 1,
}
url = 'https://goonfleet.com/index.php?app=core&module=global§ion=login&do=process'
r = s.post(url, data=payload, verify=True)
url = 'https://goonfleet.com/corps/checkMembers.php'
r = s.get(url, verify=True)
payload = {
'corpID' : str(corp_id)
}
r = s.post(url, data=payload, verify=True)
soup = BeautifulSoup(r.text, 'html.parser')
output = "<table id='compliance' class='table tablesorter'><thead><th>Character Name</th><th>Forum Name/Main</th><th>Primary Group</th><th>Status</th></thead><tbody>\n"
for row in soup.findAll('tr'):
alert = None
if row.get('class'):
alert = row.get('class')[1]
cols = row.findAll('td')
charname = cols[1].get_text()
forumname = cols[2].get_text()
group = cols[3].get_text()
# Look for an API for character
if not alert and not EveCharacter.query.filter_by(character_name=charname).first():
alert = 'alert-warning'
# Set status
if alert == 'alert-warning':
status = 'No KF API'
elif alert == 'alert-success':
status = 'Director'
elif alert == 'alert-error':
status = 'No Goon Auth'
else:
status = 'OK'
if alert:
output = output + '<tr class="alert {0}"><td>{1}</td><td>{2}</td><td>{3}</td><td>{4}</td></tr>\n'.format(alert, charname, forumname, group, status)
else:
output = output + '<tr><td>{0}</td><td>{1}</td><td>{2}</td><td>{3}</td></tr>\n'.format(charname, forumname, group, status)
output = output + '</tbody></table>'
return output
| Python | 0 |
0ba2b371e08c40e7c4d56efee6f4a828f1e7aeb0 | Update functions.py | ref/functions/functions.py | ref/functions/functions.py | #functions.py
#Written by Jesse Gallarzo
#Add code here
#def functionOne():
#def functionTwo():
#def functionThree():
def main():
#Add code here
print('Test')
main()
| #functions.py
#Written by Jesse Gallarzo
#Add code here
#def function One:
def main():
#Add code here
print('Test')
main()
| Python | 0.000001 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.