commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
615fd3b0b7d67905cf4568971602c6dfd7c5eff3 | Remove outdated code from `fs.sshfs.error_tools` | fs/sshfs/error_tools.py | fs/sshfs/error_tools.py | # coding: utf-8
"""Utils to work with `paramiko` errors.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import errno
import sys
import six
from .. import errors
class _ConvertSSHFSErrors(object):
"""Context manager to convert OSErrors in to FS Errors."""
FILE_ERRORS = {
64: errors.RemoteConnectionError, # ENONET
errno.ENOENT: errors.ResourceNotFound,
errno.EFAULT: errors.ResourceNotFound,
errno.ESRCH: errors.ResourceNotFound,
errno.ENOTEMPTY: errors.DirectoryNotEmpty,
errno.EEXIST: errors.FileExists,
183: errors.DirectoryExists,
#errno.ENOTDIR: errors.DirectoryExpected,
errno.ENOTDIR: errors.ResourceNotFound,
errno.EISDIR: errors.FileExpected,
errno.EINVAL: errors.FileExpected,
errno.ENOSPC: errors.InsufficientStorage,
errno.EPERM: errors.PermissionDenied,
errno.EACCES: errors.PermissionDenied,
errno.ENETDOWN: errors.RemoteConnectionError,
errno.ECONNRESET: errors.RemoteConnectionError,
errno.ENAMETOOLONG: errors.PathError,
errno.EOPNOTSUPP: errors.Unsupported,
errno.ENOSYS: errors.Unsupported,
}
#
DIR_ERRORS = FILE_ERRORS.copy()
DIR_ERRORS[errno.ENOTDIR] = errors.DirectoryExpected
DIR_ERRORS[errno.EEXIST] = errors.DirectoryExists
DIR_ERRORS[errno.EINVAL] = errors.DirectoryExpected
def __init__(self, opname, path, directory=False):
self._opname = opname
self._path = path
self._directory = directory
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
ssh_errors = (
self.DIR_ERRORS
if self._directory
else self.FILE_ERRORS
)
if exc_type and isinstance(exc_value, EnvironmentError):
_errno = exc_value.errno
fserror = ssh_errors.get(_errno, errors.OperationFailed)
six.reraise(
fserror,
fserror(
self._path,
exc=exc_value
),
traceback
)
# Stops linter complaining about invalid class name
convert_sshfs_errors = _ConvertSSHFSErrors
| # coding: utf-8
"""Utils to work with `paramiko` errors.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import errno
import sys
import six
from .. import errors
class _ConvertSSHFSErrors(object):
"""Context manager to convert OSErrors in to FS Errors."""
FILE_ERRORS = {
64: errors.RemoteConnectionError, # ENONET
errno.ENOENT: errors.ResourceNotFound,
errno.EFAULT: errors.ResourceNotFound,
errno.ESRCH: errors.ResourceNotFound,
errno.ENOTEMPTY: errors.DirectoryNotEmpty,
errno.EEXIST: errors.FileExists,
183: errors.DirectoryExists,
#errno.ENOTDIR: errors.DirectoryExpected,
errno.ENOTDIR: errors.ResourceNotFound,
errno.EISDIR: errors.FileExpected,
errno.EINVAL: errors.FileExpected,
errno.ENOSPC: errors.InsufficientStorage,
errno.EPERM: errors.PermissionDenied,
errno.EACCES: errors.PermissionDenied,
errno.ENETDOWN: errors.RemoteConnectionError,
errno.ECONNRESET: errors.RemoteConnectionError,
errno.ENAMETOOLONG: errors.PathError,
errno.EOPNOTSUPP: errors.Unsupported,
errno.ENOSYS: errors.Unsupported,
}
#
DIR_ERRORS = FILE_ERRORS.copy()
DIR_ERRORS[errno.ENOTDIR] = errors.DirectoryExpected
DIR_ERRORS[errno.EEXIST] = errors.DirectoryExists
DIR_ERRORS[errno.EINVAL] = errors.DirectoryExpected
# if _WINDOWS_PLATFORM: # pragma: no cover
# DIR_ERRORS[13] = errors.DirectoryExpected
# DIR_ERRORS[267] = errors.DirectoryExpected
# FILE_ERRORS[13] = errors.FileExpected
def __init__(self, opname, path, directory=False):
self._opname = opname
self._path = path
self._directory = directory
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
ssh_errors = (
self.DIR_ERRORS
if self._directory
else self.FILE_ERRORS
)
if exc_type and isinstance(exc_value, EnvironmentError):
_errno = exc_value.errno
fserror = ssh_errors.get(_errno, errors.OperationFailed)
if _errno == errno.EACCES and sys.platform == "win32":
if getattr(exc_value, 'args', None) == 32: # pragma: no cover
fserror = errors.ResourceLocked
six.reraise(
fserror,
fserror(
self._path,
exc=exc_value
),
traceback
)
# Stops linter complaining about invalid class name
convert_sshfs_errors = _ConvertSSHFSErrors
| Python | 0.000002 |
b83576644d5aacd4ab841a03ff228796ff1b1b67 | change name | coolTestly/__openerp__.py | coolTestly/__openerp__.py | {
"name": "coolTestly",
"author": "Ju",
"version": "1.0",
"category": "Generic Modules/Sales & Purchases",
"depends": [
"purchase",
],
"demo": [],
"data": [
],
"installable": True
}
| {
"name": "testly",
"author": "Ju",
"version": "1.0",
"category": "Generic Modules/Sales & Purchases",
"depends": [
"purchase",
],
"demo": [],
"data": [
],
"installable": True
} | Python | 0.000147 |
f844b95e7cb034985f1d284a41789d1e427b0f74 | Update env_detect.py | device/src/env_detect.py | device/src/env_detect.py | #!/usr/bin/env python
#Weather station.
#detect environment information from several sensors:
#water leverl, air humity, raining, air temperature, light sensitivity.
#Air temperature&humity sensor: DHT11.
#Add dht.py in micropython/stmhal/modules, refer to esp8266
#Compile the DHT in firmware, then use DHT lib in application.
#Raining, same to soil moisture.
#Raining ? DO value: 0
from pyb import Pin
p_in = Pin('Y12', Pin.IN, Pin.PULL_UP)
p_in.value
adc = pyb.ADC(Pin('Y11')) # create an analog object from a pin
adc = pyb.ADC(pyb.Pin.board.Y11)
val = adc.read() # read an analog value
#-----------------------------------------#
#Light intensity sensor(GY-30) <--> I2C(1)
#SDA <--> X10
#SCL <--> X9
#VCC
#GND
#ADO(ADDR/address ?)
from pyb import I2C
i2c = I2C(1) # create on bus 1
i2c = I2C(1, I2C.MASTER) # create and init as a master
i2c.init(I2C.MASTER, baudrate=20000) # init as a master
i2c.init(I2C.SLAVE, addr=0x23) # init as a slave with given address(GY-30 address is 0x23)
i2c.deinit() # turn off the peripheral
i2c.init(I2C.MASTER)
i2c.send('123', 0x23) # send 3 bytes to slave with address 0x23
i2c.send(b'456', addr=0x23) # keyword for address
| #!/usr/bin/env python
#Weather station.
#detect environment information from several sensors:
#water leverl, air humity, raining, air temperature, light sensitivity.
#Air temperature&humity sensor: DHT11.
#Add dht.py in micropython/stmhal/modules, refer to esp8266
#Compile the DHT in firmware, then use DHT lib in application.
#Raining, same to soil moisture.
#Raining ? DO value: 0
from pyb import Pin
p_in = Pin('Y12', Pin.IN, Pin.PULL_UP)
p_in.value
adc = pyb.ADC(Pin('Y11')) # create an analog object from a pin
adc = pyb.ADC(pyb.Pin.board.Y11)
val = adc.read() # read an analog value
#-----------------------------------------#
#Light intensity sensor(GY-30) <--> I2C(1)
#SDA <--> X10
#SCL <--> X9
#VCC
#GND
#ADO(ADDR/address ?)
from pyb import I2C
i2c = I2C(1) # create on bus 1
i2c = I2C(1, I2C.MASTER) # create and init as a master
i2c.init(I2C.MASTER, baudrate=20000) # init as a master
i2c.init(I2C.SLAVE, addr=0x23) # init as a slave with given address(GY-30 address is 0x23)
i2c.deinit() # turn off the peripheral
i2c.init(I2C.MASTER)
| Python | 0.000001 |
ce948b49fbd4f3e8012b6d351bbf53db32172474 | fix repeated import of CascadeRPNHead (#8578) | mmdet/models/dense_heads/__init__.py | mmdet/models/dense_heads/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centripetal_head import CentripetalHead
from .corner_head import CornerHead
from .ddod_head import DDODHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .mask2former_head import Mask2FormerHead
from .maskformer_head import MaskFormerHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .solov2_head import SOLOV2Head
from .ssd_head import SSDHead
from .tood_head import TOODHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet, YOLACTSegmHead
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTSegmHead', 'YOLACTProtonet', 'YOLOV3Head', 'PAAHead',
'SABLRetinaHead', 'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead',
'CascadeRPNHead', 'EmbeddingRPNHead', 'LDHead', 'AutoAssignHead',
'DETRHead', 'YOLOFHead', 'DeformableDETRHead', 'SOLOHead',
'DecoupledSOLOHead', 'CenterNetHead', 'YOLOXHead',
'DecoupledSOLOLightHead', 'LADHead', 'TOODHead', 'MaskFormerHead',
'Mask2FormerHead', 'SOLOV2Head', 'DDODHead'
]
| # Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centripetal_head import CentripetalHead
from .corner_head import CornerHead
from .ddod_head import DDODHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .mask2former_head import Mask2FormerHead
from .maskformer_head import MaskFormerHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .solov2_head import SOLOV2Head
from .ssd_head import SSDHead
from .tood_head import TOODHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet, YOLACTSegmHead
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTSegmHead', 'YOLACTProtonet', 'YOLOV3Head', 'PAAHead',
'SABLRetinaHead', 'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead',
'CascadeRPNHead', 'EmbeddingRPNHead', 'LDHead', 'CascadeRPNHead',
'AutoAssignHead', 'DETRHead', 'YOLOFHead', 'DeformableDETRHead',
'SOLOHead', 'DecoupledSOLOHead', 'CenterNetHead', 'YOLOXHead',
'DecoupledSOLOLightHead', 'LADHead', 'TOODHead', 'MaskFormerHead',
'Mask2FormerHead', 'SOLOV2Head', 'DDODHead'
]
| Python | 0 |
5e7e60f8afb3ddeeec2714a9d11dd30a6ea3e52f | Add new calc_total_error tests | photutils/utils/tests/test_prepare_data.py | photutils/utils/tests/test_prepare_data.py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from numpy.testing import assert_allclose
from astropy.tests.helper import pytest
import astropy.units as u
from .. import calc_total_error
SHAPE = (5, 5)
DATAVAL = 2.
DATA = np.ones(SHAPE) * DATAVAL
MASK = np.zeros_like(DATA, dtype=bool)
MASK[2, 2] = True
BKG_ERROR = np.ones(SHAPE)
EFFGAIN = np.ones(SHAPE) * DATAVAL
BACKGROUND = np.ones(SHAPE)
WRONG_SHAPE = np.ones((2, 2))
class TestCalculateTotalError(object):
def test_error_shape(self):
with pytest.raises(ValueError):
calc_total_error(DATA, WRONG_SHAPE, EFFGAIN)
def test_gain_shape(self):
with pytest.raises(ValueError):
calc_total_error(DATA, BKG_ERROR, WRONG_SHAPE)
@pytest.mark.parametrize('effective_gain', (0, -1))
def test_gain_le_zero(self, effective_gain):
with pytest.raises(ValueError):
calc_total_error(DATA, BKG_ERROR, effective_gain)
def test_gain_scalar(self):
error_tot = calc_total_error(DATA, BKG_ERROR, 2.)
assert_allclose(error_tot, np.sqrt(2.) * BKG_ERROR)
def test_gain_array(self):
error_tot = calc_total_error(DATA, BKG_ERROR, EFFGAIN)
assert_allclose(error_tot, np.sqrt(2.) * BKG_ERROR)
def test_units(self):
units = u.electron / u.s
error_tot1 = calc_total_error(DATA * units, BKG_ERROR * units,
EFFGAIN * u.s)
assert error_tot1.unit == units
error_tot2 = calc_total_error(DATA, BKG_ERROR, EFFGAIN)
assert_allclose(error_tot1.value, error_tot2)
def test_error_units(self):
units = u.electron / u.s
with pytest.raises(ValueError):
calc_total_error(DATA * units, BKG_ERROR * u.electron,
EFFGAIN * u.s)
def test_effgain_units(self):
units = u.electron / u.s
with pytest.raises(u.UnitsError):
calc_total_error(DATA * units, BKG_ERROR * units, EFFGAIN * u.km)
def test_missing_bkgerror_units(self):
units = u.electron / u.s
with pytest.raises(ValueError):
calc_total_error(DATA * units, BKG_ERROR, EFFGAIN * u.s)
def test_missing_effgain_units(self):
units = u.electron / u.s
with pytest.raises(ValueError):
calc_total_error(DATA * units, BKG_ERROR * units,
EFFGAIN)
| # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from numpy.testing import assert_allclose
from astropy.tests.helper import pytest
from .. import calc_total_error
SHAPE = (5, 5)
DATAVAL = 2.
DATA = np.ones(SHAPE) * DATAVAL
MASK = np.zeros_like(DATA, dtype=bool)
MASK[2, 2] = True
BKG_ERROR = np.ones(SHAPE)
EFFGAIN = np.ones(SHAPE) * DATAVAL
BACKGROUND = np.ones(SHAPE)
WRONG_SHAPE = np.ones((2, 2))
class TestCalculateTotalError(object):
def test_error_shape(self):
with pytest.raises(ValueError):
calc_total_error(DATA, WRONG_SHAPE, EFFGAIN)
def test_gain_shape(self):
with pytest.raises(ValueError):
calc_total_error(DATA, BKG_ERROR, WRONG_SHAPE)
@pytest.mark.parametrize('effective_gain', (0, -1))
def test_gain_le_zero(self, effective_gain):
with pytest.raises(ValueError):
calc_total_error(DATA, BKG_ERROR, effective_gain)
def test_gain_scalar(self):
error_tot = calc_total_error(DATA, BKG_ERROR, 2.)
assert_allclose(error_tot, np.sqrt(2.) * BKG_ERROR)
def test_gain_array(self):
error_tot = calc_total_error(DATA, BKG_ERROR, EFFGAIN)
assert_allclose(error_tot, np.sqrt(2.) * BKG_ERROR)
| Python | 0.000002 |
5dd6fa526eac5632f75d543342a7a5a45b866716 | Update plantcv/plantcv/visualize/obj_size_ecdf.py | plantcv/plantcv/visualize/obj_size_ecdf.py | plantcv/plantcv/visualize/obj_size_ecdf.py | # Plot Empirical Cumulative Distribution Function for Object Size
import os
import cv2
import pandas as pd
from plantcv.plantcv import params
from plantcv.plantcv._debug import _debug
from statsmodels.distributions.empirical_distribution import ECDF
from plotnine import ggplot, aes, geom_point, labels, scale_x_log10
def obj_size_ecdf(mask, title=None):
""" Plot empirical cumulative distribution for object size based on binary
mask
Inputs:
mask = binary mask
title = a custom title for the plot (default=None)
Returns:
fig_ecdf = empirical cumulative distribution function plot
:param mask: numpy.ndarray
:param title: str
:return fig_ecdf: plotnine.ggplot.ggplot
"""
objects, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]
areas = [cv2.contourArea(cnt) for cnt in objects]
ecdf = ECDF(areas, side='right')
ecdf_df = pd.DataFrame({'object area': ecdf.x, 'cumulative probability': ecdf.y})
# create ecdf plot and apply log-scale for x-axis (areas)
fig_ecdf = (ggplot(data=ecdf_df, mapping=aes(x='object area', y='cumulative probability'))
+ geom_point(size=.1)
+ scale_x_log10())
if title is not None:
fig_ecdf = fig_ecdf + labels.ggtitle(title)
# Plot or print the ecdf
_debug(visual=fig_ecdf,
filename=os.path.join(params.debug_outdir, str(params.device) + '_area_ecdf.png'))
return fig_ecdf
| # Plot Empirical Cumulative Distribution Function for Object Size
import os
import cv2
import pandas as pd
from plantcv.plantcv import params
from plantcv.plantcv._debug import _debug
from statsmodels.distributions.empirical_distribution import ECDF
from plotnine import ggplot, aes, geom_point, labels, \
scale_color_manual, scale_x_log10
def obj_size_ecdf(mask, title=None):
""" Plot empirical cumulative distribution for object size based on binary
mask
Inputs:
mask = binary mask
title = a custom title for the plot (default=None)
Returns:
fig_ecdf = empirical cumulative distribution function plot
:param mask: numpy.ndarray
:param title: str
:return fig_ecdf: plotnine.ggplot.ggplot
"""
objects, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]
areas = [cv2.contourArea(cnt) for cnt in objects]
ecdf = ECDF(areas, side='right')
ecdf_df = pd.DataFrame({'object area': ecdf.x, 'cumulative probability': ecdf.y})
# create ecdf plot and apply log-scale for x-axis (areas)
fig_ecdf = (ggplot(data=ecdf_df, mapping=aes(x='object area', y='cumulative probability'))
+ geom_point(size=.1)
+ scale_x_log10())
if title is not None:
fig_ecdf = fig_ecdf + labels.ggtitle(title)
# Plot or print the ecdf
_debug(visual=fig_ecdf,
filename=os.path.join(params.debug_outdir, str(params.device) + '_area_ecdf.png'))
return fig_ecdf
| Python | 0 |
77eefbefcdc5c04d194924e11fa491a0c4c91c14 | convert print statement to logger call | geopy/geocoders/bing.py | geopy/geocoders/bing.py | import xml.dom.minidom
from urllib import urlencode
from urllib2 import urlopen
from geopy.geocoders.base import Geocoder
from geopy.util import logger
class Bing(Geocoder):
"""Geocoder using the Bing Maps API."""
def __init__(self, api_key, format_string='%s', output_format='xml'):
"""Initialize a customized Bing geocoder with location-specific
address information and your Bing Maps API key.
``api_key`` should be a valid Bing Maps API key.
``format_string`` is a string containing '%s' where the string to
geocode should be interpolated before querying the geocoder.
For example: '%s, Mountain View, CA'. The default is just '%s'.
``output_format`` can currently only be 'xml'.
"""
self.api_key = api_key
self.format_string = format_string
self.output_format = output_format.lower()
self.url = "http://dev.virtualearth.net/REST/v1/Locations?%s"
def geocode(self, string, exactly_one=True):
params = {'addressLine': self.format_string % string,
'o': self.output_format,
'key': self.api_key
}
url = self.url % urlencode(params)
return self.geocode_url(url, exactly_one)
def geocode_url(self, url, exactly_one=True):
logger.debug("Fetching %s..." % url)
page = urlopen(url)
parse = getattr(self, 'parse_' + self.output_format)
return parse(page, exactly_one)
def parse_xml(self, page, exactly_one=True):
"""Parse a location name, latitude, and longitude from an XML response.
"""
if not isinstance(page, basestring):
page = self._decode_page(page)
doc = xml.dom.minidom.parseString(page)
resources = doc.getElementsByTagName('Resources')
if exactly_one and len(resources) != 1:
raise ValueError("Didn't find exactly one resource! " \
"(Found %d.)" % len(resources))
def parse_resource(resource):
strip = ", \n"
address = self._get_first_text(resource, 'AddressLine', strip)
city = self._get_first_text(resource, 'Locality', strip)
state = self._get_first_text(resource, 'AdminDistrict', strip)
zip = self._get_first_text(resource, 'PostalCode', strip)
country = self._get_first_text(resource, 'CountryRegion', strip)
city_state = self._join_filter(", ", [city, state])
place = self._join_filter(" ", [city_state, zip])
location = self._join_filter(", ", [address, place, country])
latitude = self._get_first_text(resource, 'Latitude') or None
latitude = latitude and float(latitude)
longitude = self._get_first_text(resource, 'Longitude') or None
longitude = longitude and float(longitude)
return (location, (latitude, longitude))
if exactly_one:
return parse_resource(resources[0])
else:
return (parse_resource(resource) for resource in resources)
| import xml.dom.minidom
from urllib import urlencode
from urllib2 import urlopen
from geopy.geocoders.base import Geocoder
class Bing(Geocoder):
"""Geocoder using the Bing Maps API."""
def __init__(self, api_key, format_string='%s', output_format='xml'):
"""Initialize a customized Bing geocoder with location-specific
address information and your Bing Maps API key.
``api_key`` should be a valid Bing Maps API key.
``format_string`` is a string containing '%s' where the string to
geocode should be interpolated before querying the geocoder.
For example: '%s, Mountain View, CA'. The default is just '%s'.
``output_format`` can currently only be 'xml'.
"""
self.api_key = api_key
self.format_string = format_string
self.output_format = output_format.lower()
self.url = "http://dev.virtualearth.net/REST/v1/Locations?%s"
def geocode(self, string, exactly_one=True):
params = {'addressLine': self.format_string % string,
'o': self.output_format,
'key': self.api_key
}
url = self.url % urlencode(params)
return self.geocode_url(url, exactly_one)
def geocode_url(self, url, exactly_one=True):
print "Fetching %s..." % url
page = urlopen(url)
parse = getattr(self, 'parse_' + self.output_format)
return parse(page, exactly_one)
def parse_xml(self, page, exactly_one=True):
"""Parse a location name, latitude, and longitude from an XML response.
"""
if not isinstance(page, basestring):
page = self._decode_page(page)
doc = xml.dom.minidom.parseString(page)
resources = doc.getElementsByTagName('Resources')
if exactly_one and len(resources) != 1:
raise ValueError("Didn't find exactly one resource! " \
"(Found %d.)" % len(resources))
def parse_resource(resource):
strip = ", \n"
address = self._get_first_text(resource, 'AddressLine', strip)
city = self._get_first_text(resource, 'Locality', strip)
state = self._get_first_text(resource, 'AdminDistrict', strip)
zip = self._get_first_text(resource, 'PostalCode', strip)
country = self._get_first_text(resource, 'CountryRegion', strip)
city_state = self._join_filter(", ", [city, state])
place = self._join_filter(" ", [city_state, zip])
location = self._join_filter(", ", [address, place, country])
latitude = self._get_first_text(resource, 'Latitude') or None
latitude = latitude and float(latitude)
longitude = self._get_first_text(resource, 'Longitude') or None
longitude = longitude and float(longitude)
return (location, (latitude, longitude))
if exactly_one:
return parse_resource(resources[0])
else:
return (parse_resource(resource) for resource in resources)
| Python | 0.999999 |
ddf52f0a7c3ed2d276c32c9b60242449ce8bf272 | convert print statement to logger call | geopy/geocoders/bing.py | geopy/geocoders/bing.py | import xml.dom.minidom
from urllib import urlencode
from urllib2 import urlopen
from geopy.geocoders.base import Geocoder
from geopy.util import logger
class Bing(Geocoder):
"""Geocoder using the Bing Maps API."""
def __init__(self, api_key, format_string='%s', output_format='xml'):
"""Initialize a customized Bing geocoder with location-specific
address information and your Bing Maps API key.
``api_key`` should be a valid Bing Maps API key.
``format_string`` is a string containing '%s' where the string to
geocode should be interpolated before querying the geocoder.
For example: '%s, Mountain View, CA'. The default is just '%s'.
``output_format`` can currently only be 'xml'.
"""
self.api_key = api_key
self.format_string = format_string
self.output_format = output_format.lower()
self.url = "http://dev.virtualearth.net/REST/v1/Locations?%s"
def geocode(self, string, exactly_one=True):
params = {'addressLine': self.format_string % string,
'o': self.output_format,
'key': self.api_key
}
url = self.url % urlencode(params)
return self.geocode_url(url, exactly_one)
def geocode_url(self, url, exactly_one=True):
logger.debug("Fetching %s..." % url)
page = urlopen(url)
parse = getattr(self, 'parse_' + self.output_format)
return parse(page, exactly_one)
def parse_xml(self, page, exactly_one=True):
"""Parse a location name, latitude, and longitude from an XML response.
"""
if not isinstance(page, basestring):
page = self._decode_page(page)
doc = xml.dom.minidom.parseString(page)
resources = doc.getElementsByTagName('Resources')
if exactly_one and len(resources) != 1:
raise ValueError("Didn't find exactly one resource! " \
"(Found %d.)" % len(resources))
def parse_resource(resource):
strip = ", \n"
address = self._get_first_text(resource, 'AddressLine', strip)
city = self._get_first_text(resource, 'Locality', strip)
state = self._get_first_text(resource, 'AdminDistrict', strip)
zip = self._get_first_text(resource, 'PostalCode', strip)
country = self._get_first_text(resource, 'CountryRegion', strip)
city_state = self._join_filter(", ", [city, state])
place = self._join_filter(" ", [city_state, zip])
location = self._join_filter(", ", [address, place, country])
latitude = self._get_first_text(resource, 'Latitude') or None
latitude = latitude and float(latitude)
longitude = self._get_first_text(resource, 'Longitude') or None
longitude = longitude and float(longitude)
return (location, (latitude, longitude))
if exactly_one:
return parse_resource(resources[0])
else:
return (parse_resource(resource) for resource in resources)
| import xml.dom.minidom
from urllib import urlencode
from urllib2 import urlopen
from geopy.geocoders.base import Geocoder
class Bing(Geocoder):
"""Geocoder using the Bing Maps API."""
def __init__(self, api_key, format_string='%s', output_format='xml'):
"""Initialize a customized Bing geocoder with location-specific
address information and your Bing Maps API key.
``api_key`` should be a valid Bing Maps API key.
``format_string`` is a string containing '%s' where the string to
geocode should be interpolated before querying the geocoder.
For example: '%s, Mountain View, CA'. The default is just '%s'.
``output_format`` can currently only be 'xml'.
"""
self.api_key = api_key
self.format_string = format_string
self.output_format = output_format.lower()
self.url = "http://dev.virtualearth.net/REST/v1/Locations?%s"
def geocode(self, string, exactly_one=True):
params = {'addressLine': self.format_string % string,
'o': self.output_format,
'key': self.api_key
}
url = self.url % urlencode(params)
return self.geocode_url(url, exactly_one)
def geocode_url(self, url, exactly_one=True):
print "Fetching %s..." % url
page = urlopen(url)
parse = getattr(self, 'parse_' + self.output_format)
return parse(page, exactly_one)
def parse_xml(self, page, exactly_one=True):
"""Parse a location name, latitude, and longitude from an XML response.
"""
if not isinstance(page, basestring):
page = self._decode_page(page)
doc = xml.dom.minidom.parseString(page)
resources = doc.getElementsByTagName('Resources')
if exactly_one and len(resources) != 1:
raise ValueError("Didn't find exactly one resource! " \
"(Found %d.)" % len(resources))
def parse_resource(resource):
strip = ", \n"
address = self._get_first_text(resource, 'AddressLine', strip)
city = self._get_first_text(resource, 'Locality', strip)
state = self._get_first_text(resource, 'AdminDistrict', strip)
zip = self._get_first_text(resource, 'PostalCode', strip)
country = self._get_first_text(resource, 'CountryRegion', strip)
city_state = self._join_filter(", ", [city, state])
place = self._join_filter(" ", [city_state, zip])
location = self._join_filter(", ", [address, place, country])
latitude = self._get_first_text(resource, 'Latitude') or None
latitude = latitude and float(latitude)
longitude = self._get_first_text(resource, 'Longitude') or None
longitude = longitude and float(longitude)
return (location, (latitude, longitude))
if exactly_one:
return parse_resource(resources[0])
else:
return (parse_resource(resource) for resource in resources)
| Python | 0.999999 |
22fdc870e6807a946f3e01b3c08b2ea5552c7555 | add end point for chamber member list | congress/members.py | congress/members.py | from .client import Client
from .utils import CURRENT_CONGRESS, check_chamber
class MembersClient(Client):
def list_chamber(self, chamber,congress=CURRENT_CONGRESS):
"Takes a bioguide_id, returns a legislator"
path = "{congress}/{chamber}/members.json".format(congress=congress,chamber=chamber)
return self.fetch(path)
def get(self, member_id):
"Takes a bioguide_id, returns a legislator"
path = "members/{0}.json".format(member_id)
return self.fetch(path)
def filter(self, chamber, congress=CURRENT_CONGRESS, **kwargs):
"""
Takes a chamber and Congress,
OR state and district, returning a list of members
"""
check_chamber(chamber)
kwargs.update(chamber=chamber, congress=congress)
if 'state' in kwargs and 'district' in kwargs:
path = ("members/{chamber}/{state}/{district}/"
"current.json").format(**kwargs)
elif 'state' in kwargs:
path = ("members/{chamber}/{state}/"
"current.json").format(**kwargs)
else:
path = ("{congress}/{chamber}/"
"members.json").format(**kwargs)
return self.fetch(path, parse=lambda r: r['results'])
def bills(self, member_id, type='introduced'):
"Same as BillsClient.by_member"
path = "members/{0}/bills/{1}.json".format(member_id, type)
return self.fetch(path)
def new(self, **kwargs):
"Returns a list of new members"
path = "members/new.json"
return self.fetch(path)
def departing(self, chamber, congress=CURRENT_CONGRESS):
"Takes a chamber and congress and returns a list of departing members"
check_chamber(chamber)
path = "{0}/{1}/members/leaving.json".format(congress, chamber)
return self.fetch(path)
def compare(self, first, second, chamber, type='votes', congress=CURRENT_CONGRESS):
"""
See how often two members voted together in a given Congress.
Takes two member IDs, a chamber and a Congress number.
"""
check_chamber(chamber)
path = "members/{first}/{type}/{second}/{congress}/{chamber}.json"
path = path.format(first=first, second=second, type=type,
congress=congress, chamber=chamber)
return self.fetch(path)
def party(self):
"Get state party counts for the current Congress"
path = "states/members/party.json"
return self.fetch(path, parse=lambda r: r['results'])
| from .client import Client
from .utils import CURRENT_CONGRESS, check_chamber
class MembersClient(Client):
def get(self, member_id):
"Takes a bioguide_id, returns a legislator"
path = "members/{0}.json".format(member_id)
return self.fetch(path)
def filter(self, chamber, congress=CURRENT_CONGRESS, **kwargs):
"""
Takes a chamber and Congress,
OR state and district, returning a list of members
"""
check_chamber(chamber)
kwargs.update(chamber=chamber, congress=congress)
if 'state' in kwargs and 'district' in kwargs:
path = ("members/{chamber}/{state}/{district}/"
"current.json").format(**kwargs)
elif 'state' in kwargs:
path = ("members/{chamber}/{state}/"
"current.json").format(**kwargs)
else:
path = ("{congress}/{chamber}/"
"members.json").format(**kwargs)
return self.fetch(path, parse=lambda r: r['results'])
def bills(self, member_id, type='introduced'):
"Same as BillsClient.by_member"
path = "members/{0}/bills/{1}.json".format(member_id, type)
return self.fetch(path)
def new(self, **kwargs):
"Returns a list of new members"
path = "members/new.json"
return self.fetch(path)
def departing(self, chamber, congress=CURRENT_CONGRESS):
"Takes a chamber and congress and returns a list of departing members"
check_chamber(chamber)
path = "{0}/{1}/members/leaving.json".format(congress, chamber)
return self.fetch(path)
def compare(self, first, second, chamber, type='votes', congress=CURRENT_CONGRESS):
"""
See how often two members voted together in a given Congress.
Takes two member IDs, a chamber and a Congress number.
"""
check_chamber(chamber)
path = "members/{first}/{type}/{second}/{congress}/{chamber}.json"
path = path.format(first=first, second=second, type=type,
congress=congress, chamber=chamber)
return self.fetch(path)
def party(self):
"Get state party counts for the current Congress"
path = "states/members/party.json"
return self.fetch(path, parse=lambda r: r['results'])
| Python | 0 |
e56df10f3e40b9287735f1295a0ed72e1525896f | change json to md RasaHQ/roadmap#280 | examples/restaurantbot/bot.py | examples/restaurantbot/bot.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import logging
import warnings
from policy import RestaurantPolicy
from rasa_core import utils
from rasa_core.agent import Agent
from rasa_core.policies.memoization import MemoizationPolicy
logger = logging.getLogger(__name__)
class RestaurantAPI(object):
def search(self, info):
return "papi's pizza place"
def train_dialogue(domain_file="restaurant_domain.yml",
model_path="models/dialogue",
training_data_file="data/babi_stories.md"):
agent = Agent(domain_file,
policies=[MemoizationPolicy(max_history=3),
RestaurantPolicy()])
training_data = agent.load_data(training_data_file)
agent.train(
training_data,
epochs=400,
batch_size=100,
validation_split=0.2
)
agent.persist(model_path)
return agent
def train_nlu():
from rasa_nlu.training_data import load_data
from rasa_nlu import config
from rasa_nlu.model import Trainer
training_data = load_data('data/nlu_data.md')
trainer = Trainer(config.load("nlu_model_config.yml"))
trainer.train(training_data)
model_directory = trainer.persist('models/nlu/',
fixed_model_name="current")
return model_directory
if __name__ == '__main__':
utils.configure_colored_logging(loglevel="INFO")
parser = argparse.ArgumentParser(
description='starts the bot')
parser.add_argument(
'task',
choices=["train-nlu", "train-dialogue", "run"],
help="what the bot should do - e.g. run or train?")
task = parser.parse_args().task
# decide what to do based on first parameter of the script
if task == "train-nlu":
train_nlu()
elif task == "train-dialogue":
train_dialogue()
| from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import logging
import warnings
from policy import RestaurantPolicy
from rasa_core import utils
from rasa_core.agent import Agent
from rasa_core.policies.memoization import MemoizationPolicy
logger = logging.getLogger(__name__)
class RestaurantAPI(object):
def search(self, info):
return "papi's pizza place"
def train_dialogue(domain_file="restaurant_domain.yml",
model_path="models/dialogue",
training_data_file="data/babi_stories.md"):
agent = Agent(domain_file,
policies=[MemoizationPolicy(max_history=3),
RestaurantPolicy()])
training_data = agent.load_data(training_data_file)
agent.train(
training_data,
epochs=400,
batch_size=100,
validation_split=0.2
)
agent.persist(model_path)
return agent
def train_nlu():
from rasa_nlu.training_data import load_data
from rasa_nlu import config
from rasa_nlu.model import Trainer
training_data = load_data('data/nlu_data.json')
trainer = Trainer(config.load("nlu_model_config.yml"))
trainer.train(training_data)
model_directory = trainer.persist('models/nlu/',
fixed_model_name="current")
return model_directory
if __name__ == '__main__':
utils.configure_colored_logging(loglevel="INFO")
parser = argparse.ArgumentParser(
description='starts the bot')
parser.add_argument(
'task',
choices=["train-nlu", "train-dialogue", "run"],
help="what the bot should do - e.g. run or train?")
task = parser.parse_args().task
# decide what to do based on first parameter of the script
if task == "train-nlu":
train_nlu()
elif task == "train-dialogue":
train_dialogue()
| Python | 0 |
40316f4a2f1e973ba98a50592a54f21a2d5ac18e | Add the exceptions module to describe exceptions from external target | rapidtest/executors/exceptions.py | rapidtest/executors/exceptions.py | from ..utils import Dictable
MSG_CANNOT_GUESS_METHOD = '''cannot find the target method. You may specify operations as \
arguments to Case if there are multiple methods to be called, or prepend all names of private \
methods with underscores.'''
class ExternalError(Exception):
pass
class ExternalEnvironmentError(ExternalError):
pass
class ExternalRuntimeError(ExternalError):
pass
class ExternalException(Dictable):
def __init__(self, name, message=None, stack_trace=None, runtime=False):
self.name = name
self.message = message or ''
self.stack_trace = (stack_trace or '').rstrip()
self.runtime = runtime
def to_exception(self):
Exc = type(self.name, (Exception,), {})
msg = '{}\n{}'.format(self.message, self.stack_trace)
Wrapper = ExternalRuntimeError if self.runtime else ExternalEnvironmentError
return Wrapper, Exc(msg)
| class ExternalExecutionTargetError(Exception):
pass
| Python | 0 |
07aca8e96d5e93edb684d0c4684ef8f837e8fc58 | Use comment builder for dirhtml too | readthedocs/doc_builder/loader.py | readthedocs/doc_builder/loader.py | from django.utils.importlib import import_module
from django.conf import settings
# Managers
mkdocs = import_module(getattr(settings, 'MKDOCS_BACKEND', 'doc_builder.backends.mkdocs'))
sphinx = import_module(getattr(settings, 'SPHINX_BACKEND', 'doc_builder.backends.sphinx'))
loading = {
# Possible HTML Builders
'sphinx': sphinx.HtmlBuilderComments,
'sphinx_htmldir': sphinx.HtmlDirBuilderComments,
'sphinx_singlehtml': sphinx.SingleHtmlBuilder,
# Other Sphinx Builders
'sphinx_pdf': sphinx.PdfBuilder,
'sphinx_epub': sphinx.EpubBuilder,
'sphinx_search': sphinx.SearchBuilder,
'sphinx_singlehtmllocalmedia': sphinx.LocalMediaBuilder,
# Other markup
'mkdocs': mkdocs.MkdocsHTML,
'mkdocs_json': mkdocs.MkdocsJSON,
}
| from django.utils.importlib import import_module
from django.conf import settings
# Managers
mkdocs = import_module(getattr(settings, 'MKDOCS_BACKEND', 'doc_builder.backends.mkdocs'))
sphinx = import_module(getattr(settings, 'SPHINX_BACKEND', 'doc_builder.backends.sphinx'))
loading = {
# Possible HTML Builders
'sphinx': sphinx.HtmlBuilderComments,
'sphinx_htmldir': sphinx.HtmlDirBuilder,
'sphinx_singlehtml': sphinx.SingleHtmlBuilder,
# Other Sphinx Builders
'sphinx_pdf': sphinx.PdfBuilder,
'sphinx_epub': sphinx.EpubBuilder,
'sphinx_search': sphinx.SearchBuilder,
'sphinx_singlehtmllocalmedia': sphinx.LocalMediaBuilder,
# Other markup
'mkdocs': mkdocs.MkdocsHTML,
'mkdocs_json': mkdocs.MkdocsJSON,
}
| Python | 0 |
000e44df690a4a1a607224d5f53cc4feee7bcbc0 | add tests for related objects. | reciprocity/chef_profile/tests.py | reciprocity/chef_profile/tests.py | """Test ChefProfile model."""
from __future__ import unicode_literals
from django.conf import settings
from django.test import TestCase
from django.db.models import QuerySet, Manager
from recipe.tests import RecipeFactory, IngredientFactory
from .models import ChefProfile
import factory
USER_BATCH_SIZE = 20
class UserFactory(factory.django.DjangoModelFactory):
"""Factory for User model in tests."""
class Meta:
"""Establish User model as the product of this factory."""
model = settings.AUTH_USER_MODEL
django_get_or_create = ('username',)
first_name = factory.Faker('first_name')
last_name = factory.Faker('last_name')
email = factory.Faker('email')
username = factory.LazyAttribute(
lambda obj: ''.join((obj.first_name, obj.last_name)))
password = factory.PostGenerationMethodCall('set_password', 'secret')
class BuiltUserCase(TestCase):
"""Single user not saved to database, testing functionality of handlers."""
def setUp(self):
"""Set up user stub."""
self.user = UserFactory.build()
def test_user_not_saved(self):
"""Make sure set up user has not been saved yet."""
self.assertIsNone(self.user.id)
def test_init_chef_profile(self):
"""Test that ChefProfile created on save."""
profile = ChefProfile(user=self.user)
self.assertIs(profile, self.user.profile)
class SingleUserCase(TestCase):
"""Set up single user for tests."""
def setUp(self):
"""Create a user for testing."""
self.user = UserFactory.create()
self.recipe = RecipeFactory.create()
self.ingredient1 = IngredientFactory.create()
self.ingredient2 = IngredientFactory.create()
class BasicUserProfileCase(SingleUserCase):
"""Basic test case for profile."""
def test_user_has_profile(self):
"""Test that newly created User has ChefProfile."""
self.assertTrue(self.user.profile)
def test_profile_pk(self):
"""Test that newly created User's profile has a primary key."""
self.assertIsInstance(self.user.profile.pk, int)
self.assertTrue(self.user.profile.pk)
def test_profile_is_active(self):
"""Test that profile of new User is active."""
self.assertTrue(self.user.profile.is_active)
def test_profile_active_manager(self):
"""Test that active attr is a Manager class."""
self.assertIsInstance(ChefProfile.active, Manager)
def test_profile_active_query(self):
"""Test that active manager can give a QuerySet."""
self.assertIsInstance(ChefProfile.active.all(), QuerySet)
def test_active_count(self):
"""Test that counting the active manager returns expected int."""
self.assertEqual(ChefProfile.active.count(), 2)
def test_about_me(self):
"""Test that User.profile.about_me can be added as expected."""
self.assertIsNone(self.user.profile.about_me)
self.user.profile.about_me = 'Here is something about me'
self.user.save()
self.assertEqual(self.user.profile.about_me, 'Here is something about me')
def test_favorites(self):
"""Ensure a user can have a favorite recipe."""
self.assertNotIn(self.recipe, self.user.profile.favorites.all())
self.user.profile.favorites.add(self.recipe)
self.assertIn(self.recipe, self.user.profile.favorites.all())
def test_liked_ingredient(self):
"""Test user can like a single ingredient."""
self.user.profile.liked_ingredients.add(self.ingredient1)
self.assertIn(self.ingredient1, self.user.profile.liked_ingredients.all())
self.assertNotIn(self.ingredient2, self.user.profile.liked_ingredients.all())
def test_disliked_ingredient(self):
"""Test that user can have disliked ingredients."""
self.assertNotIn(self.ingredient2, self.user.profile.disliked_ingredients.all())
self.user.profile.disliked_ingredients.add(self.ingredient2)
self.assertIn(self.ingredient2, self.user.profile.disliked_ingredients.all())
class ManyUsersCase(TestCase):
"""Test cases where many Users are registered."""
def setUp(self):
"""Add many Users to the test."""
self.user_batch = UserFactory.create_batch(USER_BATCH_SIZE)
def test_active_count(self):
"""Make sure that the active user count is the expected size."""
self.assertEqual(ChefProfile.active.count(), USER_BATCH_SIZE)
| """Test ChefProfile model."""
from __future__ import unicode_literals
from django.conf import settings
from django.test import TestCase
from django.db.models import QuerySet, Manager
from .models import ChefProfile
import factory
USER_BATCH_SIZE = 20
class UserFactory(factory.django.DjangoModelFactory):
"""Factory for User model in tests."""
class Meta:
"""Establish User model as the product of this factory."""
model = settings.AUTH_USER_MODEL
django_get_or_create = ('username',)
first_name = factory.Faker('first_name')
last_name = factory.Faker('last_name')
email = factory.Faker('email')
username = factory.LazyAttribute(
lambda obj: ''.join((obj.first_name, obj.last_name)))
password = factory.PostGenerationMethodCall('set_password', 'secret')
class BuiltUserCase(TestCase):
"""Single user not saved to database, testing functionality of handlers."""
def setUp(self):
"""Set up user stub."""
self.user = UserFactory.build()
def test_user_not_saved(self):
"""Make sure set up user has not been saved yet."""
self.assertIsNone(self.user.id)
def test_init_chef_profile(self):
"""Test that ChefProfile created on save."""
profile = ChefProfile(user=self.user)
self.assertIs(profile, self.user.profile)
class SingleUserCase(TestCase):
"""Set up single user for tests."""
def setUp(self):
"""Create a user for testing."""
self.user = UserFactory.create()
class BasicUserProfileCase(SingleUserCase):
"""Basic test case for profile."""
def test_user_has_profile(self):
"""Test that newly created User has ChefProfile."""
self.assertTrue(self.user.profile)
def test_profile_pk(self):
"""Test that newly created User's profile has a primary key."""
self.assertIsInstance(self.user.profile.pk, int)
self.assertTrue(self.user.profile.pk)
def test_profile_is_active(self):
"""Test that profile of new User is active."""
self.assertTrue(self.user.profile.is_active)
def test_profile_active_manager(self):
"""Test that active attr is a Manager class."""
self.assertIsInstance(ChefProfile.active, Manager)
def test_profile_active_query(self):
"""Test that active manager can give a QuerySet."""
self.assertIsInstance(ChefProfile.active.all(), QuerySet)
def test_active_count(self):
"""Test that counting the active manager returns expected int."""
self.assertEqual(ChefProfile.active.count(), 1)
def test_about_me(self):
"""Test that User.profile.about_me can be added as expected."""
self.assertIsNone(self.user.profile.about_me)
self.user.profile.about_me = 'Here is something about me'
self.user.save()
self.assertEqual(self.user.profile.about_me, 'Here is something about me')
class ManyUsersCase(TestCase):
"""Test cases where many Users are registered."""
def setUp(self):
"""Add many Users to the test."""
self.user_batch = UserFactory.create_batch(USER_BATCH_SIZE)
def test_active_count(self):
"""Make sure that the active user count is the expected size."""
self.assertEqual(ChefProfile.active.count(), USER_BATCH_SIZE)
| Python | 0 |
5c22153b8b43e13266ea2b68a39bcf94b75ab993 | Add get_timestra and get_entry_monitor_dir | creation/lib/cgWConsts.py | creation/lib/cgWConsts.py | ####################################
#
# Keep all the constants used to
# create glidein entries in this
# module
#
# Author: Igor Sfiligoi
#
####################################
import time
import string
import os.path
def get_timestr(when=time.time()):
start_time_tuple=time.localtime(when)
timestr=(string.printable[start_time_tuple[0]-2000]+ #year, will work until ~2060
string.printable[start_time_tuple[1]]+ #month
string.printable[start_time_tuple[2]]+ #day
string.printable[start_time_tuple[3]]+ #hour
string.digits[start_time_tuple[4]/10]) #first minute digit
return timestr
TIMESTR=get_timestr()
# these two are in the submit dir, so they can be changed
PARAMS_FILE="params.cfg"
SUMMARY_SIGNATURE_FILE="signatures.sha1"
# these are in the stage dir, so they need to be renamed if changed
DESCRIPTION_FILE="description.%s.cfg"%TIMESTR
ATTRS_FILE="attributes.cfg"
CONSTS_FILE="constants.%s.cfg"%TIMESTR
FILE_LISTFILE="file_list.%s.lst"%TIMESTR
SCRIPT_LISTFILE="script_list.%s.lst"%TIMESTR
SUBSYSTEM_LISTFILE="subsystem_list.%s.lst"%TIMESTR
SIGNATURE_FILE="signature.%s.sha1"%TIMESTR
CONDOR_FILE="condor_bin.%s.tgz"%TIMESTR
CONDOR_DIR="condor"
CONDOR_ATTR="CONDOR_DIR"
VARS_FILE="condor_vars.%s.lst"%TIMESTR
CONDOR_STARTUP_FILE="condor_startup.sh"
# these are again in the submit dir
STARTUP_FILE="glidein_startup.sh"
GLIDEIN_FILE="glidein.descript"
JOB_DESCRIPT_FILE="job.descript"
SUBMIT_FILE="job.condor"
SUBMIT_WRAPPER="job_submit.sh"
XML_CONFIG_FILE="glideinWMS.xml"
###################################################
#
# These functions append constant parts to strings
#
###################################################
def get_entry_submit_dir(submit_dir,entry_name):
entry_submit_dir=os.path.join(submit_dir,"entry_"+entry_name)
return entry_submit_dir
def get_entry_stage_dir(stage_dir,entry_name):
entry_stage_dir=os.path.join(stage_dir,"entry_"+entry_name)
return entry_stage_dir
def get_entry_monitor_dir(monitor_dir,entry_name):
entry_monitor_dir=os.path.join(monitor_dir,"entry_"+entry_name)
return entry_monitor_dir
###########################################################
#
# CVS info
#
# $Id: cgWConsts.py,v 1.6 2007/11/28 20:51:48 sfiligoi Exp $
#
# Log:
# $Log: cgWConsts.py,v $
# Revision 1.6 2007/11/28 20:51:48 sfiligoi
# Add get_timestra and get_entry_monitor_dir
#
# Revision 1.5 2007/11/27 20:29:27 sfiligoi
# Fix typo
#
# Revision 1.4 2007/11/27 19:58:51 sfiligoi
# Move dicts initialization into cgWDictFile and entry subdir definition in cgWConsts
#
# Revision 1.3 2007/10/12 21:56:24 sfiligoi
# Add glideinWMS.cfg in the list of constants
#
# Revision 1.2 2007/10/12 21:02:24 sfiligoi
# Add missing import
#
# Revision 1.1 2007/10/12 20:20:26 sfiligoi
# Put constants into a dedicated module
#
#
###########################################################
| ####################################
#
# Keep all the constants used to
# create glidein entries in this
# module
#
# Author: Igor Sfiligoi
#
####################################
import time
import string
import os.path
start_time_tuple=time.localtime()
TIMESTR=(string.printable[start_time_tuple[0]-2000]+ #year, will work until ~2060
string.printable[start_time_tuple[1]]+ #month
string.printable[start_time_tuple[2]]+ #day
string.printable[start_time_tuple[3]]+ #hour
string.digits[start_time_tuple[4]/10]) #first minute digit
del start_time_tuple
# these two are in the submit dir, so they can be changed
PARAMS_FILE="params.cfg"
SUMMARY_SIGNATURE_FILE="signatures.sha1"
# these are in the stage dir, so they need to be renamed if changed
DESCRIPTION_FILE="description.%s.cfg"%TIMESTR
ATTRS_FILE="attributes.cfg"
CONSTS_FILE="constants.%s.cfg"%TIMESTR
FILE_LISTFILE="file_list.%s.lst"%TIMESTR
SCRIPT_LISTFILE="script_list.%s.lst"%TIMESTR
SUBSYSTEM_LISTFILE="subsystem_list.%s.lst"%TIMESTR
SIGNATURE_FILE="signature.%s.sha1"%TIMESTR
CONDOR_FILE="condor_bin.%s.tgz"%TIMESTR
CONDOR_DIR="condor"
CONDOR_ATTR="CONDOR_DIR"
VARS_FILE="condor_vars.%s.lst"%TIMESTR
CONDOR_STARTUP_FILE="condor_startup.sh"
# these are again in the submit dir
STARTUP_FILE="glidein_startup.sh"
GLIDEIN_FILE="glidein.descript"
JOB_DESCRIPT_FILE="job.descript"
SUBMIT_FILE="job.condor"
SUBMIT_WRAPPER="job_submit.sh"
XML_CONFIG_FILE="glideinWMS.xml"
###################################################
#
# These functions append constant parts to strings
#
###################################################
def get_entry_submit_dir(submit_dir,entry_name):
entry_submit_dir=os.path.join(submit_dir,"entry_"+entry_name)
return entry_submit_dir
def get_entry_stage_dir(stage_dir,entry_name):
entry_stage_dir=os.path.join(stage_dir,"entry_"+entry_name)
return entry_stage_dir
###########################################################
#
# CVS info
#
# $Id: cgWConsts.py,v 1.5 2007/11/27 20:29:27 sfiligoi Exp $
#
# Log:
# $Log: cgWConsts.py,v $
# Revision 1.5 2007/11/27 20:29:27 sfiligoi
# Fix typo
#
# Revision 1.4 2007/11/27 19:58:51 sfiligoi
# Move dicts initialization into cgWDictFile and entry subdir definition in cgWConsts
#
# Revision 1.3 2007/10/12 21:56:24 sfiligoi
# Add glideinWMS.cfg in the list of constants
#
# Revision 1.2 2007/10/12 21:02:24 sfiligoi
# Add missing import
#
# Revision 1.1 2007/10/12 20:20:26 sfiligoi
# Put constants into a dedicated module
#
#
###########################################################
| Python | 0.000001 |
933dacc43a4d3db0ea134d374867e80e8144a539 | Fix pairing syntax bug | mycroft/managers/identity_manager.py | mycroft/managers/identity_manager.py | # Copyright (c) 2017 Mycroft AI, Inc.
#
# This file is part of Mycroft Light
# (see https://github.com/MatthewScholefield/mycroft-light).
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from os.path import isfile
from time import time as get_time
from mycroft.managers.manager_plugin import ManagerPlugin
from mycroft.util import log
class IdentityManager(ManagerPlugin):
def __init__(self, rt):
super().__init__(rt)
if not rt.config['use_server']:
raise NotImplementedError('Server Disabled')
self.identity_file = rt.filesystem.path(rt.paths.identity)
self.uuid = self.access_token = self.refresh_token = ''
self.expiration = 0
self.load()
def is_expired(self):
return self.refresh_token and self.expiration <= get_time()
@staticmethod
def translate_from_server(data):
replacements = {
'accessToken': 'access',
'refreshToken': 'refresh',
'expiration': 'expires_at'
}
data['expiration'] += get_time()
return {replacements.get(k, k): v for k, v in data.items()}
def register(self, data):
"""Registers new login data from server"""
log.debug('REGISTERING TO:', data)
data = self.translate_from_server(data)
self.assign(data)
with open(self.identity_file, 'w') as f:
json.dump(data, f)
def assign(self, data):
"""Set identity from data"""
if not isinstance(data, dict):
log.error('Invalid Identity Data:', data)
return
try:
self.uuid = data['uuid']
self.access_token = data['access']
self.refresh_token = data['refresh']
self.expiration = data['expires_at']
except KeyError:
log.exception('Loading Identity')
def load(self):
"""Load identity from disk"""
if isfile(self.identity_file):
with open(self.identity_file) as f:
data = json.load(f)
self.assign(data)
| # Copyright (c) 2017 Mycroft AI, Inc.
#
# This file is part of Mycroft Light
# (see https://github.com/MatthewScholefield/mycroft-light).
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from os.path import isfile
from time import time as get_time
from mycroft.managers.manager_plugin import ManagerPlugin
from mycroft.util import log
class IdentityManager(ManagerPlugin):
def __init__(self, rt):
super().__init__(rt)
if not rt.config['use_server']:
raise NotImplementedError('Server Disabled')
self.identity_file = rt.filesystem.path(rt.paths.identity)
self.uuid = self.access_token = self.refresh_token = ''
self.expiration = 0
self.load()
def is_expired(self):
return self.refresh_token and self.expiration <= get_time()
@staticmethod
def translate_from_server(data):
replacements = {
'accessToken': 'access',
'refreshToken': 'refresh',
'expiration': 'expires_at'
}
data['expiration'] += get_time()
return {k.get(replacements, k): v for k, v in data.items()}
def register(self, data):
"""Registers new login data from server"""
log.debug('REGISTERING TO:', data)
data = self.translate_from_server(data)
self.assign(data)
with open(self.identity_file, 'w') as f:
json.dump(data, f)
def assign(self, data):
"""Set identity from data"""
if not isinstance(data, dict):
log.error('Invalid Identity Data:', data)
return
try:
self.uuid = data['uuid']
self.access_token = data['access']
self.refresh_token = data['refresh']
self.expiration = data['expires_at']
except KeyError:
log.exception('Loading Identity')
def load(self):
"""Load identity from disk"""
if isfile(self.identity_file):
with open(self.identity_file) as f:
data = json.load(f)
self.assign(data)
| Python | 0.000001 |
adb9b262167beecb30edc281e4ab10fc05b3e6da | optimize imports and add common exceptions | voting-server/app/errors.py | voting-server/app/errors.py | from flask import jsonify
from werkzeug.exceptions import HTTPException, default_exceptions
from app import app
def json_error(error):
response = jsonify(message = str(error))
response.status_code = error.code if isinstance(error, HTTPException) else 500
return response
for code in default_exceptions.keys():
app.register_error_handler(code, json_error)
app.register_error_handler(Exception, json_error)
| from flask import jsonify
from werkzeug.exceptions import HTTPException
from werkzeug.exceptions import default_exceptions
from app import app
def json_error(error):
response = jsonify(message = str(error))
response.status_code = error.code if isinstance(error, HTTPException) else 500
return response
for code in default_exceptions.keys():
app.register_error_handler(code, json_error)
| Python | 0 |
7d650f3ee367a8eac710893c1818aa08cccf7598 | Add auth | bot/dao.py | bot/dao.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from pymongo import MongoClient, IndexModel
class DAO:
def __init__(self, *args, **kwargs):
#args -- tuple of anonymous arguments
#kwargs -- dictionary of named arguments
if kwargs.get('mongo') is None:
self.default_mongo_init()
else:
self.mongo = MongoClient(kwargs.get('mongo').get('host'), kwargs.get('mongo').get('port'))
self.mongo_db = self.mongo[kwargs.get('mongo').get('db')]
def default_mongo_init(self):
self.mongo = MongoClient('mongodb://username:password@localhost:27017/dark')
self.mongo_db = self.mongo['dark']
self.mongo_db['actress'].create_index('id', unique=True)
def update_one_feedback_by_id(self, id, ox, image):
collection = self.mongo_db['actress']
result = collection.update_one({"id": id}, {'$inc': {'count': 1}, '$push': {ox: image}}, upsert=True)
def update_one_works_by_id(self, id, no):
collection = self.mongo_db['actress']
result = collection.update_one({"id": id}, {'$push': {"works": no}}, upsert=True)
def find_one_works_by_id(self, id):
collection = self.mongo_db['actress']
return collection.find_one({"id": id}, {"works": True, "_id": False})
def update_one_info_by_actress(self, actress):
collection = self.mongo_db['actress']
result = collection.update_one({"id": actress.get("id")}, {'$set': {"id": actress.get("id"), "name": actress.get("name"), "img": actress.get("img")}}, upsert=True)
def find_one_actress_by_id(self, id):
collection = self.mongo_db['actress']
return collection.find_one({"id": id}, {"_id": False})
| #!/usr/bin/python
# -*- coding: utf-8 -*-
from pymongo import MongoClient, IndexModel
class DAO:
def __init__(self, *args, **kwargs):
#args -- tuple of anonymous arguments
#kwargs -- dictionary of named arguments
if kwargs.get('mongo') is None:
self.default_mongo_init()
else:
self.mongo = MongoClient(kwargs.get('mongo').get('host'), kwargs.get('mongo').get('port'))
self.mongo_db = self.mongo[kwargs.get('mongo').get('db')]
def default_mongo_init(self):
self.mongo = MongoClient('mongodb://localhost:27017/')
self.mongo_db = self.mongo['dark']
self.mongo_db['actress'].create_index('id', unique=True)
def update_one_feedback_by_id(self, id, ox, image):
collection = self.mongo_db['actress']
result = collection.update_one({"id": id}, {'$inc': {'count': 1}, '$push': {ox: image}}, upsert=True)
def update_one_works_by_id(self, id, no):
collection = self.mongo_db['actress']
result = collection.update_one({"id": id}, {'$push': {"works": no}}, upsert=True)
def find_one_works_by_id(self, id):
collection = self.mongo_db['actress']
return collection.find_one({"id": id}, {"works": True, "_id": False})
def update_one_info_by_actress(self, actress):
collection = self.mongo_db['actress']
result = collection.update_one({"id": actress.get("id")}, {'$set': {"id": actress.get("id"), "name": actress.get("name"), "img": actress.get("img")}}, upsert=True)
def find_one_actress_by_id(self, id):
collection = self.mongo_db['actress']
return collection.find_one({"id": id}, {"_id": False})
| Python | 0.000007 |
4b8339b53f1b9dcd79f2a9060933713328a13b90 | Mark dask-distributed tests on Windows as xfail (#1747) | xarray/tests/test_distributed.py | xarray/tests/test_distributed.py | import sys
import pytest
import xarray as xr
from xarray.core.pycompat import suppress
distributed = pytest.importorskip('distributed')
da = pytest.importorskip('dask.array')
import dask
from distributed.utils_test import cluster, loop, gen_cluster
from distributed.client import futures_of, wait
from xarray.tests.test_backends import create_tmp_file, ON_WINDOWS
from xarray.tests.test_dataset import create_test_data
from . import assert_allclose, has_scipy, has_netCDF4, has_h5netcdf
ENGINES = []
if has_scipy:
ENGINES.append('scipy')
if has_netCDF4:
ENGINES.append('netcdf4')
if has_h5netcdf:
ENGINES.append('h5netcdf')
@pytest.mark.xfail(sys.platform == 'win32',
reason='https://github.com/pydata/xarray/issues/1738')
@pytest.mark.parametrize('engine', ENGINES)
def test_dask_distributed_integration_test(loop, engine):
with cluster() as (s, _):
with distributed.Client(s['address'], loop=loop):
original = create_test_data()
with create_tmp_file(allow_cleanup_failure=ON_WINDOWS) as filename:
original.to_netcdf(filename, engine=engine)
with xr.open_dataset(filename, chunks=3, engine=engine) as restored:
assert isinstance(restored.var1.data, da.Array)
computed = restored.compute()
assert_allclose(original, computed)
@pytest.mark.skipif(distributed.__version__ <= '1.19.3',
reason='Need recent distributed version to clean up get')
@gen_cluster(client=True, timeout=None)
def test_async(c, s, a, b):
x = create_test_data()
assert not dask.is_dask_collection(x)
y = x.chunk({'dim2': 4}) + 10
assert dask.is_dask_collection(y)
assert dask.is_dask_collection(y.var1)
assert dask.is_dask_collection(y.var2)
z = y.persist()
assert str(z)
assert dask.is_dask_collection(z)
assert dask.is_dask_collection(z.var1)
assert dask.is_dask_collection(z.var2)
assert len(y.__dask_graph__()) > len(z.__dask_graph__())
assert not futures_of(y)
assert futures_of(z)
future = c.compute(z)
w = yield future
assert not dask.is_dask_collection(w)
assert_allclose(x + 10, w)
assert s.task_state
| import pytest
import xarray as xr
from xarray.core.pycompat import suppress
distributed = pytest.importorskip('distributed')
da = pytest.importorskip('dask.array')
import dask
from distributed.utils_test import cluster, loop, gen_cluster
from distributed.client import futures_of, wait
from xarray.tests.test_backends import create_tmp_file, ON_WINDOWS
from xarray.tests.test_dataset import create_test_data
from . import assert_allclose, has_scipy, has_netCDF4, has_h5netcdf
ENGINES = []
if has_scipy:
ENGINES.append('scipy')
if has_netCDF4:
ENGINES.append('netcdf4')
if has_h5netcdf:
ENGINES.append('h5netcdf')
@pytest.mark.parametrize('engine', ENGINES)
def test_dask_distributed_integration_test(loop, engine):
with cluster() as (s, _):
with distributed.Client(s['address'], loop=loop):
original = create_test_data()
with create_tmp_file(allow_cleanup_failure=ON_WINDOWS) as filename:
original.to_netcdf(filename, engine=engine)
with xr.open_dataset(filename, chunks=3, engine=engine) as restored:
assert isinstance(restored.var1.data, da.Array)
computed = restored.compute()
assert_allclose(original, computed)
@pytest.mark.skipif(distributed.__version__ <= '1.19.3',
reason='Need recent distributed version to clean up get')
@gen_cluster(client=True, timeout=None)
def test_async(c, s, a, b):
x = create_test_data()
assert not dask.is_dask_collection(x)
y = x.chunk({'dim2': 4}) + 10
assert dask.is_dask_collection(y)
assert dask.is_dask_collection(y.var1)
assert dask.is_dask_collection(y.var2)
z = y.persist()
assert str(z)
assert dask.is_dask_collection(z)
assert dask.is_dask_collection(z.var1)
assert dask.is_dask_collection(z.var2)
assert len(y.__dask_graph__()) > len(z.__dask_graph__())
assert not futures_of(y)
assert futures_of(z)
future = c.compute(z)
w = yield future
assert not dask.is_dask_collection(w)
assert_allclose(x + 10, w)
assert s.task_state
| Python | 0 |
6c3ff180c3bda17f1b6fdcf4500fdefdef9d713f | Fix copy & paste errors in setting I brought over from django-skel. | vsub/settings/production.py | vsub/settings/production.py | """Settings used in the production environment."""
from memcacheify import memcacheify
from postgresify import postgresify
from base import *
## Email configuration
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = os.environ.get('EMAIL_HOST', 'smtp.gmail.com')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host-password
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD', '')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host-user
EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER', 'your_email@example.com')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-port
EMAIL_PORT = os.environ.get('EMAIL_PORT', 587)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = '[%s] ' % SITE_NAME
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-use-tls
EMAIL_USE_TLS = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = EMAIL_HOST_USER
## Database configuration
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = postgresify()
## Cache configuration
# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = memcacheify()
## Secret key configuration
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Use the value set in the Heroku configuration.
SECRET_KEY = os.environ.get('SECRET_KEY', SECRET_KEY)
| """Settings used in the production environment."""
from memcacheify import memcacheify
from postgresify import postgresify
from base import *
## Email configuration
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = os.environ.get('EMAIL_HOST', 'smtp.gmail.com')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host-password
EMAIL_HOST_PASSWORD = environ.get('EMAIL_HOST_PASSWORD', '')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host-user
EMAIL_HOST_USER = environ.get('EMAIL_HOST_USER', 'your_email@example.com')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-port
EMAIL_PORT = environ.get('EMAIL_PORT', 587)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = '[%s] ' % SITE_NAME
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-use-tls
EMAIL_USE_TLS = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = EMAIL_HOST_USER
## Database configuration
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = postgresify()
## Cache configuration
# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = memcacheify()
## Secret key configuration
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Use the value set in the Heroku configuration.
SECRET_KEY = environ.get('SECRET_KEY', SECRET_KEY)
| Python | 0 |
4fcdd2f4cea4899b3f7057f98b95fa7380682816 | Add else: return to break out of loop when BACK is selected | cs251tk/webapp/web_cli.py | cs251tk/webapp/web_cli.py | import os
from ..common import chdir
from ..student import stash, pull, checkout_date
from ..student.markdownify.process_file import process_file
from PyInquirer import style_from_dict, Token, prompt
def check_student(student, spec, basedir):
files = []
if os.path.exists('{}/{}'.format(student, spec['assignment'])):
with chdir('{}/{}'.format(student, spec['assignment'])):
for file in spec['files']:
result = process_file(file['filename'],
steps=file['commands'],
options=file['options'],
spec=spec,
cwd=os.getcwd(),
supporting_dir=os.path.join(basedir, 'data', 'supporting'),
interact=False,
basedir=basedir,
spec_id=spec['assignment'],
skip_web_compile=False)
if 'web' in file['options']:
description = file['filename']
if result['missing']:
if 'optional' in file['options']:
description = '{} MISSING (OPTIONAL)'.format(file['filename'])
else:
description = '{} MISSING'.format(file['filename'])
files = files + [description]
else:
continue
return files
def ask_student(usernames):
style = style_from_dict({
Token.QuestionMark: '#959ee7 bold',
Token.Selected: '#959ee7',
Token.Pointer: '#959ee7 bold',
Token.Answer: '#959ee7 bold',
})
questions = [
{
'type': 'list',
'name': 'student',
'message': 'Choose student',
'choices': ['QUIT', 'LOG and QUIT', *usernames]
}
]
student = prompt(questions, style=style)
if not student:
return None
return student['student']
def ask_file(files, student, spec, basedir):
style = style_from_dict({
Token.QuestionMark: '#e3bd27 bold',
Token.Selected: '#e3bd27',
Token.Pointer: '#e3bd27 bold',
Token.Answer: '#e3bd27 bold',
})
while True:
questions = [
{
'type': 'list',
'name': 'file',
'message': 'Choose file',
'choices': ['BACK'] + files,
}
]
file = prompt(questions, style=style)
if file and file['file'] != 'BACK':
file_spec = {}
for f in spec['files']:
if f['filename'] == file['file']:
file_spec = f
break
if file_spec:
with chdir('{}/{}'.format(student, spec['assignment'])):
process_file(file_spec['filename'],
steps=file_spec['commands'],
options=file_spec['options'],
spec=spec,
cwd=os.getcwd(),
supporting_dir=os.path.join(basedir, 'data', 'supporting'),
interact=False,
basedir=basedir,
spec_id=spec['assignment'],
skip_web_compile=False)
else:
return
def launch_cli(basedir, date, no_update, spec, usernames):
usernames = [
'{} NO SUBMISSION'.format(user)
if not os.path.exists('{}/{}'.format(user, spec['assignment']))
else user
for user in usernames
]
while True:
student = ask_student(usernames)
if not student or student == 'QUIT':
return False
elif student == 'LOG and QUIT':
return True
stash(student, no_update=no_update)
pull(student, no_update=no_update)
checkout_date(student, date=date)
files = check_student(student, spec, basedir)
if files:
ask_file(files, student, spec, basedir)
| import os
from ..common import chdir
from ..student import stash, pull, checkout_date
from ..student.markdownify.process_file import process_file
from PyInquirer import style_from_dict, Token, prompt
def check_student(student, spec, basedir):
files = []
if os.path.exists('{}/{}'.format(student, spec['assignment'])):
with chdir('{}/{}'.format(student, spec['assignment'])):
for file in spec['files']:
result = process_file(file['filename'],
steps=file['commands'],
options=file['options'],
spec=spec,
cwd=os.getcwd(),
supporting_dir=os.path.join(basedir, 'data', 'supporting'),
interact=False,
basedir=basedir,
spec_id=spec['assignment'],
skip_web_compile=False)
if 'web' in file['options']:
description = file['filename']
if result['missing']:
if 'optional' in file['options']:
description = '{} MISSING (OPTIONAL)'.format(file['filename'])
else:
description = '{} MISSING'.format(file['filename'])
files = files + [description]
else:
continue
return files
def ask_student(usernames):
style = style_from_dict({
Token.QuestionMark: '#959ee7 bold',
Token.Selected: '#959ee7',
Token.Pointer: '#959ee7 bold',
Token.Answer: '#959ee7 bold',
})
questions = [
{
'type': 'list',
'name': 'student',
'message': 'Choose student',
'choices': ['QUIT', 'LOG and QUIT', *usernames]
}
]
student = prompt(questions, style=style)
if not student:
return None
return student['student']
def ask_file(files, student, spec, basedir):
style = style_from_dict({
Token.QuestionMark: '#e3bd27 bold',
Token.Selected: '#e3bd27',
Token.Pointer: '#e3bd27 bold',
Token.Answer: '#e3bd27 bold',
})
while True:
questions = [
{
'type': 'list',
'name': 'file',
'message': 'Choose file',
'choices': ['BACK'] + files,
}
]
file = prompt(questions, style=style)
if file and file['file'] != 'BACK':
file_spec = {}
for f in spec['files']:
if f['filename'] == file['file']:
file_spec = f
break
if file_spec:
with chdir('{}/{}'.format(student, spec['assignment'])):
process_file(file_spec['filename'],
steps=file_spec['commands'],
options=file_spec['options'],
spec=spec,
cwd=os.getcwd(),
supporting_dir=os.path.join(basedir, 'data', 'supporting'),
interact=False,
basedir=basedir,
spec_id=spec['assignment'],
skip_web_compile=False)
def launch_cli(basedir, date, no_update, spec, usernames):
usernames = [
'{} NO SUBMISSION'.format(user)
if not os.path.exists('{}/{}'.format(user, spec['assignment']))
else user
for user in usernames
]
while True:
student = ask_student(usernames)
if not student or student == 'QUIT':
return False
elif student == 'LOG and QUIT':
return True
stash(student, no_update=no_update)
pull(student, no_update=no_update)
checkout_date(student, date=date)
files = check_student(student, spec, basedir)
if files:
ask_file(files, student, spec, basedir)
| Python | 0.000001 |
c3b0d4b05314dc9fd51c790a86d30659d09c5250 | Allow negative numbers in the GEOS string | wagtailgeowidget/helpers.py | wagtailgeowidget/helpers.py | import re
geos_ptrn = re.compile(
"^SRID=([0-9]{1,});POINT\((-?[0-9\.]{1,})\s(-?[0-9\.]{1,})\)$"
)
def geosgeometry_str_to_struct(value):
'''
Parses a geosgeometry string into struct.
Example:
SRID=5432;POINT(12.0 13.0)
Returns:
>> [5432, 12.0, 13.0]
'''
result = geos_ptrn.match(value)
if not result:
return None
return {
'srid': result.group(1),
'x': result.group(2),
'y': result.group(3),
}
| import re
geos_ptrn = re.compile(
"^SRID=([0-9]{1,});POINT\(([0-9\.]{1,})\s([0-9\.]{1,})\)$"
)
def geosgeometry_str_to_struct(value):
'''
Parses a geosgeometry string into struct.
Example:
SRID=5432;POINT(12.0 13.0)
Returns:
>> [5432, 12.0, 13.0]
'''
result = geos_ptrn.match(value)
if not result:
return None
return {
'srid': result.group(1),
'x': result.group(2),
'y': result.group(3),
}
| Python | 0 |
03ea21b744a1c499348a3d1b1ad87258efee1e33 | add helpful StrictJsonObject object | dimagi/ext/jsonobject.py | dimagi/ext/jsonobject.py | from __future__ import absolute_import
import datetime
import decimal
from jsonobject.base_properties import AbstractDateProperty
from jsonobject import *
import re
from jsonobject.api import re_date, re_time, re_decimal
from dimagi.utils.dates import safe_strftime
from dimagi.utils.parsing import ISO_DATETIME_FORMAT
from django.conf import settings
OldJsonObject = JsonObject
OldDateTimeProperty = DateTimeProperty
HISTORICAL_DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
try:
# this isn't actually part of dimagi-utils
# but this is temporary and don't want to do a bigger reorg
from corehq.util.soft_assert import soft_assert
except ImportError:
def _assert(assertion, msg):
assert assertion, msg
else:
_assert = soft_assert('{}@{}'.format('droberts', 'dimagi.com'),
# should still fail in tests
fail_if_debug=settings.UNIT_TESTING)
class DateTimeProperty(AbstractDateProperty):
"""
Accepts and produces ISO8601 string in UTC (with the Z suffix)
Accepts with or without microseconds (must have all six digits if any)
Always produces with microseconds
(USec stands for microsecond)
"""
_type = datetime.datetime
def _wrap(self, value):
if '.' in value:
fmt = ISO_DATETIME_FORMAT
if len(value.split('.')[-1]) != 7:
raise ValueError(
'USecDateTimeProperty '
'must have 6 decimal places '
'or none at all: {}'.format(value)
)
else:
fmt = HISTORICAL_DATETIME_FORMAT
try:
result = datetime.datetime.strptime(value, fmt)
except ValueError as e:
raise ValueError(
'Invalid date/time {0!r} [{1}]'.format(value, e))
_assert(result.tzinfo is None,
"USecDateTimeProperty shouldn't ever return offset-aware!")
return result
def _unwrap(self, value):
_assert(value.tzinfo is None,
"Can't set a USecDateTimeProperty to an offset-aware datetime")
return value, safe_strftime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
re_trans_datetime = re.compile("""
^
(\d{4}) # year
-
(0[1-9]|1[0-2]) # month
-
([12]\d|0[1-9]|3[01]) # day
T
([01]\d|2[0-3]) # hour
:
[0-5]\d # minute
:
[0-5]\d # second
(\.\d{6})? # millisecond (optional)
Z # timezone
$
""", re.VERBOSE)
# this is like jsonobject.api.re_datetime,
# but without the "time" part being optional
# i.e. I just removed (...)? surrounding the second two lines
re_loose_datetime = re.compile("""
^
(\d{4}) # year
\D?
(0[1-9]|1[0-2]) # month
\D?
([12]\d|0[1-9]|3[01]) # day
[ T]
([01]\d|2[0-3]) # hour
\D?
([0-5]\d) # minute
\D?
([0-5]\d)? # second
\D?
(\d{3,6})? # millisecond
([zZ]|([\+-])([01]\d|2[0-3])\D?([0-5]\d)?)? # timezone
$
""", re.VERBOSE)
class USecDateTimeMeta(object):
update_properties = {
datetime.datetime: DateTimeProperty,
}
string_conversions = (
(re_date, datetime.date),
(re_time, datetime.time),
(re_trans_datetime, datetime.datetime),
(re_decimal, decimal.Decimal),
)
class JsonObject(OldJsonObject):
Meta = USecDateTimeMeta
class StrictJsonObject(JsonObject):
_allow_dynamic_properties = False
| from __future__ import absolute_import
import datetime
import decimal
from jsonobject.base_properties import AbstractDateProperty
from jsonobject import *
import re
from jsonobject.api import re_date, re_time, re_decimal
from dimagi.utils.dates import safe_strftime
from dimagi.utils.parsing import ISO_DATETIME_FORMAT
from django.conf import settings
OldJsonObject = JsonObject
OldDateTimeProperty = DateTimeProperty
HISTORICAL_DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
try:
# this isn't actually part of dimagi-utils
# but this is temporary and don't want to do a bigger reorg
from corehq.util.soft_assert import soft_assert
except ImportError:
def _assert(assertion, msg):
assert assertion, msg
else:
_assert = soft_assert('{}@{}'.format('droberts', 'dimagi.com'),
# should still fail in tests
fail_if_debug=settings.UNIT_TESTING)
class DateTimeProperty(AbstractDateProperty):
"""
Accepts and produces ISO8601 string in UTC (with the Z suffix)
Accepts with or without microseconds (must have all six digits if any)
Always produces with microseconds
(USec stands for microsecond)
"""
_type = datetime.datetime
def _wrap(self, value):
if '.' in value:
fmt = ISO_DATETIME_FORMAT
if len(value.split('.')[-1]) != 7:
raise ValueError(
'USecDateTimeProperty '
'must have 6 decimal places '
'or none at all: {}'.format(value)
)
else:
fmt = HISTORICAL_DATETIME_FORMAT
try:
result = datetime.datetime.strptime(value, fmt)
except ValueError as e:
raise ValueError(
'Invalid date/time {0!r} [{1}]'.format(value, e))
_assert(result.tzinfo is None,
"USecDateTimeProperty shouldn't ever return offset-aware!")
return result
def _unwrap(self, value):
_assert(value.tzinfo is None,
"Can't set a USecDateTimeProperty to an offset-aware datetime")
return value, safe_strftime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
re_trans_datetime = re.compile("""
^
(\d{4}) # year
-
(0[1-9]|1[0-2]) # month
-
([12]\d|0[1-9]|3[01]) # day
T
([01]\d|2[0-3]) # hour
:
[0-5]\d # minute
:
[0-5]\d # second
(\.\d{6})? # millisecond (optional)
Z # timezone
$
""", re.VERBOSE)
# this is like jsonobject.api.re_datetime,
# but without the "time" part being optional
# i.e. I just removed (...)? surrounding the second two lines
re_loose_datetime = re.compile("""
^
(\d{4}) # year
\D?
(0[1-9]|1[0-2]) # month
\D?
([12]\d|0[1-9]|3[01]) # day
[ T]
([01]\d|2[0-3]) # hour
\D?
([0-5]\d) # minute
\D?
([0-5]\d)? # second
\D?
(\d{3,6})? # millisecond
([zZ]|([\+-])([01]\d|2[0-3])\D?([0-5]\d)?)? # timezone
$
""", re.VERBOSE)
class USecDateTimeMeta(object):
update_properties = {
datetime.datetime: DateTimeProperty,
}
string_conversions = (
(re_date, datetime.date),
(re_time, datetime.time),
(re_trans_datetime, datetime.datetime),
(re_decimal, decimal.Decimal),
)
class JsonObject(OldJsonObject):
Meta = USecDateTimeMeta
| Python | 0.004985 |
9ad697081f62b63f966f998b6618bfb543717f4a | Allow to pass more attributes. | webapp/bootstrap/helpers.py | webapp/bootstrap/helpers.py | # -*- coding: utf-8 -*-
from byceps.blueprints.brand.models import Brand
from byceps.blueprints.orga.models import OrgaFlag, OrgaTeam, \
Membership as OrgaTeamMembership
from byceps.blueprints.party.models import Party
from byceps.blueprints.seating.models import Area as SeatingArea, \
Category as SeatingCategory, Point, Seat
from byceps.blueprints.snippet.models import Snippet, SnippetVersion, \
Mountpoint as SnippetMountpoint
from byceps.blueprints.terms.models import Version as TermsVersion
from byceps.blueprints.user.models import User
from byceps.blueprints.user_group.models import UserGroup
from .util import add_to_database
# -------------------------------------------------------------------- #
# brands
@add_to_database
def create_brand(id, title):
return Brand(id=id, title=title)
def get_brand(id):
return Brand.query.get(id)
# -------------------------------------------------------------------- #
# parties
@add_to_database
def create_party(**kwargs):
return Party(**kwargs)
def get_party(id):
return Party.query.get(id)
# -------------------------------------------------------------------- #
# users
@add_to_database
def create_user(screen_name, email_address, password, *, enabled=False):
user = User.create(screen_name, email_address, password)
user.enabled = enabled
return user
def get_user(screen_name):
return User.query.filter_by(screen_name=screen_name).one()
# -------------------------------------------------------------------- #
# orga teams
@add_to_database
def promote_orga(brand, user):
return OrgaFlag(brand=brand, user=user)
@add_to_database
def create_orga_team(id, title):
return OrgaTeam(id=id, title=title)
@add_to_database
def assign_user_to_orga_team(user, orga_team, party, *, duties=None):
membership = OrgaTeamMembership(orga_team=orga_team, party=party, user=user)
if duties:
membership.duties = duties
return membership
def get_orga_team(id):
return OrgaTeam.query.get(id)
# -------------------------------------------------------------------- #
# user groups
@add_to_database
def create_user_group(creator, title, description=None):
return UserGroup(creator, title, description)
# -------------------------------------------------------------------- #
# snippets
@add_to_database
def create_snippet(party, name):
return Snippet(party=party, name=name)
@add_to_database
def create_snippet_version(snippet, creator, title, body):
return SnippetVersion(snippet=snippet, creator=creator, title=title, body=body)
@add_to_database
def mount_snippet(snippet, endpoint_suffix, url_path):
return SnippetMountpoint(snippet=snippet, endpoint_suffix=endpoint_suffix, url_path=url_path)
# -------------------------------------------------------------------- #
# terms
@add_to_database
def create_terms_version(brand, creator, body):
return TermsVersion(brand=brand, creator=creator, body=body)
# -------------------------------------------------------------------- #
# seating
@add_to_database
def create_seating_area(party, slug, title, **kwargs):
return SeatingArea(party=party, slug=slug, title=title, **kwargs)
@add_to_database
def create_seat_category(party, title):
return SeatingCategory(party=party, title=title)
@add_to_database
def create_seat(area, coord_x, coord_y, category):
seat = Seat(area=area, category=category)
seat.coords = Point(x=coord_x, y=coord_y)
return seat
| # -*- coding: utf-8 -*-
from byceps.blueprints.brand.models import Brand
from byceps.blueprints.orga.models import OrgaFlag, OrgaTeam, \
Membership as OrgaTeamMembership
from byceps.blueprints.party.models import Party
from byceps.blueprints.seating.models import Area as SeatingArea, \
Category as SeatingCategory, Point, Seat
from byceps.blueprints.snippet.models import Snippet, SnippetVersion, \
Mountpoint as SnippetMountpoint
from byceps.blueprints.terms.models import Version as TermsVersion
from byceps.blueprints.user.models import User
from byceps.blueprints.user_group.models import UserGroup
from .util import add_to_database
# -------------------------------------------------------------------- #
# brands
@add_to_database
def create_brand(id, title):
return Brand(id=id, title=title)
def get_brand(id):
return Brand.query.get(id)
# -------------------------------------------------------------------- #
# parties
@add_to_database
def create_party(**kwargs):
return Party(**kwargs)
def get_party(id):
return Party.query.get(id)
# -------------------------------------------------------------------- #
# users
@add_to_database
def create_user(screen_name, email_address, password, *, enabled=False):
user = User.create(screen_name, email_address, password)
user.enabled = enabled
return user
def get_user(screen_name):
return User.query.filter_by(screen_name=screen_name).one()
# -------------------------------------------------------------------- #
# orga teams
@add_to_database
def promote_orga(brand, user):
return OrgaFlag(brand=brand, user=user)
@add_to_database
def create_orga_team(id, title):
return OrgaTeam(id=id, title=title)
@add_to_database
def assign_user_to_orga_team(user, orga_team, party, *, duties=None):
membership = OrgaTeamMembership(orga_team=orga_team, party=party, user=user)
if duties:
membership.duties = duties
return membership
def get_orga_team(id):
return OrgaTeam.query.get(id)
# -------------------------------------------------------------------- #
# user groups
@add_to_database
def create_user_group(creator, title, description=None):
return UserGroup(creator, title, description)
# -------------------------------------------------------------------- #
# snippets
@add_to_database
def create_snippet(party, name):
return Snippet(party=party, name=name)
@add_to_database
def create_snippet_version(snippet, creator, title, body):
return SnippetVersion(snippet=snippet, creator=creator, title=title, body=body)
@add_to_database
def mount_snippet(snippet, endpoint_suffix, url_path):
return SnippetMountpoint(snippet=snippet, endpoint_suffix=endpoint_suffix, url_path=url_path)
# -------------------------------------------------------------------- #
# terms
@add_to_database
def create_terms_version(brand, creator, body):
return TermsVersion(brand=brand, creator=creator, body=body)
# -------------------------------------------------------------------- #
# seating
@add_to_database
def create_seating_area(party, slug, title):
return SeatingArea(party=party, slug=slug, title=title)
@add_to_database
def create_seat_category(party, title):
return SeatingCategory(party=party, title=title)
@add_to_database
def create_seat(area, coord_x, coord_y, category):
seat = Seat(area=area, category=category)
seat.coords = Point(x=coord_x, y=coord_y)
return seat
| Python | 0 |
7e45a26f86095ee2f6972e08697aa132e642636e | Test for types in __mul__ | csympy/tests/test_arit.py | csympy/tests/test_arit.py | from nose.tools import raises
from csympy import Symbol, Integer
def test_arit1():
x = Symbol("x")
y = Symbol("y")
e = x + y
e = x * y
e = Integer(2)*x
e = 2*x
def test_arit2():
x = Symbol("x")
y = Symbol("y")
assert x+x == Integer(2) * x
assert x+x != Integer(3) * x
assert x+x == 2 * x
@raises(TypeError)
def test_arit3():
x = Symbol("x")
y = Symbol("y")
e = "x"*x
| from csympy import Symbol, Integer
def test_arit1():
x = Symbol("x")
y = Symbol("y")
e = x + y
e = x * y
e = Integer(2)*x
e = 2*x
def test_arit2():
x = Symbol("x")
y = Symbol("y")
assert x+x == Integer(2) * x
assert x+x != Integer(3) * x
assert x+x == 2 * x
| Python | 0.000004 |
852c62eef3d9beea43927f75b1a8aaa021ce25f9 | Add a docstring to test helper | test/selenium/src/lib/test_helpers.py | test/selenium/src/lib/test_helpers.py | # Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: jernej@reciprocitylabs.com
# Maintained By: jernej@reciprocitylabs.com
"""
Utility classes for page objects used in tests.
Details:
Most of the tests require a sequence of primitive methods of the page
object. If the sequence repeats itself among tests, it should be shared in
this module.
"""
import uuid
from lib import base
from lib.constants.test import modal_create_new
from lib.constants.test import modal_custom_attribute
class HtmlParser(base.Test):
"""The HtmlParser class simulates what happens with (non-rich)text in HTML.
"""
@staticmethod
def parse_text(text):
"""Simulates text parsed by html
Args:
text (str)
"""
return text.replace(" ", "").replace("\n", "")
class ModalNewProgramPage(base.Test):
"""Methods for simulating common user actions"""
@staticmethod
def enter_test_data(modal):
"""Fills out all fields in the lhn_modal
Args:
modal (lib.page.lhn_modal.new_program.EditProgramModal)
"""
unique_id = str(uuid.uuid4())
modal.enter_title(modal_create_new.Program.TITLE + unique_id)
modal.enter_description(
modal_create_new.Program.DESCRIPTION_SHORT + unique_id)
modal.enter_notes(
modal_create_new.Program.NOTES_SHORT + unique_id)
modal.enter_code(modal_create_new.Program.CODE + unique_id)
modal.filter_and_select_primary_contact("example")
modal.filter_and_select_secondary_contact("example")
modal.enter_program_url(
unique_id + modal_create_new.Program.PROGRAM_URL)
modal.enter_reference_url(
unique_id + modal_create_new.Program.REFERENCE_URL)
@staticmethod
def set_start_end_dates(modal, day_start, day_end):
"""
Sets the dates from the datepicker in the new program/edit modal.
Args:
modal (lib.page.lhn_modal.new_program.EditProgramModal)
day_start (int): for more info see
base.DatePicker.select_day_in_current_month
day_end (int): for more info see
base.DatePicker.select_day_in_current_month
"""
modal.enter_effective_date_start_month(day_start)
modal.enter_stop_date_end_month(day_end)
class ModalNewProgramCustomAttribute(base.Test):
@staticmethod
def enter_test_data(modal):
"""Fills out all fields in the lhn_modal
Args:
modal (lib.page.widget.custom_attribute.NewCustomAttributeModal)
"""
modal.enter_title(modal_custom_attribute.Program.TITLE)
modal.enter_inline_help(modal_custom_attribute.Program.INLINE_HELP)
modal.enter_placeholder(modal_custom_attribute.Program.PLACEHOLDER)
| # Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: jernej@reciprocitylabs.com
# Maintained By: jernej@reciprocitylabs.com
"""
Utility classes for page objects used in tests.
Details:
Most of the tests require a sequence of primitive methods of the page
object. If the sequence repeats itself among tests, it should be shared in
this module.
"""
import uuid
from lib import base
from lib.constants.test import modal_create_new
from lib.constants.test import modal_custom_attribute
class HtmlParser(base.Test):
@staticmethod
def parse_text(text):
"""Simulates text parsed by html
Args:
text (str)
"""
return text.replace(" ", "").replace("\n", "")
class ModalNewProgramPage(base.Test):
"""Methods for simulating common user actions"""
@staticmethod
def enter_test_data(modal):
"""Fills out all fields in the lhn_modal
Args:
modal (lib.page.lhn_modal.new_program.EditProgramModal)
"""
unique_id = str(uuid.uuid4())
modal.enter_title(modal_create_new.Program.TITLE + unique_id)
modal.enter_description(
modal_create_new.Program.DESCRIPTION_SHORT + unique_id)
modal.enter_notes(
modal_create_new.Program.NOTES_SHORT + unique_id)
modal.enter_code(modal_create_new.Program.CODE + unique_id)
modal.filter_and_select_primary_contact("example")
modal.filter_and_select_secondary_contact("example")
modal.enter_program_url(
unique_id + modal_create_new.Program.PROGRAM_URL)
modal.enter_reference_url(
unique_id + modal_create_new.Program.REFERENCE_URL)
@staticmethod
def set_start_end_dates(modal, day_start, day_end):
"""
Sets the dates from the datepicker in the new program/edit modal.
Args:
modal (lib.page.lhn_modal.new_program.EditProgramModal)
day_start (int): for more info see
base.DatePicker.select_day_in_current_month
day_end (int): for more info see
base.DatePicker.select_day_in_current_month
"""
modal.enter_effective_date_start_month(day_start)
modal.enter_stop_date_end_month(day_end)
class ModalNewProgramCustomAttribute(base.Test):
@staticmethod
def enter_test_data(modal):
"""Fills out all fields in the lhn_modal
Args:
modal (lib.page.widget.custom_attribute.NewCustomAttributeModal)
"""
modal.enter_title(modal_custom_attribute.Program.TITLE)
modal.enter_inline_help(modal_custom_attribute.Program.INLINE_HELP)
modal.enter_placeholder(modal_custom_attribute.Program.PLACEHOLDER)
| Python | 0.000001 |
cfca983b909c4919d8478e81fca66bf01af80c74 | Fix failing tests. | test/services/updater/test_updater.py | test/services/updater/test_updater.py | import os
import logging
import time
from threading import Semaphore, Thread, Event
from unittest.mock import patch, Mock
import git
from weavelib.messaging import Receiver, Sender
from weavelib.rpc import RPCClient
from weaveserver.core.services import ServiceManager
from weaveserver.services.updater.service import UpdaterService, UpdateScanner
from weaveserver.services.updater.service import Updater
AUTH = {
"auth1": {
"type": "SYSTEM",
"appid": "appmgr",
"package": "p",
},
"auth2": {
"appid": "appid2"
}
}
class TestUpdateScanner(object):
def setup_method(self):
self.update_check_freq_backup = UpdateScanner.UPDATE_CHECK_FREQ
UpdateScanner.UPDATE_CHECK_FREQ = 5
os.environ["USE_FAKE_REDIS"] = "TRUE"
self.service_manager = ServiceManager()
self.service_manager.apps.update(AUTH)
self.service_manager.start_services(["messaging", "appmanager"])
def teardown_method(self):
del os.environ["USE_FAKE_REDIS"]
self.service_manager.stop()
def test_simple_update(self):
mock_repo = Mock()
mock_repo.needs_pull = Mock(return_value=False)
UpdateScanner.list_repos = lambda x, y: ["dir"]
UpdateScanner.get_repo = lambda x, y: mock_repo
started = Event()
service = UpdaterService("auth1", None)
service.before_service_start()
service.notify_start = started.set
Thread(target=service.on_service_start).start()
started.wait()
while service.get_status() != "No updates available.":
time.sleep(1)
mock_repo.asssert_called_with("dir")
mock_repo.needs_pull = Mock(return_value=True)
time.sleep(8)
assert service.get_status() == "Updates available."
service.on_service_stop()
def test_trigger_update_when_no_update(self):
UpdateScanner.UPDATE_CHECK_FREQ = 1000
mock_repo = Mock()
mock_repo.needs_pull = Mock(return_value=False)
UpdateScanner.list_repos = lambda x, y: ["dir"]
UpdateScanner.get_repo = lambda x, y: mock_repo
started = Event()
service = UpdaterService("auth1", None)
service.before_service_start()
service.notify_start = started.set
Thread(target=service.on_service_start).start()
started.wait()
service.update_status("dummy")
rpc = RPCClient(service.rpc.info_message)
rpc.start()
print("RPC:", rpc["perform_upgrade"](_block=True))
assert service.get_status() == "No updates available."
service.on_service_stop()
| import os
import logging
import time
from threading import Semaphore, Thread, Event
from unittest.mock import patch, Mock
import git
from weavelib.messaging import Receiver, Sender
from weavelib.rpc import RPCClient
from weaveserver.core.services import ServiceManager
from weaveserver.services.updater.service import UpdaterService, UpdateScanner
from weaveserver.services.updater.service import Updater
AUTH = {
"auth1": {
"type": "SYSTEM",
"appid": "appmgr",
"package": "p",
},
"auth2": {
"appid": "appid2"
}
}
class TestUpdateScanner(object):
def setup_method(self):
self.update_check_freq_backup = UpdateScanner.UPDATE_CHECK_FREQ
UpdateScanner.UPDATE_CHECK_FREQ = 5
os.environ["USE_FAKE_REDIS"] = "TRUE"
self.service_manager = ServiceManager()
self.service_manager.apps = AUTH
self.service_manager.start_services(["messaging", "appmanager"])
def teardown_method(self):
del os.environ["USE_FAKE_REDIS"]
self.service_manager.stop()
def test_simple_update(self):
mock_repo = Mock()
mock_repo.needs_pull = Mock(return_value=False)
UpdateScanner.list_repos = lambda x, y: ["dir"]
UpdateScanner.get_repo = lambda x, y: mock_repo
started = Event()
service = UpdaterService("auth1", None)
service.before_service_start()
service.notify_start = started.set
Thread(target=service.on_service_start).start()
started.wait()
while service.get_status() != "No updates available.":
time.sleep(1)
mock_repo.asssert_called_with("dir")
mock_repo.needs_pull = Mock(return_value=True)
time.sleep(8)
assert service.get_status() == "Updates available."
service.on_service_stop()
def test_trigger_update_when_no_update(self):
UpdateScanner.UPDATE_CHECK_FREQ = 1000
mock_repo = Mock()
mock_repo.needs_pull = Mock(return_value=False)
UpdateScanner.list_repos = lambda x, y: ["dir"]
UpdateScanner.get_repo = lambda x, y: mock_repo
started = Event()
service = UpdaterService("auth1", None)
service.before_service_start()
service.notify_start = started.set
Thread(target=service.on_service_start).start()
started.wait()
service.update_status("dummy")
rpc = RPCClient(service.rpc.info_message)
rpc.start()
print("RPC:", rpc["perform_upgrade"](_block=True))
assert service.get_status() == "No updates available."
service.on_service_stop()
| Python | 0.000003 |
665ece3f699d6a62be0d9c859532ae73e250d86f | Update __init__.py | wellapplication/__init__.py | wellapplication/__init__.py | # -*- coding: utf-8 -*-
__version__ = '0.2.24'
__author__ = 'Paul Inkenbrandt'
__name__ = 'wellapplication'
from transport import *
from usgs import *
from chem import WQP
from mesopy import *
from graphs import piper, fdc, gantt
import MannKendall
import avgMeths
| # -*- coding: utf-8 -*-
__version__ = '0.2.24'
__author__ = 'Paul Inkenbrandt'
__name__ = 'wellapplication'
from transport import *
from usgs import *
from chem import WQP
from graphs import piper, fdc, gantt
import MannKendall
import avgMeths
| Python | 0.000072 |
f5e459e369f1a00d3407f7993e427b676047da21 | bump version | django_anger/__init__.py | django_anger/__init__.py | __version__ = "0.1.1-20130516"
| __version__ = "0.1-20130513.try3"
| Python | 0 |
ed37e4fc5968c7ab17fa55aca3958e72c963ea78 | replace with right code | demo/sort/sort.py | demo/sort/sort.py | #!/usr/bin/python2.7 -OOBRtt
import errno
import os
import operator
import sys
from pysec import alg
from pysec.io import fd
from pysec.utils import ilen, xrange
from pysec import tb
from pysec.xsplit import xbounds
# tb.set_excepthook(tb.short_tb)
BUFSIZE = 4096
MAX_MEMORY_SORT = 10240
TMP_DIR = os.path.abspath('./tmp')
try:
os.mkdir(TMP_DIR)
except OSError, ex:
if ex.errno != errno.EEXIST:
raise
def sort_in_memory(fp, start, end):
lines = [fp[start:end] for start, end in fp.xlines(start, end, keep_eol=1, size=BUFSIZE)]
lines.sort()
return lines
def _main():
path = os.path.abspath(sys.argv[1])
fno = 0
with fd.File.open(path, fd.FO_READEX) as txt:
# split and sort
prev_end = offset = 0
for lineno, (start, end) in enumerate(txt.xlines(keep_eol=1, size=BUFSIZE)):
if end - offset > MAX_MEMORY_SORT:
if end - prev_end > MAX_MEMORY_SORT:
print >> sys.stderr, "[ERROR]"
print >> sys.stderr, "Line %d bigger than MAX_MEMORY_SORT limit" % lineno
print >> sys.stderr, "Line's length: %d" % (end - prev_end)
print >> sys.stderr, "MAX_MEMORY_SORT limit: %d" % MAX_MEMORY_SORT
return 1
with fd.File.open(os.path.join(TMP_DIR, '%s.srt' % str(fno)), fd.FO_WRITE) as fout:
fout.truncate()
for line in sort_in_memory(txt, offset, prev_end):
fout.write(line)
fno += 1
offset = end
prev_end = end
else:
with fd.File.open(os.path.join(TMP_DIR, '%s.srt' % str(fno)), fd.FO_WRITE) as fout:
fout.truncate()
for line in sort_in_memory(txt, offset, prev_end):
fout.write(line)
fno += 1
splits = fno
# merge and sort
files = [fd.File.open(os.path.join(TMP_DIR, '%s.srt' % str(fno)), fd.FO_READ).lines()
for fno in xrange(0, splits)]
lines = [f.next() for f in files]
while files:
fno, line = min(enumerate(lines), key=operator.itemgetter(1))
print line
try:
lines[fno] = files[fno].next()
except StopIteration:
del lines[fno]
del files[fno]
for i in xrange(0, splits):
os.unlink(os.path.join(TMP_DIR, '%s.srt' % str(i)))
if __name__ == '__main__':
ret = _main()
os.rmdir(TMP_DIR)
| #!/usr/bin/python2.7 -OOBRtt
import os
import sys
from pysec import alg
from pysec.io import fd
from pysec.xsplit import xbounds
def find_ck(fp, sub, chunk):
buf = fp[:chunk]
offset = len(buf)
sub_len = len(sub)
while buf:
pos = alg.find(sub)
if pos >= 0:
yield pos
buf = buf[pos+1:]
else:
offset = offset - sub_len
buf = buf[offset:offset+chunk-sub_len]
if __name__ == '__main__':
path = os.path.abspath(sys.argv[1])
with fd.File.open(path, fd.FO_READEX) as txt:
for lineno, (start, end) in enumerate(xbounds(txt, sep='\n', keep_sep=1, find=lambda t, s: find_ck(t, s, 4096))):
print lineno
| Python | 0.999602 |
34db4460aa67fc9abfaaaf2c48a6ea7c5b801ff0 | Fix for libtest for cpython 2.6 / jython / pypy | examples/libtest/imports/__init__.py | examples/libtest/imports/__init__.py |
exec_order = []
class Imports(object):
exec_order = exec_order
def __init__(self):
self.v = 1
imports = Imports()
overrideme = "not overridden"
from . import cls as loccls
# This is not valid since Python 2.6!
try:
from .imports import cls as upcls
except ImportError:
upcls = loccls
def conditional_func():
return "not overridden"
if True:
def conditional_func():
return "overridden"
|
exec_order = []
class Imports(object):
exec_order = exec_order
def __init__(self):
self.v = 1
imports = Imports()
overrideme = "not overridden"
from . import cls as loccls
from .imports import cls as upcls
def conditional_func():
return "not overridden"
if True:
def conditional_func():
return "overridden"
| Python | 0.000001 |
e354eba380c6df2f0a14e324da9cbe6467494ddc | add - ModelIORedis & build up interface. | versus/tools/modelIO.py | versus/tools/modelIO.py | """
Class family for Model IO classes to handle read/write of learning models
"""
import redis
class ModelIO(object):
def __init__(self, **kwargs):
pass
def write(self, model):
raise NotImplementedError()
def validate(self, model):
""" Ensures that the model is valid. """
pass
def genkey(self, model):
""" Generates a key from the model. Presumes model is valid. """
return str(model)
def package(self, model):
""" Prepares the model for writing. """
return model
class ModelIORedis(ModelIO):
""" Performs IO to redis. """
def __init__(self, **kwargs):
super(ModelIORedis, self).__init__(**kwargs)
def write(self, model):
pool = redis.ConnectionPool(host='localhost', port=6379, db=0)
r = redis.Redis(connection_pool=pool)
r.set(self.genkey(model), self.package(model)) | """
Class family for Model IO classes to handle read/write of learning models
"""
class ModelIO(object):
def __init__(self):
pass
def write(self):
raise NotImplementedError() | Python | 0 |
c1135762c32526b3dc70c186ab97404ac27422e6 | Handle missing plugin directory | repository.py | repository.py | import re
import os
import ttimer
import logging
import subprocess
import plugin
import static
class Repository():
def __init__(self, path):
self.ttl = static.ttl
self.path = path
self.system = static.system.lower()
self.logger = logging.getLogger('default')
self.load_plugins()
def pop_plugin(self, id):
i = 0
for p in self.plugins:
if p.id == id:
return self.plugins.pop(i)
i += 1
def get_plugin(self, id):
i = 0
for p in self.plugins:
if p.id == id:
return p
i += 1
return None
def get_plugins(self):
if hasattr(self, 'plugins'):
return self.plugins
return None
def start_plugins(self):
for p in self.plugins:
if not p.status(): p.start()
def reload_plugins(self):
old_ids = []
new_ids = []
# Build up lists with identifers before and after reload
for new in self.read_dir():
new_ids += [new]
for old in self.get_plugins():
old_ids += [old.id]
removed_plugins = set(old_ids) - set(new_ids)
added_plugins = set(new_ids) - set(old_ids)
# Stop threads for plugins that have been removed
for p in removed_plugins:
old_plugin = self.pop_plugin(p)
if old_plugin:
old_plugin.stop()
del old_plugin
# Add new plugins to list
for p in added_plugins:
self.logger.info('Adding ' + p + ' to polling list')
self.plugins += [plugin.Plugin(p, self.path, self.ttl)]
# Start stopped plugins
self.start_plugins()
def read_dir(self):
try:
files = os.listdir(self.path)
plugins = []
for file in files:
full_file_path = self.path + '/' + file
if os.path.isfile(full_file_path):
dotfile = re.compile('^\.')
anysystem = re.compile('-noarch\.[\w\d]+$')
cursystem = re.compile('-' + self.system + '\.[\w\d]+$')
# Skip dot-files
if dotfile.search(file):
self.logger.debug('Skipping dot-file: ' + file)
continue
# Add plugin to polling list if os or any match
if cursystem.search(file) or anysystem.search(file):
plugins += [file]
return plugins
except OSError as e:
self.logger.error('Failed to read directory \'' + self.path + '\': ' + e.strerror)
return None
def load_plugins(self):
files = self.read_dir()
self.plugins = []
if files:
for file in files:
try:
self.plugins += [plugin.Plugin(file, self.path, self.ttl)]
except plugin.PluginError as e:
self.logger.error(e)
self.logger.info('Loaded ' + str(len(self.plugins)) + ' plugins from ' + self.path)
return self.plugins
def config_load(self):
return # Do overrides to config values
def config_save(self):
return # Save runtime config to file
| import re
import os
import ttimer
import logging
import subprocess
import plugin
import static
class Repository():
def __init__(self, path):
self.ttl = static.ttl
self.path = path
self.system = static.system.lower()
self.logger = logging.getLogger('default')
self.load_plugins()
def pop_plugin(self, id):
i = 0
for p in self.plugins:
if p.id == id:
return self.plugins.pop(i)
i += 1
def get_plugin(self, id):
i = 0
for p in self.plugins:
if p.id == id:
return p
i += 1
return None
def get_plugins(self):
if hasattr(self, 'plugins'):
return self.plugins
return None
def start_plugins(self):
for p in self.plugins:
if not p.status(): p.start()
def reload_plugins(self):
old_ids = []
new_ids = []
# Build up lists with identifers before and after reload
for new in self.read_dir():
new_ids += [new]
for old in self.get_plugins():
old_ids += [old.id]
removed_plugins = set(old_ids) - set(new_ids)
added_plugins = set(new_ids) - set(old_ids)
# Stop threads for plugins that have been removed
for p in removed_plugins:
old_plugin = self.pop_plugin(p)
if old_plugin:
old_plugin.stop()
del old_plugin
# Add new plugins to list
for p in added_plugins:
self.logger.info('Adding ' + p + ' to polling list')
self.plugins += [plugin.Plugin(p, self.path, self.ttl)]
# Start stopped plugins
self.start_plugins()
def read_dir(self):
try:
files = os.listdir(self.path)
plugins = []
for file in files:
full_file_path = self.path + '/' + file
if os.path.isfile(full_file_path):
dotfile = re.compile('^\.')
anysystem = re.compile('-noarch\.[\w\d]+$')
cursystem = re.compile('-' + self.system + '\.[\w\d]+$')
# Skip dot-files
if dotfile.search(file):
self.logger.debug('Skipping dot-file: ' + file)
continue
# Add plugin to polling list if os or any match
if cursystem.search(file) or anysystem.search(file):
plugins += [file]
return plugins
except OSError as e:
self.logger.error('Failed to read directory \'' + self.path + '\': ' + e.strerror)
return None
def load_plugins(self):
files = self.read_dir()
self.plugins = []
for file in files:
try:
self.plugins += [plugin.Plugin(file, self.path, self.ttl)]
except plugin.PluginError as e:
self.logger.error(e)
self.logger.info('Loaded ' + str(len(self.plugins)) + ' plugins from ' + self.path)
return self.plugins
def config_load(self):
return # Do overrides to config values
def config_save(self):
return # Save runtime config to file
| Python | 0.000001 |
4b9da366b4169caf8802dcbbf20168512cc4e12e | Fix typo | ostrich/stages/stage_30_clone_osa.py | ostrich/stages/stage_30_clone_osa.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ostrich import steps
from ostrich import utils
def _ansible_debug(r):
if r.complete['ansible-debug'] == 'yes':
return 1
return 0
def get_steps(r):
"""Clone OSA."""
nextsteps = []
nextsteps.append(
steps.SimpleCommandStep(
'git-clone-osa',
('git clone %s/openstack/openstack-ansible '
'/opt/openstack-ansible'
% r.complete['git-mirror-openstack']),
**r.kwargs
)
)
nextsteps.append(
steps.KwargsStep(
'kwargs-osa',
r,
{
'cwd': '/opt/openstack-ansible',
'env': {
'ANSIBLE_ROLE_FETCH_MODE': 'git-clone',
'ANSIBLE_DEBUG': _ansible_debug(r),
'ANSIBLE_KEEP_REMOTE_FILES': '1'
}
},
**r.kwargs
)
)
if utils.is_ironic(r):
nextsteps.append(
steps.KwargsStep(
'kwargs-ironic',
r,
{
'env': {
'BOOTSTRAP_OPTS': 'nova_virt_type=ironic'
}
},
**r.kwargs
)
)
return nextsteps
| # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ostrich import steps
from ostrich import utils
def _ansible_debug(r):
if r.complete['ansbile-debug'] == 'yes':
return 1
return 0
def get_steps(r):
"""Clone OSA."""
nextsteps = []
nextsteps.append(
steps.SimpleCommandStep(
'git-clone-osa',
('git clone %s/openstack/openstack-ansible '
'/opt/openstack-ansible'
% r.complete['git-mirror-openstack']),
**r.kwargs
)
)
nextsteps.append(
steps.KwargsStep(
'kwargs-osa',
r,
{
'cwd': '/opt/openstack-ansible',
'env': {
'ANSIBLE_ROLE_FETCH_MODE': 'git-clone',
'ANSIBLE_DEBUG': _ansible_debug(r),
'ANSIBLE_KEEP_REMOTE_FILES': '1'
}
},
**r.kwargs
)
)
if utils.is_ironic(r):
nextsteps.append(
steps.KwargsStep(
'kwargs-ironic',
r,
{
'env': {
'BOOTSTRAP_OPTS': 'nova_virt_type=ironic'
}
},
**r.kwargs
)
)
return nextsteps
| Python | 0.999999 |
62f137072aa26999ad30dda01fe2a736c3e00495 | exclude in admin | django_typograf/admin.py | django_typograf/admin.py | from django.contrib import admin
from django_typograf.utils import get_typograf_field_name, get_typograf_hash_field_name
class TypografAdmin(admin.ModelAdmin):
""" Admin class for hide typograf fields from admin site """
def _exclude(self, obj=None):
""" Mark typograf fields as exclude """
exclude = ()
if obj:
exclude += tuple((get_typograf_field_name(field) for field in obj._meta.typografed_fields))
exclude += tuple((get_typograf_hash_field_name(field) for field in obj._meta.typografed_fields))
return exclude
def get_form(self, request, obj=None, **kwargs):
exclude = self.exclude or ()
exclude += self._exclude(obj)
kwargs.update(dict(exclude=exclude))
return super().get_form(request, obj, **kwargs)
| from django.contrib import admin
from django_typograf.utils import get_typograf_field_name, get_typograf_hash_field_name
class TypografAdmin(admin.ModelAdmin):
""" Admin class for hide typograf fields from admin site """
def _exclude(self, obj=None):
""" Mark typograf fields as exclude """
exclude = ()
if obj:
exclude += tuple((get_typograf_field_name(field) for field in obj._meta.typografed_fields))
exclude += tuple((get_typograf_hash_field_name(field) for field in obj._meta.typografed_fields))
return exclude
def get_form(self, request, obj=None, **kwargs):
self.exclude = self.exclude or ()
self.exclude += self._exclude(obj)
return super().get_form(request, obj, **kwargs)
| Python | 0.00006 |
1ba74fc225d71bc071827291d9942738ef56dd1f | Correct login view | apps/__init__.py | apps/__init__.py | # -*- coding: utf-8 -*-
from flask import Flask
from flask.ext.login import LoginManager
from .models import db, User
from .views import module
__all__ = ('create_app',)
def _init_db(app):
db.app = app
db.init_app(app)
def _init_jinja(app):
pass
def _init_login(app):
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.user_loader(User.get)
login_manager.login_view = '/signin'
def create_app(name=None):
if name is None:
name = __name__
app = Flask(name)
app.config.from_object('config')
_init_db(app)
_init_jinja(app)
_init_login(app)
app.register_blueprint(module)
return app
| # -*- coding: utf-8 -*-
from flask import Flask
from flask.ext.login import LoginManager
from .models import db, User
from .views import module
__all__ = ('create_app',)
def _init_db(app):
db.app = app
db.init_app(app)
def _init_jinja(app):
pass
def _init_login(app):
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.user_loader(User.get)
login_manager.login_view = '/login'
def create_app(name=None):
if name is None:
name = __name__
app = Flask(name)
app.config.from_object('config')
_init_db(app)
_init_jinja(app)
_init_login(app)
app.register_blueprint(module)
return app
| Python | 0.000001 |
586418860c0441eaebadd0fe79989d6d9f90fa28 | Fix for the component lookup error in vocabulary | src/bda/plone/productshop/vocabularies.py | src/bda/plone/productshop/vocabularies.py | from zope.interface import directlyProvides
from zope.schema.interfaces import IVocabularyFactory
from zope.schema.vocabulary import (
SimpleVocabulary,
SimpleTerm,
)
from zope.i18nmessageid import MessageFactory
from .utils import (
dotted_name,
available_variant_aspects,
)
#added by espen
from zope.component import getUtility
from plone.dexterity.interfaces import IDexterityFTI
_ = MessageFactory('bda.plone.productshop')
def AvailableVariantAspectsVocabulary(context):
terms = list()
for definition in available_variant_aspects():
terms.append(SimpleTerm(value=dotted_name(definition.interface),
title=definition.title))
return SimpleVocabulary(terms)
directlyProvides(AvailableVariantAspectsVocabulary, IVocabularyFactory)
def RtfFieldsVocabulary(context):
try:
fields = getUtility(IDexterityFTI, name='bda.plone.productshop.product').lookupSchema()
except:
fields = ['Datasheet', ]
terms = [ SimpleTerm(value=pair, token=pair, title=pair) for pair in fields]
return SimpleVocabulary(terms)
directlyProvides(RtfFieldsVocabulary, IVocabularyFactory)
| from zope.interface import directlyProvides
from zope.schema.interfaces import IVocabularyFactory
from zope.schema.vocabulary import (
SimpleVocabulary,
SimpleTerm,
)
from zope.i18nmessageid import MessageFactory
from .utils import (
dotted_name,
available_variant_aspects,
)
#added by espen
from zope.component import getUtility
from plone.dexterity.interfaces import IDexterityFTI
from zope.component import ComponentLookupError
_ = MessageFactory('bda.plone.productshop')
def AvailableVariantAspectsVocabulary(context):
terms = list()
for definition in available_variant_aspects():
terms.append(SimpleTerm(value=dotted_name(definition.interface),
title=definition.title))
return SimpleVocabulary(terms)
directlyProvides(AvailableVariantAspectsVocabulary, IVocabularyFactory)
def RtfFieldsVocabulary(context):
try:
type = getUtility(IDexterityFTI, name='bda.plone.productshop.product')
fields = type.lookupSchema()
terms = [ SimpleTerm(value=pair, token=pair, title=pair) for pair in fields]
return SimpleVocabulary(terms)
except KeyError:
pass
finally:
pass
directlyProvides(RtfFieldsVocabulary, IVocabularyFactory)
| Python | 0 |
3cade5788e55b124ce6c55350afb1beae4d3a5c3 | Update __init__.py | examples/quickhowto2/app/__init__.py | examples/quickhowto2/app/__init__.py | import logging
from flask import Flask
from flask.ext.appbuilder import SQLA, AppBuilder
#from sqlalchemy.engine import Engine
#from sqlalchemy import event
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(name)s:%(message)s')
logging.getLogger().setLevel(logging.DEBUG)
app = Flask(__name__)
app.config.from_object('config')
db = SQLA(app)
appbuilder = AppBuilder(app, db.session, menu=Menu(reverse=False))
"""
Only include this for SQLLite constraints
@event.listens_for(Engine, "connect")
def set_sqlite_pragma(dbapi_connection, connection_record):
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA foreign_keys=ON")
cursor.close()
"""
from app import views
| import logging
from flask import Flask
from flask.ext.appbuilder import SQLA, AppBuilder
#from sqlalchemy.engine import Engine
#from sqlalchemy import event
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(name)s:%(message)s')
logging.getLogger().setLevel(logging.DEBUG)
app = Flask(__name__)
app.config.from_object('config')
db = SQLA(app)
appbuilder = AppBuilder(app, db.session)
"""
Only include this for SQLLite constraints
@event.listens_for(Engine, "connect")
def set_sqlite_pragma(dbapi_connection, connection_record):
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA foreign_keys=ON")
cursor.close()
"""
from app import views
| Python | 0.000072 |
14d12b1d6bbcf5784256b82f58974f02fe8d1503 | Remove unused imports. Gating tests so they do not run on Windows | tests/integration/states/test_cron.py | tests/integration/states/test_cron.py | # -*- coding: utf-8 -*-
'''
Tests for the cron state
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.unit import skipIf
# Import Salt libs
import salt.utils.platform
@skipIf(salt.utils.platform.is_windows(), 'minion is windows')
class CronTest(ModuleCase):
'''
Validate the file state
'''
def setUp(self):
'''
Setup
'''
self.run_state('user.present', name='test_cron_user')
def tearDown(self):
'''
Teardown
'''
# Remove cron file
self.run_function('cmd.run',
cmd='crontab -u test_cron_user -r')
# Delete user
self.run_state('user.absent', name='test_cron_user')
def test_managed(self):
'''
file.managed
'''
ret = self.run_state(
'cron.file',
name='salt://issue-46881/cron',
user='test_cron_user'
)
_expected = '--- \n+++ \n@@ -1 +1,2 @@\n-\n+# Lines below here are managed by Salt, do not edit\n+@hourly touch /tmp/test-file\n'
self.assertIn('changes', ret['cron_|-salt://issue-46881/cron_|-salt://issue-46881/cron_|-file'])
self.assertIn('diff', ret['cron_|-salt://issue-46881/cron_|-salt://issue-46881/cron_|-file']['changes'])
self.assertEqual(_expected, ret['cron_|-salt://issue-46881/cron_|-salt://issue-46881/cron_|-file']['changes']['diff'])
| # -*- coding: utf-8 -*-
'''
Tests for the cron state
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.unit import skipIf
from tests.support.paths import FILES, TMP, TMP_STATE_TREE
from tests.support.helpers import (
skip_if_not_root,
with_system_user_and_group,
with_tempfile,
Webserver,
)
class CronTest(ModuleCase):
'''
Validate the file state
'''
def setUp(self):
'''
Setup
'''
self.run_state('user.present', name='test_cron_user')
def tearDown(self):
'''
Teardown
'''
# Remove cron file
self.run_function('cmd.run',
cmd='crontab -u test_cron_user -r')
# Delete user
self.run_state('user.absent', name='test_cron_user')
def test_managed(self):
'''
file.managed
'''
ret = self.run_state(
'cron.file',
name='salt://issue-46881/cron',
user='test_cron_user'
)
_expected = '--- \n+++ \n@@ -1 +1,2 @@\n-\n+# Lines below here are managed by Salt, do not edit\n+@hourly touch /tmp/test-file\n'
self.assertIn('changes', ret['cron_|-salt://issue-46881/cron_|-salt://issue-46881/cron_|-file'])
self.assertIn('diff', ret['cron_|-salt://issue-46881/cron_|-salt://issue-46881/cron_|-file']['changes'])
self.assertEqual(_expected, ret['cron_|-salt://issue-46881/cron_|-salt://issue-46881/cron_|-file']['changes']['diff'])
| Python | 0 |
6493cc31acdc09df6c0ad952e95380ac31a0e504 | Update __init__.py | vispy/color/__init__.py | vispy/color/__init__.py | # -*- coding: utf-8 -*-
# Copyright (c) 2014, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
Convience interfaces to manipulate colors.
This module provides support for manipulating colors.
"""
__all__ = ['Color', 'ColorArray', 'Colormap',
'get_colormap', 'get_colormaps',
'get_color_names', 'get_color_dict']
from ._color_dict import get_color_names, get_color_dict # noqa
from .color_array import Color, ColorArray
from .colormap import (Colormap, # noqa
get_colormap, get_colormaps) # noqa
| # -*- coding: utf-8 -*-
# Copyright (c) 2014, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
Convience interfaces to manipulate colors.
This module provides support for manipulating colors.
"""
<<<<<<< HEAD
__all__ = ['Color', 'ColorArray', 'Colormap',
'get_colormap', 'get_colormaps',
'get_color_names', 'get_color_dict']
from ._color_dict import get_color_names, get_color_dict # noqa
from .color_array import Color, ColorArray
from .colormap import (Colormap, # noqa
get_colormap, get_colormaps) # noqa
=======
__all__ = ['Color', 'ColorArray', 'LinearGradient', 'get_color_names',
'get_colormap_py']
from ._color_dict import get_color_names # noqa
from ._color import (Color, ColorArray, LinearGradient, # noqa
get_colormap, colormaps, get_colormap_py) # noqa
>>>>>>> new visuals/isocurve for tri mesh
| Python | 0.000072 |
a364196814c3b33e7fd51a42b4c3a48a3aaeaee8 | Update list of extraordinary gentlemen | volaparrot/constants.py | volaparrot/constants.py | ADMINFAG = ["RealDolos"]
PARROTFAG = "Parrot"
BLACKFAGS = [i.casefold() for i in (
"kalyx", "merc", "loliq", "annoying", "RootBeats", "JEW2FORU", "quag", "mire", "perici", "Voldemort", "briseis", "brisis", "GNUsuks", "rhooes", "n1sm4n", "honeyhole", "Printer", "yume1")]
OBAMAS = [i.casefold() for i in (
"counselor", "briseis", "apha", "bread", "ark3", "jizzbomb", "acid", "elkoalemos", "tarta", "counselor", "myon")]
BLACKROOMS = "e7u-CG", "jAzmc3", "f66jeG", "24_zFd", "BHfjGvT", "BHI0pxg",
WHITEROOMS = "BEEPi",
| ADMINFAG = ["RealDolos"]
PARROTFAG = "Parrot"
BLACKFAGS = [i.casefold() for i in (
"kalyx", "merc", "loliq", "annoying", "bot", "RootBeats", "JEW2FORU", "quag", "mire", "perici")]
OBAMAS = [i.casefold() for i in (
"counselor", "briseis", "apha", "bread", "ark3", "jizzbomb", "acid", "elkoalemos", "tarta")]
BLACKROOMS = "e7u-CG", "jAzmc3", "f66jeG", "24_zFd"
WHITEROOMS = "9pdLvy"
| Python | 0 |
84965de8b53dcb5b10788808a2bf135df82cd4d9 | Update dev DB to sqlite3 | vpr/vpr/settings/dev.py | vpr/vpr/settings/dev.py | # Django settings for vpr project.
from base import *
DEBUG = True
DEVELOPMENT = True
TEMPLATE_DEBUG = DEBUG
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'vpr.sqlite3', # Or path to database file if using sqlite3.
#'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'vpr_dev', # Or path to database file if using sqlite3.
'USER': 'vpr', # Not used with sqlite3.
'PASSWORD': 'vpr', # Not used with sqlite3.
'HOST': 'localhost', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '3306', # Set to empty string for default. Not used with sqlite3.
}
}
# Make this unique, and don't share it with anybody.
#SECRET_KEY = 'kw7#s$8t&6d9*7*$a$(gui0r1ze7f#u%(hua=^a3u66+vyj+9g'
ROOT_URLCONF = 'vpr.urls.dev'
INSTALLED_APPS += (
'django_extensions',
)
| # Django settings for vpr project.
from base import *
DEBUG = True
DEVELOPMENT = True
TEMPLATE_DEBUG = DEBUG
DATABASES = {
'default': {
#'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
#'NAME': 'vpr.sqlite3', # Or path to database file if using sqlite3.
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'vpr_dev', # Or path to database file if using sqlite3.
'USER': 'vpr', # Not used with sqlite3.
'PASSWORD': 'vpr', # Not used with sqlite3.
'HOST': 'localhost', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '3306', # Set to empty string for default. Not used with sqlite3.
}
}
# Make this unique, and don't share it with anybody.
#SECRET_KEY = 'kw7#s$8t&6d9*7*$a$(gui0r1ze7f#u%(hua=^a3u66+vyj+9g'
ROOT_URLCONF = 'vpr.urls.dev'
INSTALLED_APPS += (
'django_extensions',
)
| Python | 0 |
52f9d95582456754aa4e9f9cd5d59a308c9e256c | remove comments | server.py | server.py | import socket
import communication
import time
import cv2
import numpy as np
from picamera import PiCamera
import sys
import signal
run_condition = True
def signal_handler(signal, frame):
print('Exiting...')
global run_condition
run_condition = False
sys.exit(0)
def generateRandomImg():
z = np.random.random((500, 500)) # Test data
print z.dtype
return z
def getCameraStill():
with PiCamera() as camera:
camera.resolution=(500,500)
camera.capture('temp.bmp')
data = np.asarray(cv2.imread('temp.bmp'), dtype='uint16')
return data
def getData():
z = getCameraStill()
return z
def waitForClient(sock):
connection, address = sock.accept()
print "sending ", communication.SERVER_READY
communication.send_msg(connection, communication.SERVER_READY)
msg = communication.recv_msg(connection)
print "received ", msg
return connection
camera_id = sys.argv[1].zfill(2)
signal.signal(signal.SIGINT, signal_handler)
print 'starting server ', camera_id
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.bind(("", 10006))
server_socket.listen(5)
while(run_condition):
try:
connection = waitForClient(server_socket)
print "sending ", camera_id
communication.send_msg(connection, camera_id)
print "received ", communication.recv_msg(connection)
data = getData()
print "sending image data"
communication.send_img(connection, data)
print "closing connection"
connection.close()
except:
print "Server failure, resetting connection"
server_socket.close()
| import socket
import communication
import time
import cv2
import numpy as np
from picamera import PiCamera
import sys
import signal
run_condition = True
def signal_handler(signal, frame):
print('Exiting...')
global run_condition
run_condition = False
sys.exit(0)
def generateRandomImg():
z = np.random.random((500, 500)) # Test data
print z.dtype
return z
def getCameraStill():
with PiCamera() as camera:
camera.resolution= (500,500)
camera.capture('temp.bmp')
data = np.asarray(cv2.imread('temp.bmp'), dtype='uint16')
return data
def getData():
z = getCameraStill()
return z
def waitForClient(sock):
connection, address = sock.accept()
#send SERVER_READY
print "sending ", communication.SERVER_READY
communication.send_msg(connection, communication.SERVER_READY)
#receive CLIENT_READY
msg = communication.recv_msg(connection)
print "received ", msg
return connection
camera_id = sys.argv[1].zfill(2)
signal.signal(signal.SIGINT, signal_handler)
print 'starting server ', camera_id
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.bind(("", 10006))
server_socket.listen(5)
while(run_condition):
try:
connection = waitForClient(server_socket)
#send camera id
print "sending ", camera_id
communication.send_msg(connection, camera_id)
#receive CAMERA_ID_ACK
print "received ", communication.recv_msg(connection)
data = getData()
print "sending image data"
communication.send_img(connection, data)
print "closing connection"
connection.close()
except:
print "Server failure, resetting connection"
server_socket.close()
| Python | 0 |
95495c1b0ae1a696b95eb486c7a40d54f507dacb | Add cross origin requests. | server.py | server.py | import pymzn
import os
from subprocess import Popen, PIPE
from flask import Flask, json, Response
app = Flask(__name__)
folder = 'models' #where the .mzn files are stored
models = []
for file in os.listdir(folder):
if file.endswith('.mzn'):
models.append(file)
@app.route('/')
def Allmodels():
return json.jsonify(result=models)
#inputs models musn't 'output'
@app.route('/model/<string:model>.json')
def Model(model):
if (model+".mzn" in models):
def output_line():
with Popen(["minizinc", folder + '/' + model+".mzn", "-a"], stdout=PIPE, bufsize=1, universal_newlines=True) as p: #-a outputs all solutions
for line in p.stdout:
markup = ['----------','==========']
if line.rstrip() not in markup: #each new solution is a new JSON object
yield str(pymzn.parse_dzn(line)) #use pymzn to turn output into nice JSON objects
return Response(output_line(), mimetype='text/json')
else:
return json.jsonify(model="no model found")
# TODO: Unsure if this is safe security wise, have to look into it.
# aka. CORS request.
@app.after_request
def after_request(response):
response.headers.add('Access-Control-Allow-Origin', '*')
response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')
response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE')
return response
| import pymzn
import os
from subprocess import Popen, PIPE
from flask import Flask, json, Response
app = Flask(__name__)
folder = 'models' #where the .mzn files are stored
models = []
for file in os.listdir(folder):
if file.endswith('.mzn'):
models.append(file)
@app.route('/')
def Allmodels():
return json.jsonify(result=models)
#inputs models musn't 'output'
@app.route('/model/<string:model>.json')
def Model(model):
if (model+".mzn" in models):
def output_line():
with Popen(["MiniZinc", folder + '/' + model+".mzn", "-a"], stdout=PIPE, bufsize=1, universal_newlines=True) as p: #-a outputs all solutions
for line in p.stdout:
markup = ['----------','==========']
if line.rstrip() not in markup: #each new solution is a new JSON object
yield str(pymzn.parse_dzn(line)) #use pymzn to turn output into nice JSON objects
return Response(output_line(), mimetype='text/json')
else:
return json.jsonify(model="no model found") | Python | 0 |
11000be28e9c150adde2d9109bb5e078a5c553b6 | Assume the dimesions are good | server.py | server.py | #!/usr/bin/env python
import sys
import numpy
import math
import logging
from fuel.datasets import DogsVsCats
from fuel.schemes import ShuffledScheme
from fuel.server import start_server
from fuel.streams import DataStream
from fuel.transformers import ScaleAndShift, ForceFloatX
from fuel.transformers.image import (
RandomFixedSizeCrop,
SourcewiseTransformer, ExpectsAxisLabels)
from PIL import Image
class ForceMinimumDimension(SourcewiseTransformer, ExpectsAxisLabels):
def __init__(self, data_stream, min_dim, resample='nearest',
**kwargs):
self.min_dim = min_dim
try:
self.resample = getattr(Image, resample.upper())
except AttributeError:
raise ValueError("unknown resampling filter '{}'".format(resample))
kwargs.setdefault('produces_examples', data_stream.produces_examples)
kwargs.setdefault('axis_labels', data_stream.axis_labels)
super(ForceMinimumDimension, self).__init__(data_stream, **kwargs)
def transform_source_batch(self, batch, source_name):
self.verify_axis_labels(('batch', 'channel', 'height', 'width'),
self.data_stream.axis_labels[source_name],
source_name)
return [self._example_transform(im, source_name) for im in batch]
def transform_source_example(self, example, source_name):
self.verify_axis_labels(('channel', 'height', 'width'),
self.data_stream.axis_labels[source_name],
source_name)
return self._example_transform(example, source_name)
def _example_transform(self, example, _):
if example.ndim > 3 or example.ndim < 2:
raise NotImplementedError
original_min_dim = min(example.shape[-2:])
multiplier = self.min_dim / float(original_min_dim)
dt = example.dtype
# If we're dealing with a colour image, swap around the axes
# to be in the format that PIL needs.
if example.ndim == 3:
im = example.transpose(1, 2, 0)
else:
im = example
im = Image.fromarray(im)
width, height = im.size
width = int(math.ceil(width * multiplier))
height = int(math.ceil(height * multiplier))
im = numpy.array(im.resize((width, height))).astype(dt)
# If necessary, undo the axis swap from earlier.
if im.ndim == 3:
example = im.transpose(2, 0, 1)
else:
example = im
return example
def add_transformers(stream, random_crop=False):
# Now the dataset has images with good minimum size
# stream = ForceMinimumDimension(stream, 128,
# which_sources=['image_features'])
if random_crop:
stream = RandomFixedSizeCrop(stream, (128, 128),
which_sources=['image_features'])
stream = ScaleAndShift(stream, 1 / 255.0, 0,
which_sources=['image_features'])
stream = ForceFloatX(stream)
return stream
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
train = DogsVsCats(("train",), subset=slice(None, int(sys.argv[1]), None))
train_str = DataStream(
train, iteration_scheme=ShuffledScheme(train.num_examples, int(sys.argv[2])))
train_str = add_transformers(train_str, random_crop=True)
start_server(train_str, port=int(sys.argv[3]))
| #!/usr/bin/env python
import sys
import numpy
import math
import logging
from fuel.datasets import DogsVsCats
from fuel.schemes import ShuffledScheme
from fuel.server import start_server
from fuel.streams import DataStream
from fuel.transformers import ScaleAndShift, ForceFloatX
from fuel.transformers.image import (
RandomFixedSizeCrop,
SourcewiseTransformer, ExpectsAxisLabels)
from PIL import Image
class ForceMinimumDimension(SourcewiseTransformer, ExpectsAxisLabels):
def __init__(self, data_stream, min_dim, resample='nearest',
**kwargs):
self.min_dim = min_dim
try:
self.resample = getattr(Image, resample.upper())
except AttributeError:
raise ValueError("unknown resampling filter '{}'".format(resample))
kwargs.setdefault('produces_examples', data_stream.produces_examples)
kwargs.setdefault('axis_labels', data_stream.axis_labels)
super(ForceMinimumDimension, self).__init__(data_stream, **kwargs)
def transform_source_batch(self, batch, source_name):
self.verify_axis_labels(('batch', 'channel', 'height', 'width'),
self.data_stream.axis_labels[source_name],
source_name)
return [self._example_transform(im, source_name) for im in batch]
def transform_source_example(self, example, source_name):
self.verify_axis_labels(('channel', 'height', 'width'),
self.data_stream.axis_labels[source_name],
source_name)
return self._example_transform(example, source_name)
def _example_transform(self, example, _):
if example.ndim > 3 or example.ndim < 2:
raise NotImplementedError
original_min_dim = min(example.shape[-2:])
multiplier = self.min_dim / float(original_min_dim)
dt = example.dtype
# If we're dealing with a colour image, swap around the axes
# to be in the format that PIL needs.
if example.ndim == 3:
im = example.transpose(1, 2, 0)
else:
im = example
im = Image.fromarray(im)
width, height = im.size
width = int(math.ceil(width * multiplier))
height = int(math.ceil(height * multiplier))
im = numpy.array(im.resize((width, height))).astype(dt)
# If necessary, undo the axis swap from earlier.
if im.ndim == 3:
example = im.transpose(2, 0, 1)
else:
example = im
return example
def add_transformers(stream, random_crop=False):
stream = ForceMinimumDimension(stream, 128,
which_sources=['image_features'])
if random_crop:
stream = RandomFixedSizeCrop(stream, (128, 128),
which_sources=['image_features'])
stream = ScaleAndShift(stream, 1 / 255.0, 0,
which_sources=['image_features'])
stream = ForceFloatX(stream)
return stream
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
train = DogsVsCats(("train",), subset=slice(None, int(sys.argv[1]), None))
train_str = DataStream(
train, iteration_scheme=ShuffledScheme(train.num_examples, int(sys.argv[2])))
train_str = add_transformers(train_str, random_crop=True)
start_server(train_str, port=int(sys.argv[3]))
| Python | 0.999997 |
eb889d08c6031ba6c2b55a883d09e22f5684f0ee | Add mode (training|arena) option to the command line | server.py | server.py | #!/usr/bin/env python
import os
import sys
import requests
import re
from bot import RandomBot
def get_new_game_state(server_host, key, mode='training', number_of_turns = '20'):
if(mode=='training'):
params = { 'key': key, 'turns': number_of_turns}
r = requests.post(server_host + '/api/training', params)
if(r.status_code == 200):
return r.json()
else:
print("Error when creating the game")
print(r.text)
else:
pass
def move(url, direction):
r = requests.post(url, {'dir': direction})
return r.json()
def start(server_host, key, mode, bot, number_of_games = 20):
def play(state, games_played = 0):
if (state['game']['finished']):
games_played += 1
print('Game finished: %d/%d' % (games_played, number_of_games))
if(games_played < number_of_games):
print('asking a new game')
state = get_new_game_state(server_host, key)
play(state, games_played)
else:
url = state['playUrl']
direction = bot.move(state)
new_state = move(url, direction)
print("Playing turn %d with direction %s" % (state['game']['turn'], direction))
play(new_state, games_played)
state = get_new_game_state(server_host, key, mode)
print("Start: " + state['viewUrl'])
play(state)
if __name__ == "__main__":
if (len(sys.argv) > 4):
start(sys.argv[1], sys.argv[2], sys.argv[3], RandomBot(), int(sys.argv[4]))
else:
print("Usage: %s <server> <key> <[training|arena]> <number-of-games-to-play>" % (sys.argv[0]))
print('Example: %s http://localhost:9000 mySecretKey training 20' % (sys.argv[0]))
| #!/usr/bin/env python
import os
import sys
import requests
import re
from bot import RandomBot
def get_new_game_state(server_host, key, number_of_turns = '20', mode='training'):
if(mode=='training'):
params = { 'key': key, 'turns': number_of_turns}
r = requests.post(server_host + '/api/training', params)
if(r.status_code == 200):
return r.json()
else:
print("Error when creating the game")
print(r.text)
else:
pass
def move(url, direction):
r = requests.post(url, {'dir': direction})
return r.json()
def start(server_host, key, bot, number_of_games = 20):
def play(state, games_played = 0):
if (state['game']['finished']):
games_played += 1
print('Game finished: %d/%d' % (games_played, number_of_games))
if(games_played < number_of_games):
print('asking a new game')
state = get_new_game_state(server_host, key)
play(state, games_played)
else:
url = state['playUrl']
direction = bot.move(state)
new_state = move(url, direction)
print("Playing turn %d with direction %s" % (state['game']['turn'], direction))
play(new_state, games_played)
state = get_new_game_state(server_host, key)
print("Start: " + state['viewUrl'])
play(state)
if __name__ == "__main__":
if (len(sys.argv) > 3):
start(sys.argv[1], sys.argv[2], RandomBot(), int(sys.argv[3]))
else:
print("Usage: %s <server> <key> <number-of-games-to-play>" % (sys.argv[0]))
print('Example: %s http://localhost:9000 mySecretKey 20' % (sys.argv[0]))
| Python | 0.000001 |
50a3805a16f75010fbaa6fc4e4c49c4da64f30b8 | edit and delete | shares.py | shares.py | # coding: utf-8
"""
share.py
~~~~~~~~
木犀分享的信息
"""
from flask import url_for, jsonify, request, g, current_app
from . import api
from muxiwebsite.models import Share, AnonymousUser
from muxiwebsite import db
from .authentication import auth
from flask_login import current_user
tags = ['frontend', 'backend', 'android', 'design', 'product']
@api.route('/shares/', methods=['GET'])
def get_shares():
"""
获取所有分享
"""
page = request.args.get('page', 1, type=int)
pagination = Share.query.paginate(
page,
per_page=current_app.config['MUXI_SHARES_PER_PAGE'],
error_out=False
)
shares = pagination.items
prev = None
if pagination.has_prev:
prev = url_for('api.get_shares', page=page-1, _external=True)
next = None
if pagination.has_next:
next = url_for('api.get_shares', page=page+1, _external=True)
shares_count = len(Share.query.all())
page_count = shares_count//current_app.config['MUXI_SHARES_PER_PAGE']
if not isinstance(page_count, int) \
or page_count == 0:
page_count = page_count + 1
last = url_for('api.get_shares', page=page_count, _external=True)
return jsonify({
'shares': [share.to_json() for share in shares],
'count': pagination.total
}), 200, {'link': '<%s>; rel="next", <%s>; rel="last"' % (next, last)}
@api.route('/shares/', methods=['POST', 'GET'])
@auth.login_required
def new_share():
"""创建一个分享"""
data_dict = eval(request.data)
if not hasattr(g.current_user, 'id'):
return jsonify({
'error' : 'please login first'
}), 403
share = Share(
title = data_dict.get('title'),
share = data_dict.get('share'),
author_id = g.current_user.id
)
db.session.add(share)
db.session.commit()
return jsonify({
'new share' : share.id
}), 201
# 展示特定id的分享,相关评论,发表评论
@api.route('/views/<int:id>',methods=["GET"."POST"])
def view_share(id) :
share = Share.query.get_or_404(id)
share.author =
User.query.filter_by(id=share.author_id).fisrt().username
comments = Comment.query.filter_by(share_id=share.id).all()
if request.method == 'POST' :
comment = Comment()
comment.comment = request.get_json().get("comment")
comment.share_id = id
comment.author_id = current_user.id
comment.count = 0
comment.auhtor_name =
User.query.filter_by(id=current_user.id).first().username
db.session.add(comment)
db.session.commit()
this_comment = Comment.query.filter_by(
comment = comment.comment.data ,
author_id = current_user.id,
share_id = id ,
).first()
this_comment.count += 1
return jsonify(comment.to_json()) , 201
share_avatar =
User.query.filter_by(id=share.author_id).first().avator_url
share_comments_num =
@login_required
@api.route('/send/',methods=['GET','POST'])
def add_share() :
if request.method == 'POST' :
share = Share()
share.title = request.get_json.get("title")
share.share = request.get_json.get("share")
share.tag = request.get_json.get("tag")
share.content = request.get_json.get("content")
share.author_id = current_user.id
db.session.add(share)
db.session.commit()
return redirect(url_for('.index',page))
return jsonify(share.to_json2()) ,201
@login_required
@api.route('/delete/<int:id>',methods=['GET','DELETE'])
@permission_required(Permission.WRITE_APTICLES)
def delete(id) :
share = Share.query.get_or_404(id)
if request.method == 'DELETE' :
db.session.delete(share)
db.session.commit()
return jsonify({
'deleted' : share.id
}) , 200
@api.route('/edit-share/<int:id>/', methods=["PUT", "GET"])
@login_required
@permission_required(Permission.WRITE_ARTICLES)
def edit(id) :
share = Share.query.get_or_404(id)
if request.method == 'put' :
share.share = request.get_json().get("share")
share.title = request.get_json().get("title")
db.session.add(share)
db.session.commit()
return jsonify({
'edited' : share.id
}) , 200
@api.route('/')
def index() :
| # coding: utf-8
"""
share.py
~~~~~~~~
木犀分享的信息
"""
from flask import url_for, jsonify, request, g, current_app
from . import api
from muxiwebsite.models import Share, AnonymousUser
from muxiwebsite import db
from .authentication import auth
from flask_login import current_user
@api.route('/shares/', methods=['GET'])
def get_shares():
"""
获取所有分享
"""
page = request.args.get('page', 1, type=int)
pagination = Share.query.paginate(
page,
per_page=current_app.config['MUXI_SHARES_PER_PAGE'],
error_out=False
)
shares = pagination.items
prev = None
if pagination.has_prev:
prev = url_for('api.get_shares', page=page-1, _external=True)
next = None
if pagination.has_next:
next = url_for('api.get_shares', page=page+1, _external=True)
shares_count = len(Share.query.all())
page_count = shares_count//current_app.config['MUXI_SHARES_PER_PAGE']
if not isinstance(page_count, int) \
or page_count == 0:
page_count = page_count + 1
last = url_for('api.get_shares', page=page_count, _external=True)
return jsonify({
'shares': [share.to_json() for share in shares],
'count': pagination.total
}), 200, {'link': '<%s>; rel="next", <%s>; rel="last"' % (next, last)}
@api.route('/shares/', methods=['POST', 'GET'])
@auth.login_required
def new_share():
"""创建一个分享"""
data_dict = eval(request.data)
if not hasattr(g.current_user, 'id'):
return jsonify({
'error' : 'please login first'
}), 403
share = Share(
title = data_dict.get('title'),
share = data_dict.get('share'),
author_id = g.current_user.id
)
db.session.add(share)
db.session.commit()
return jsonify({
'new share' : share.id
}), 201
# 展示特定id的分享,相关评论,发表评论
@api.route('/views/<int:id>',methods=["GET"."POST"])
def view_share(id) :
share = Share.query.get_or_404(id)
share.author =
User.query.filter_by(id=share.author_id).fisrt().username
comments = Comment.query.filter_by(share_id=share.id).all()
if request.method == 'POST' :
comment = CommentForm()
comment.
| Python | 0.000001 |
7542d224d2ab15adec31a2d896a22cab6a7a2b37 | add log | spider.py | spider.py | # -*- coding: utf-8 -*-
import http.client
from ltc.models import Record
import json
def get_current_price():
conn = http.client.HTTPSConnection('api.huobi.com')
conn.request('GET','/staticmarket/ticker_ltc_json.js')
r = conn.getResponse()
if r.status == 200 :
data = r.read()
string = data.decode('utf8').replace("'", '"')
json_data = json.loads(string)
json = json.dumps(json_data,indent=4, sort_keys=True)
print(json)
price = json['ticker']['last']
date = json['time']
recod = Record.create(price,date)
record.save()
if __name__ == '__main__':
get_current_price()
| # -*- coding: utf-8 -*-
import http.client
from ltc.models import Record
import json
def get_current_price():
conn = http.client.HTTPSConnection('api.huobi.com')
conn.request('GET','/staticmarket/ticker_ltc_json.js')
r = conn.getResponse()
if r.status == 200 :
data = r.read()
string = data.decode('utf8').replace("'", '"')
json_data = json.loads(string)
json = json.dumps(json_data,indent=4, sort_keys=True)
price = json['ticker']['last']
date = json['time']
recod = Record.create(price,date)
record.save()
if __name__ == '__main__':
get_current_price()
| Python | 0.000002 |
7d2b0e1afa0606f7f4bcf4ff9005595120bd822c | Add method to find possible moves | src/ai.py | src/ai.py | from card import Card
from board import Board
class AI():
# Check for possible move given the state of the board
def possibleMoves(self, board):
# List of the possible moves
# a move is a tuple (cardOrigin, cardDestination)
moves = []
# Test cards of the tableau
for pile in board.PlayingStacks:
for card in pile:
if (not card.facedown):
# Moves on the tableau
for pile2 in board.PlayingStacks:
if (pile != pile2):
for card2 in pile2:
if (board.moveCardFromTableau(card, card2) == 0):
moves.append([card, card2])
# Moves on the foundations
if (board.moveCardFromTableau(card, "H") == 0):
moves.append([card, "H"])
if (board.moveCardFromTableau(card, "S") == 0):
moves.append([card, "S"])
if (board.moveCardFromTableau(card, "C") == 0):
moves.append([card, "C"])
if (board.moveCardFromTableau(card, "D") == 0):
moves.append([card, "D"])
# Test card of the waste
if (len(board.waste) > 0):
# Move to the tableau
for pile in board.PlayingStacks:
if (len(pile) > 0):
destinationCard = pile[-1]
if (board.moveCardFromWaste(destinationCard) == 0):
moves.append([ board.waste[-1], destinationCard ])
# Moves on the foundations
if (board.moveCardFromWaste("H") == 0):
moves.append([board.waste[-1], "H"])
if (board.moveCardFromWaste("S") == 0):
moves.append([board.waste[-1], "S"])
if (board.moveCardFromWaste("C") == 0):
moves.append([board.waste[-1], "C"])
if (board.moveCardFromWaste("D") == 0):
moves.append([board.waste[-1], "D"])
# Test card from the foundations
if (len(board.H) > 0):
# Move to the tableau
for pile in board.PlayingStacks:
if (len(pile) > 0):
destinationCard = pile[-1]
if (board.moveCardFromFoundation("H", destinationCard) == 0):
moves.append([ board.H[-1], destinationCard ])
if (len(board.S) > 0):
# Move to the tableau
for pile in board.PlayingStacks:
if (len(pile) > 0):
destinationCard = pile[-1]
if (board.moveCardFromFoundation("S", destinationCard) == 0):
moves.append([ board.S[-1], destinationCard ])
if (len(board.C) > 0):
# Move to the tableau
for pile in board.PlayingStacks:
if (len(pile) > 0):
destinationSard = pile[-1]
if (board.moveCardFromFoundation("C", destinationCard) == 0):
moves.append([ board.C[-1], destinationSard ])
if (len(board.D) > 0):
# Move to the tableau
for pile in board.PlayingStacks:
if (len(pile) > 0):
destinationCard = pile[-1]
if (board.moveCardFromFoundation("D", destinationCard) == 0):
moves.append([ board.D[-1], destinationCard ])
return moves
# Define if all the cards are discovered and placed in a way which
# will always let the player finish the game
def willWin(self, board):
# Check if there is still some card in the board or in the waste
if (len(board.stock) != 0 or len(board.waste)!=0):
return False
# Check if some card on the tableau are still face down
for pile in board.PlayingStacks:
for card in pile:
if (card.facedown):
return False
return True
| from card import Card
from board import Board
class AI():
# Check for possible move given the state of the board
def possibleMoves(self, board):
return
# Define if all the cards are discovered and placed in a way which
# will always let the player finish the game
def willWin(self, board):
print("test victoire:")
# Check if there is still some card in the board or in the waste
if (len(board.stock) != 0 or len(board.waste)!=0):
print ("cartes dans le stock ou dans le waste")
return False
# Check if some card on the tableau are still face down
for pile in board.PlayingStacks:
for card in pile:
if (card.facedown):
print("Carte retournee")
return False
print("victoire")
return True
| Python | 0.000019 |
195e5cde1d81d7f73b77ce1b1c52f0beea8a2595 | Fix tclist iterator slicing | ejdb/tc.py | ejdb/tc.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numbers
from . import c
from .utils import CObjectWrapper
class ListIterator(CObjectWrapper):
"""Python iterator wrapper for a `TCLIST *`.
"""
def __init__(self, wrapped, count=None):
super(ListIterator, self).__init__(
wrapped=wrapped, finalizer=c.tc.listdel,
)
if count is None:
count = c.tc.listnum(wrapped)
self._len = count
self._i = 0
def __iter__(self): # pragma: no cover
return self
def __len__(self):
return self._len
def __getitem__(self, key):
if isinstance(key, slice):
return [self[i] for i in range(*key.indices(len(self)))]
elif isinstance(key, numbers.Number):
if key >= len(self):
raise IndexError('Iterator index out of range.')
value_p = c.tc.listval2(self._wrapped, key)
return self.instantiate(value_p)
return NotImplemented
def __next__(self):
if self._i >= self._len:
raise StopIteration
value_p = c.tc.listval2(self._wrapped, self._i)
self._i += 1
return self.instantiate(value_p)
def next(self): # pragma: no cover
"""Python 2 compatibility.
"""
return self.__next__()
def instantiate(self, value_p):
"""Subclasses should override this method to instantiate an item during
iteration.
:param value_p: Points to the current TCList iterator value of type
`c_void_p`.
"""
raise NotImplementedError
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numbers
from . import c
from .utils import CObjectWrapper
class ListIterator(CObjectWrapper):
"""Python iterator wrapper for a `TCLIST *`.
"""
def __init__(self, wrapped, count=None):
super(ListIterator, self).__init__(
wrapped=wrapped, finalizer=c.tc.listdel,
)
if count is None:
count = c.tc.listnum(wrapped)
self._len = count
self._i = 0
def __iter__(self): # pragma: no cover
return self
def __len__(self):
return self._len
def __getitem__(self, key):
if isinstance(key, slice):
return [self[i] for i in range(key.indices(len(self)))]
elif isinstance(key, numbers.Number):
if key >= len(self):
raise IndexError('Iterator index out of range.')
value_p = c.tc.listval2(self._wrapped, key)
return self.instantiate(value_p)
return NotImplemented
def __next__(self):
if self._i >= self._len:
raise StopIteration
value_p = c.tc.listval2(self._wrapped, self._i)
self._i += 1
return self.instantiate(value_p)
def next(self): # pragma: no cover
"""Python 2 compatibility.
"""
return self.__next__()
def instantiate(self, value_p):
"""Subclasses should override this method to instantiate an item during
iteration.
:param value_p: Points to the current TCList iterator value of type
`c_void_p`.
"""
raise NotImplementedError
| Python | 0.000009 |
1e7869a5b7c5503498634dc0161fdfb29bbb4c7b | Fix imports to restore schema import. | grano/logic/schemata.py | grano/logic/schemata.py | import os
import yaml
from pprint import pprint
from grano.core import db, url_for
from grano.model import Schema, Attribute
from grano.logic.validation import validate_schema, Invalid
from grano.logic import projects as projects_logic
from grano.logic import attributes
def save(project, data):
""" Create a schema. """
data = validate_schema(data)
name = data.get('name')
obj = Schema.by_name(project, name)
if obj is None:
obj = Schema()
obj.name = name
obj.project = project
obj.label = data.get('label')
obj.label_in = data.get('label_in') or obj.label
obj.label_out = data.get('label_out') or obj.label
obj.obj = data.get('obj')
obj.hidden = data.get('hidden')
db.session.add(obj)
names = []
for attribute in data.get('attributes', []):
attribute['schema'] = obj
attr = attributes.save(attribute)
obj.attributes.append(attr)
names.append(attr.name)
for attr in obj.attributes:
if attr.name not in names:
db.session.delete(attr)
return obj
def import_schema(project, fh):
data = yaml.load(fh.read())
try:
save(project, data)
db.session.commit()
except Invalid, inv:
pprint(inv.asdict())
def export_schema(project, path):
if not os.path.exists(path):
os.makedirs(path)
for schema in Schema.all().filter_by(project=project):
if schema.name == 'base':
continue
fn = os.path.join(path, schema.name + '.yaml')
with open(fn, 'w') as fh:
fh.write(yaml.dump(to_dict(schema)))
def to_basic(schema):
return {
'name': schema.name,
'label': schema.label
}
def to_index(schema):
data = to_basic(schema)
data['hidden'] = schema.hidden
return data
def to_rest_index(schema):
data = to_basic(schema)
data['project'] = projects_logic.to_rest_index(schema.project)
data['api_url'] = url_for('schemata_api.view', name=schema.name)
return data
def to_rest(schema):
data = to_rest_index(schema)
data['id'] = schema.id
data['hidden'] = schema.hidden
if schema.label_in:
data['label_in'] = schema.label_in
if schema.label_out:
data['label_out'] = schema.label_out
as_ = [attributes.to_rest(a) for a in schema.attributes]
data['attributes'] = as_
return data
def to_dict(schema):
data = to_basic(schema)
data['id'] = schema.id
data['obj'] = schema.obj
data['project'] = schema.project.slug
data['hidden'] = schema.hidden
data['label_in'] = schema.label_in
data['label_out'] = schema.label_out
data['attributes'] = [attributes.to_dict(a) for a in schema.attributes]
return data
| import os
from grano.core import db, url_for
from grano.model import Schema, Attribute
from grano.logic.validation import validate_schema
from grano.logic import projects as projects_logic
from grano.logic import attributes
def save(project, data):
""" Create a schema. """
data = validate_schema(data)
name = data.get('name')
obj = Schema.by_name(project, name)
if obj is None:
obj = Schema()
obj.name = name
obj.project = project
obj.label = data.get('label')
obj.label_in = data.get('label_in') or obj.label
obj.label_out = data.get('label_out') or obj.label
obj.obj = data.get('obj')
obj.hidden = data.get('hidden')
db.session.add(obj)
names = []
for attribute in data.get('attributes', []):
attribute['schema'] = obj
attr = attributes.save(attribute)
obj.attributes.append(attr)
names.append(attr.name)
for attr in obj.attributes:
if attr.name not in names:
db.session.delete(attr)
return obj
def import_schema(project, fh):
data = yaml.load(fh.read())
try:
save(project, data)
db.session.commit()
except Invalid, inv:
pprint(inv.asdict())
def export_schema(project, path):
if not os.path.exists(path):
os.makedirs(path)
for schema in Schema.all().filter_by(project=project):
if schema.name == 'base':
continue
fn = os.path.join(path, schema.name + '.yaml')
with open(fn, 'w') as fh:
fh.write(yaml.dump(to_dict(schema)))
def to_basic(schema):
return {
'name': schema.name,
'label': schema.label
}
def to_index(schema):
data = to_basic(schema)
data['hidden'] = schema.hidden
return data
def to_rest_index(schema):
data = to_basic(schema)
data['project'] = projects_logic.to_rest_index(schema.project)
data['api_url'] = url_for('schemata_api.view', name=schema.name)
return data
def to_rest(schema):
data = to_rest_index(schema)
data['id'] = schema.id
data['hidden'] = schema.hidden
if schema.label_in:
data['label_in'] = schema.label_in
if schema.label_out:
data['label_out'] = schema.label_out
as_ = [attributes.to_rest(a) for a in schema.attributes]
data['attributes'] = as_
return data
def to_dict(schema):
data = to_basic(schema)
data['id'] = schema.id
data['obj'] = schema.obj
data['project'] = schema.project.slug
data['hidden'] = schema.hidden
data['label_in'] = schema.label_in
data['label_out'] = schema.label_out
data['attributes'] = [attributes.to_dict(a) for a in schema.attributes]
return data
| Python | 0 |
1aed7a2b0213d457e1efdf5923a159fc053caae5 | Check for multiple teams in migrate_tips | gratipay/models/team.py | gratipay/models/team.py | """Teams on Gratipay are plural participants with members.
"""
from postgres.orm import Model
class Team(Model):
"""Represent a Gratipay team.
"""
typname = 'teams'
def __eq__(self, other):
if not isinstance(other, Team):
return False
return self.id == other.id
def __ne__(self, other):
if not isinstance(other, Team):
return True
return self.id != other.id
# Constructors
# ============
@classmethod
def from_id(cls, id):
"""Return an existing team based on id.
"""
return cls._from_thing("id", id)
@classmethod
def from_slug(cls, slug):
"""Return an existing team based on slug.
"""
return cls._from_thing("slug_lower", slug.lower())
@classmethod
def _from_thing(cls, thing, value):
assert thing in ("id", "slug_lower")
return cls.db.one("""
SELECT teams.*::teams
FROM teams
WHERE {}=%s
""".format(thing), (value,))
@classmethod
def insert(cls, owner, **fields):
fields['slug_lower'] = fields['slug'].lower()
fields['owner'] = owner.username
return cls.db.one("""
INSERT INTO teams
(slug, slug_lower, name, homepage,
product_or_service, revenue_model, getting_involved, getting_paid,
owner)
VALUES (%(slug)s, %(slug_lower)s, %(name)s, %(homepage)s,
%(product_or_service)s, %(revenue_model)s, %(getting_involved)s,
%(getting_paid)s,
%(owner)s)
RETURNING teams.*::teams
""", fields)
def get_og_title(self):
out = self.name
receiving = self.receiving
if receiving > 0:
out += " receives $%.2f/wk" % receiving
else:
out += " is"
return out + " on Gratipay"
def update_receiving(self, cursor=None):
# Stubbed out for now. Migrate this over from Participant.
pass
@property
def status(self):
return { None: 'unreviewed'
, False: 'rejected'
, True: 'approved'
}[self.is_approved]
def migrate_tips(self):
subscriptions = self.db.all("""
SELECT s.*
FROM subscriptions s
JOIN teams t ON t.slug = s.team
JOIN participants p ON t.owner = p.username
WHERE p.username = %s
AND s.ctime < t.ctime
""", (self.owner, ))
# Make sure the migration hasn't been done already
if subscriptions:
raise AlreadyMigrated
self.db.run("""
INSERT INTO subscriptions
(ctime, mtime, subscriber, team, amount, is_funded)
SELECT ct.ctime
, ct.mtime
, ct.tipper
, %(slug)s
, ct.amount
, ct.is_funded
FROM current_tips ct
JOIN participants p ON p.username = tipper
WHERE ct.tippee=%(owner)s
AND p.claimed_time IS NOT NULL
AND p.is_suspicious IS NOT TRUE
AND p.is_closed IS NOT TRUE
""", {'slug': self.slug, 'owner': self.owner})
class AlreadyMigrated(Exception): pass
| """Teams on Gratipay are plural participants with members.
"""
from postgres.orm import Model
class Team(Model):
"""Represent a Gratipay team.
"""
typname = 'teams'
def __eq__(self, other):
if not isinstance(other, Team):
return False
return self.id == other.id
def __ne__(self, other):
if not isinstance(other, Team):
return True
return self.id != other.id
# Constructors
# ============
@classmethod
def from_id(cls, id):
"""Return an existing team based on id.
"""
return cls._from_thing("id", id)
@classmethod
def from_slug(cls, slug):
"""Return an existing team based on slug.
"""
return cls._from_thing("slug_lower", slug.lower())
@classmethod
def _from_thing(cls, thing, value):
assert thing in ("id", "slug_lower")
return cls.db.one("""
SELECT teams.*::teams
FROM teams
WHERE {}=%s
""".format(thing), (value,))
@classmethod
def insert(cls, owner, **fields):
fields['slug_lower'] = fields['slug'].lower()
fields['owner'] = owner.username
return cls.db.one("""
INSERT INTO teams
(slug, slug_lower, name, homepage,
product_or_service, revenue_model, getting_involved, getting_paid,
owner)
VALUES (%(slug)s, %(slug_lower)s, %(name)s, %(homepage)s,
%(product_or_service)s, %(revenue_model)s, %(getting_involved)s,
%(getting_paid)s,
%(owner)s)
RETURNING teams.*::teams
""", fields)
def get_og_title(self):
out = self.name
receiving = self.receiving
if receiving > 0:
out += " receives $%.2f/wk" % receiving
else:
out += " is"
return out + " on Gratipay"
def update_receiving(self, cursor=None):
# Stubbed out for now. Migrate this over from Participant.
pass
@property
def status(self):
return { None: 'unreviewed'
, False: 'rejected'
, True: 'approved'
}[self.is_approved]
def migrate_tips(self):
subscriptions = self.db.all("""
SELECT s.*
FROM subscriptions s
JOIN teams t ON t.slug = s.team
WHERE team=%s
AND s.ctime < t.ctime
""", (self.slug,))
# Make sure the migration hasn't been done already
if subscriptions:
raise AlreadyMigrated
self.db.run("""
INSERT INTO subscriptions
(ctime, mtime, subscriber, team, amount, is_funded)
SELECT ct.ctime
, ct.mtime
, ct.tipper
, %(slug)s
, ct.amount
, ct.is_funded
FROM current_tips ct
JOIN participants p ON p.username = tipper
WHERE ct.tippee=%(owner)s
AND p.claimed_time IS NOT NULL
AND p.is_suspicious IS NOT TRUE
AND p.is_closed IS NOT TRUE
""", {'slug': self.slug, 'owner': self.owner})
class AlreadyMigrated(Exception): pass
| Python | 0 |
87608781d7de4fe9977c14a3165f4540ae1e81f2 | simplify interaction example | examples/utils/interaction.py | examples/utils/interaction.py | import sys
from PySide.QtGui import QApplication, QColor
from pivy import quarter, coin, graphics
class ConnectionPolygon(graphics.Polygon):
std_col = "green"
def __init__(self, markers, dynamic=False):
super(ConnectionPolygon, self).__init__(
sum([m.points for m in markers], []), dynamic=dynamic)
self.markers = markers
for m in self.markers:
m.on_drag.append(self.updatePolygon)
def updatePolygon(self):
self.points = sum([m.points for m in self.markers], [])
@property
def drag_objects(self):
return self.markers
class ConnectionLine(graphics.Line):
def __init__(self, markers, dynamic=False):
super(ConnectionLine, self).__init__(
sum([m.points for m in markers], []), dynamic=dynamic)
self.markers = markers
for m in self.markers:
m.on_drag.append(self.updateLine)
def updateLine(self):
self.points = sum([m.points for m in self.markers], [])
@property
def drag_objects(self):
return self.markers
def main():
app = QApplication(sys.argv)
viewer = quarter.QuarterWidget()
root = graphics.InteractionSeparator(viewer.sorendermanager)
m1 = graphics.Marker([[-1, -1, -1]], dynamic=True)
m2 = graphics.Marker([[-1, 1, -1]], dynamic=True)
m3 = graphics.Marker([[ 1, 1, -1]], dynamic=True)
m4 = graphics.Marker([[ 1, -1, -1]], dynamic=True)
m5 = graphics.Marker([[-1, -1, 1]], dynamic=True)
m6 = graphics.Marker([[-1, 1, 1]], dynamic=True)
m7 = graphics.Marker([[ 1, 1, 1]], dynamic=True)
m8 = graphics.Marker([[ 1, -1, 1]], dynamic=True)
points = [m1, m2, m3, m4, m5, m6, m7, m8]
l01 = ConnectionLine([m1, m2], dynamic=True)
l02 = ConnectionLine([m2, m3], dynamic=True)
l03 = ConnectionLine([m3, m4], dynamic=True)
l04 = ConnectionLine([m4, m1], dynamic=True)
l05 = ConnectionLine([m5, m6], dynamic=True)
l06 = ConnectionLine([m6, m7], dynamic=True)
l07 = ConnectionLine([m7, m8], dynamic=True)
l08 = ConnectionLine([m8, m5], dynamic=True)
l09 = ConnectionLine([m1, m5], dynamic=True)
l10 = ConnectionLine([m2, m6], dynamic=True)
l11 = ConnectionLine([m3, m7], dynamic=True)
l12 = ConnectionLine([m4, m8], dynamic=True)
lines = [l01, l02, l03, l04, l05, l06, l07, l08, l09, l10, l11, l12]
p1 = ConnectionPolygon([m1, m2, m3, m4], dynamic=True)
p2 = ConnectionPolygon([m8, m7, m6, m5], dynamic=True)
p3 = ConnectionPolygon([m5, m6, m2, m1], dynamic=True)
p4 = ConnectionPolygon([m6, m7, m3, m2], dynamic=True)
p5 = ConnectionPolygon([m7, m8, m4, m3], dynamic=True)
p6 = ConnectionPolygon([m8, m5, m1, m4], dynamic=True)
polygons = [p1, p2, p3, p4, p5, p6]
root += points + lines + polygons
root.register()
viewer.setSceneGraph(root)
viewer.setBackgroundColor(QColor(255, 255, 255))
viewer.setWindowTitle("minimal")
viewer.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| import sys
from PySide.QtGui import QApplication, QColor
from pivy import quarter, coin, graphics
class ConnectionPolygon(graphics.Polygon):
std_col = "green"
def __init__(self, markers, lines, dynamic=False):
super(ConnectionPolygon, self).__init__(
sum([m.points for m in markers], []), dynamic=dynamic)
self.lines = lines
self.markers = markers
for l in self.lines:
l.on_drag.append(self.updatePolygon)
for m in self.markers:
m.on_drag.append(self.updatePolygon)
def updatePolygon(self):
self.points = sum([m.points for m in self.markers], [])
[foo() for foo in self.on_drag]
@property
def drag_objects(self):
return self.lines + self.markers + [self]
class ConnectionLine(graphics.Line):
def __init__(self, markers, dynamic=False):
super(ConnectionLine, self).__init__(
sum([m.points for m in markers], []), dynamic=dynamic)
self.markers = markers
for m in self.markers:
m.on_drag.append(self.updateLine)
def updateLine(self):
self.points = sum([m.points for m in self.markers], [])
[foo() for foo in self.on_drag]
@property
def drag_objects(self):
return self.markers + [self]
def main():
app = QApplication(sys.argv)
viewer = quarter.QuarterWidget()
root = graphics.InteractionSeparator(viewer.sorendermanager)
m1 = graphics.Marker([[0, 0, 0]], dynamic=True)
m2 = graphics.Marker([[1, 0, 0]], dynamic=True)
m3 = graphics.Marker([[0, 1, 0]], dynamic=True)
m4 = graphics.Marker([[0, 0, 2]], dynamic=True)
l1 = ConnectionLine([m1, m2], dynamic=True)
l2 = ConnectionLine([m2, m3], dynamic=True)
l3 = ConnectionLine([m3, m1], dynamic=True)
l4 = ConnectionLine([m1, m4], dynamic=True)
l5 = ConnectionLine([m2, m4], dynamic=True)
l6 = ConnectionLine([m3, m4], dynamic=True)
p1 = ConnectionPolygon([m3, m2, m1], [], dynamic=True)
p2 = ConnectionPolygon([m1, m2, m4], [], dynamic=True)
p3 = ConnectionPolygon([m2, m3, m4], [], dynamic=True)
p4 = ConnectionPolygon([m3, m1, m4], [], dynamic=True)
root += [m1, m2, m3, m4,
l1, l2, l3,
l4, l5, l6,
p1, p2, p3, p4]
root.register()
viewer.setSceneGraph(root)
viewer.setBackgroundColor(QColor(255, 255, 255))
viewer.setWindowTitle("minimal")
viewer.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| Python | 0.99796 |
93ed74d3c904c743f91f0c9e81f6d36bda731a10 | modify example.py | example.py | example.py | #! /usr/bin/python
import FixQueue
if __name__ == "__main__" :
fq = FixQueue(2) # 2 means queue size
fq.append('a')
fq.append('b')
print (fq) # ['a', 'b']
fq.append('c')
print (fq) # ['b', 'c']
print (fq.pop()) # b
print (fq) # ['b']
| #! /usr/bin/python
from FixQueue import FixQueue
if __name__ == "__main__" :
fq = FixQueue(2) # 2 means queue size
fq.append('a')
fq.append('b')
print (fq) # ['a', 'b']
fq.append('c')
print (fq) # ['b', 'c']
print (fq.pop()) # b
print (fq) # ['b']
| Python | 0.000001 |
80adb43a22f82a036d72bdd398d45ee4daa9a5ff | Update example.py | example.py | example.py | import pornhub
search_keywords = []
#client = pornhub.PornHub("5.135.164.72", 3128, search_keywords)
#With proxy, given a Proxy IP and Port. For the countries with restricted access like Turkey, etc.
client = pornhub.PornHub(search_keywords)
for star in client.getStars(10):
print(star)
print(star["name"])
for video in client.getVideos(10,page=2):
print(video)
for photo_url in client.getPhotos(5):
print(photo_url)
video = client.getVideo("SOME VIDEO URL")
print(video)
print(video['accurate_views'])
| import pornhub
search_keywords = []
#client = pornhub.PornHub("5.135.164.72", 3128, search_keywords)
#With proxy, given a Proxy IP and Port. For the countries with restricted access like Turkey, etc.
client = pornhub.PornHub(search_keywords)
for star in client.getStars(10):
print(star)
print(star["name"])
for video in client.getVideos(10,page=2):
print(video)
for photo_url in client.getPhotos(5):
print(photo_url) | Python | 0.000001 |
4053baa698f1047f09540c9ff3d6e8a08d844335 | Comment example | example.py | example.py | """Example app to login to GitHub"""
import argparse
import mechanicalsoup
parser = argparse.ArgumentParser(description='Login to GitHub.')
parser.add_argument("username")
parser.add_argument("password")
args = parser.parse_args()
browser = mechanicalsoup.Browser()
# request github login page
login_page = browser.get("https://github.com/login")
# find login form
login_form = login_page.soup.select("#login")[0].select("form")[0]
# specify username and password
login_form.select("#login_field")[0]['value'] = args.username
login_form.select("#password")[0]['value'] = args.password
# submit!
page2 = browser.submit(login_page.response, login_form)
# verify we are now logged in
assert page2.soup.select(".logout-form")
# verify we remain logged in (thanks to cookies) as we browse the rest of the site
page3 = browser.get("https://github.com/matt-hickford/MechanicalSoup")
assert page3.soup.select(".logout-form")
| """Example app to login to GitHub"""
import argparse
import mechanicalsoup
parser = argparse.ArgumentParser(description='Login to GitHub.')
parser.add_argument("username")
parser.add_argument("password")
args = parser.parse_args()
browser = mechanicalsoup.Browser()
# request github login page
login_page = browser.get("https://github.com/login")
# find login form
login_form = login_page.soup.select("#login")[0].select("form")[0]
# specify username and password
login_form.select("#login_field")[0]['value'] = args.username
login_form.select("#password")[0]['value'] = args.password
page2 = browser.submit(login_page.response, login_form)
assert page2.soup.select(".logout-form") | Python | 0 |
c279eda7b56e1f793b0241a66fe155cfdbdd1d40 | Remove intermediate string during gzip compression | api/tasks.py | api/tasks.py | from __future__ import absolute_import
import os
from celery import shared_task
from celery.utils.log import get_task_logger
from api.StormReplayParser import StormReplayParser
import boto
import StringIO
import cStringIO
import json
import gzip
from boto.s3.key import Key
log = get_task_logger(__name__)
@shared_task
def LocallyStoredReplayParsingTask(fileName):
log.info('File name='+fileName)
replayFile = open(fileName)
srp = StormReplayParser(replayFile)
log.info("Created StormReplayParser, getting data")
retval = {
'unique_match_id': srp.getUniqueMatchId(),
'map': srp.getMapName(),
'players': srp.getReplayPlayers(),
'chat': srp.getChat(),
#'game': srp.getReplayGameEvents(),
}
log.info("Finished reading from StormReplay. Cleaning up.")
replayFile.close()
os.remove(replayFile.name)
return retval
@shared_task
def S3StoredReplayParsingTask(keyName):
splitKey = keyName.split('/')
if len(splitKey) != 2:
raise ValueError("keyName must be of the form: <folder>/<file>")
keyBase = splitKey[0]
resultKeyName = keyBase + '/replay.json.gz'
#todo: duplicate limiting
log.info('Key='+keyName)
s3 = boto.connect_s3()
bucket = s3.get_bucket(os.environ.get('AWS_BUCKET_NAME'), validate=False)
k = Key(bucket)
k.key = keyName
#todo: is there a better way than just pretending the string is a file?
# try: https://chromium.googlesource.com/external/boto/+/refs/heads/master/boto/s3/keyfile.py
# It's possible we just need to read this to a temp file to save memory.
replayFile = cStringIO.StringIO(k.get_contents_as_string())
srp = StormReplayParser(replayFile)
log.info("Created StormReplayParser, getting data")
retval = {
'unique_match_id': srp.getUniqueMatchId(),
'map': srp.getMapName(),
'players': srp.getReplayPlayers(),
'chat': srp.getChat(),
#'game': srp.getReplayGameEvents(),
}
rk = Key(bucket)
rk.key = resultKeyName
rk.set_metadata('Content-Encoding', 'gzip')
out = cStringIO.StringIO()
with gzip.GzipFile(fileobj=out, mode="w") as f:
f.write(json.dumps(retval))
#rk.set_contents_from_string(out.getvalue())
rk.set_contents_from_file(out, rewind=True)
out.close()
secondsToExpire = 1*60*60
responseHeaders = {
'response-content-encoding': 'gzip',
'response-content-type': 'application/json',
}
s3UrlToResultKey = rk.generate_url(secondsToExpire, 'GET', response_headers=responseHeaders)
log.info("Result: " + s3UrlToResultKey);
log.info("Finished reading from StormReplay. Cleaning up.")
return {
'url': s3UrlToResultKey
}
# todo: task specific logging?
# http://blog.mapado.com/task-specific-logging-in-celery/
| from __future__ import absolute_import
import os
from celery import shared_task
from celery.utils.log import get_task_logger
from api.StormReplayParser import StormReplayParser
import boto
import StringIO
import json
import gzip
from boto.s3.key import Key
log = get_task_logger(__name__)
@shared_task
def LocallyStoredReplayParsingTask(fileName):
log.info('File name='+fileName)
replayFile = open(fileName)
srp = StormReplayParser(replayFile)
log.info("Created StormReplayParser, getting data")
retval = {
'unique_match_id': srp.getUniqueMatchId(),
'map': srp.getMapName(),
'players': srp.getReplayPlayers(),
'chat': srp.getChat(),
#'game': srp.getReplayGameEvents(),
}
log.info("Finished reading from StormReplay. Cleaning up.")
replayFile.close()
os.remove(replayFile.name)
return retval
@shared_task
def S3StoredReplayParsingTask(keyName):
splitKey = keyName.split('/')
if len(splitKey) != 2:
raise ValueError("keyName must be of the form: <folder>/<file>")
keyBase = splitKey[0]
resultKeyName = keyBase + '/replay.json.gz'
#todo: duplicate limiting
log.info('Key='+keyName)
s3 = boto.connect_s3()
bucket = s3.get_bucket(os.environ.get('AWS_BUCKET_NAME'), validate=False)
k = Key(bucket)
k.key = keyName
#todo: is there a better way than just pretending the string is a file?
# try: https://chromium.googlesource.com/external/boto/+/refs/heads/master/boto/s3/keyfile.py
# It's possible we just need to read this to a temp file to save memory.
# also: use cStringIO instead
replayFile = StringIO.StringIO(k.get_contents_as_string())
srp = StormReplayParser(replayFile)
log.info("Created StormReplayParser, getting data")
retval = {
'unique_match_id': srp.getUniqueMatchId(),
'map': srp.getMapName(),
'players': srp.getReplayPlayers(),
'chat': srp.getChat(),
#'game': srp.getReplayGameEvents(),
}
rk = Key(bucket)
rk.key = resultKeyName
rk.set_metadata('Content-Encoding', 'gzip')
out = StringIO.StringIO()
with gzip.GzipFile(fileobj=out, mode="w") as f:
f.write(json.dumps(retval))
rk.set_contents_from_string(out.getvalue())
out.close()
secondsToExpire = 1*60*60
responseHeaders = {
'response-content-encoding': 'gzip',
'response-content-type': 'application/json',
}
s3UrlToResultKey = rk.generate_url(secondsToExpire, 'GET', response_headers=responseHeaders)
log.info("Result: " + s3UrlToResultKey);
log.info("Finished reading from StormReplay. Cleaning up.")
return {
'url': s3UrlToResultKey
}
# todo: task specific logging?
# http://blog.mapado.com/task-specific-logging-in-celery/
| Python | 0.001237 |
72b3642953d0e14d4b4c9ec03560a96d259f7d16 | Remove monkey patching in favor of inheritance for SpatialReference | contones/srs.py | contones/srs.py | """Spatial reference systems"""
from osgeo import osr
class BaseSpatialReference(osr.SpatialReference):
"""Base class for extending osr.SpatialReference."""
def __repr__(self):
return self.wkt
@property
def srid(self):
"""Returns the EPSG ID as int if it exists."""
epsg_id = (self.GetAuthorityCode('PROJCS') or
self.GetAuthorityCode('GEOGCS'))
try:
return int(epsg_id)
except TypeError:
return
@property
def wkt(self):
"""Returns this projection in WKT format."""
return self.ExportToWkt()
@property
def proj4(self):
"""Returns this projection as a proj4 string."""
return self.ExportToProj4()
class SpatialReference(object):
"""A spatial reference."""
def __new__(cls, sref):
"""Returns a new BaseSpatialReference instance
This allows for customized construction of osr.SpatialReference which
has no init method which precludes the use of super().
"""
sr = BaseSpatialReference()
if isinstance(sref, int):
sr.ImportFromEPSG(sref)
elif isinstance(sref, str):
if sref.strip().startswith('+proj='):
sr.ImportFromProj4(sref)
else:
sr.ImportFromWkt(sref)
# Add EPSG authority if applicable
sr.AutoIdentifyEPSG()
else:
raise TypeError('Cannot create SpatialReference '
'from {}'.format(str(sref)))
return sr
| """Spatial reference systems"""
from osgeo import osr
# Monkey patch SpatialReference since inheriting from SWIG classes is a hack
def srid(self):
"""Returns the EPSG ID as int if it exists."""
epsg_id = (self.GetAuthorityCode('PROJCS') or
self.GetAuthorityCode('GEOGCS'))
try:
return int(epsg_id)
except TypeError:
return
osr.SpatialReference.srid = property(srid)
def wkt(self):
"""Returns this projection in WKT format."""
return self.ExportToWkt()
osr.SpatialReference.wkt = property(wkt)
def proj4(self):
"""Returns this projection as a proj4 string."""
return self.ExportToProj4()
osr.SpatialReference.proj4 = property(proj4)
def __repr__(self): return self.wkt
osr.SpatialReference.__repr__ = __repr__
class SpatialReference(object):
def __new__(cls, sref):
sr = osr.SpatialReference()
if isinstance(sref, int):
sr.ImportFromEPSG(sref)
elif isinstance(sref, str):
if sref.strip().startswith('+proj='):
sr.ImportFromProj4(sref)
else:
sr.ImportFromWkt(sref)
# Add EPSG authority if applicable
sr.AutoIdentifyEPSG()
else:
raise TypeError('Cannot create SpatialReference '
'from {}'.format(str(sref)))
return sr
| Python | 0 |
aef91895b31a615bb9af4cfae96aae97d91cad91 | Add Intercept transformer. | dftransformers.py | dftransformers.py | import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
class ColumnSelector(BaseEstimator, TransformerMixin):
"""Transformer that selects a column in a numpy array or DataFrame
by index or name.
"""
def __init__(self, idxs=None, name=None):
self.idxs = np.asarray(idxs)
self.idxs = idxs
self.name = name
def fit(self, *args, **kwargs):
return self
def transform(self, X, **transform_params):
# Need to teat pandas data frames and numpy arrays slightly differently.
if isinstance(X, pd.DataFrame) and self.idxs:
return X.iloc[:, self.idxs]
if isinstance(X, pd.DataFrame) and self.name:
return X[self.name]
return X[:, self.idxs]
class FeatureUnion(TransformerMixin):
def __init__(self, transformer_list):
self.transformer_list = transformer_list
def fit(self, X, y=None):
for _, t in self.transformer_list:
t.fit(X, y)
def transform(self, X, *args, **kwargs):
Xs = [t.transform(X) for _, t in self.transformer_list]
if isinstance(X, pd.DataFrame):
return pd.concat(Xs, axis=1)
return np.hstack(Xs)
class Intercept(TransformerMixin):
def fit(self, *args, **kwargs):
return self
def transform(self, X, *args, **kwargs):
if isinstance(X, pd.DataFrame) or isinstance(X, pd.Series):
return pd.Series(np.ones(X.shape[0]),
index=X.index, name="intercept")
return np.ones(X.shape[0])
| import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
class ColumnSelector(BaseEstimator, TransformerMixin):
"""Transformer that selects a column in a numpy array or DataFrame
by index or name.
"""
def __init__(self, idxs=None, name=None):
self.idxs = np.asarray(idxs)
self.idxs = idxs
self.name = name
def fit(self, *args, **kwargs):
return self
def transform(self, X, **transform_params):
# Need to teat pandas data frames and numpy arrays slightly differently.
if isinstance(X, pd.DataFrame) and self.idxs:
return X.iloc[:, self.idxs]
if isinstance(X, pd.DataFrame) and self.name:
return X[self.name]
return X[:, self.idxs]
class FeatureUnion(TransformerMixin):
def __init__(self, transformer_list):
self.transformer_list = transformer_list
def fit(self, X, y=None):
for _, t in self.transformer_list:
t.fit(X, y)
def transform(self, X, *args, **kwargs):
Xs = [t.transform(X) for _, t in self.transformer_list]
if isinstance(X, pd.DataFrame):
return pd.concat(Xs, axis=1)
return np.hstack(Xs)
| Python | 0 |
4a67891a1b8a96fcc666e12b2d4c27e3598d20a2 | add support email | product_price_factor_online/__openerp__.py | product_price_factor_online/__openerp__.py | # -*- coding: utf-8 -*-
{
'name': "Product price factor for web shop",
'summary': """Multiplies price depending on product attributes""",
'category': 'Website',
'license': 'GPL-3',
'author': "IT-Projects LLC, Ildar Nasyrov",
'price': 20.00,
'currency': 'EUR',
'images': ['images/1.png'],
"support": "apps@it-projects.info",
'website': "https://twitter.com/nasyrov_ildar",
'depends': ['website_sale', 'product_price_factor'],
'data': [
'views/templates.xml',
],
'auto_install': True,
}
| # -*- coding: utf-8 -*-
{
'name': "Product price factor for web shop",
'summary': """Multiplies price depending on product attributes""",
'category': 'Website',
'license': 'GPL-3',
'author': "IT-Projects LLC, Ildar Nasyrov",
'price': 20.00,
'currency': 'EUR',
'images': ['images/1.png'],
'website': "https://twitter.com/nasyrov_ildar",
'depends': ['website_sale', 'product_price_factor'],
'data': [
'views/templates.xml',
],
'auto_install': True,
}
| Python | 0 |
6853fd1e45370a9db650b0983e9835c14ea9209e | Add diagnostics endpoint | gutenberg_http/views.py | gutenberg_http/views.py | from sanic.exceptions import RequestTimeout
from sanic.request import Request
from sanic.response import json
from gutenberg_http import app
from gutenberg_http.errors import InvalidUsage
from gutenberg_http.logic import body as _body
from gutenberg_http.logic import metadata as _metadata
from gutenberg_http.logic import search as _search
@app.route('/texts/<text_id:int>')
def metadata(request: Request, text_id: int):
include = _metadata(text_id, request.args.get('include'))
return json({'text_id': text_id, 'metadata': include})
# noinspection PyUnusedLocal
@app.route('/texts/<text_id:int>/body')
def body(request: Request, text_id: int):
fulltext = _body(text_id)
return json({'text_id': text_id, 'body': fulltext})
# noinspection PyUnusedLocal
@app.route('/search/<query>')
def search(request: Request, query: str):
results = _search(query, request.args.get('include'))
return json({'texts': results})
# noinspection PyUnusedLocal
@app.exception(InvalidUsage)
def bad_request(request: Request, exception: InvalidUsage):
error = {'error': 'invalid_usage', 'message': exception.message}
return json(error, exception.status_code)
# noinspection PyUnusedLocal
@app.exception(RequestTimeout)
def timeout(request: Request, exception: RequestTimeout):
error = {'error': 'timeout', 'message': 'The request timed out.'}
return json(error, exception.status_code)
# noinspection PyUnusedLocal
@app.exception(Exception)
def on_exception(request: Request, exception: Exception):
error = {'error': exception.__class__.__name__, 'message': str(exception)}
return json(error, getattr(exception, 'status_code', 500))
# noinspection PyUnusedLocal,PyProtectedMember
@app.route('/healthcheck')
def healthcheck(request: Request):
return json({
'caches': {
'metadata': _metadata.cache_info()._asdict(),
'body': _body.cache_info()._asdict(),
'search': _search.cache_info()._asdict(),
}
})
| from sanic.exceptions import RequestTimeout
from sanic.request import Request
from sanic.response import json
from gutenberg_http import app
from gutenberg_http.errors import InvalidUsage
from gutenberg_http.logic import body as _body
from gutenberg_http.logic import metadata as _metadata
from gutenberg_http.logic import search as _search
@app.route('/texts/<text_id:int>')
def metadata(request: Request, text_id: int):
include = _metadata(text_id, request.args.get('include'))
return json({'text_id': text_id, 'metadata': include})
# noinspection PyUnusedLocal
@app.route('/texts/<text_id:int>/body')
def body(request: Request, text_id: int):
fulltext = _body(text_id)
return json({'text_id': text_id, 'body': fulltext})
# noinspection PyUnusedLocal
@app.route('/search/<query>')
def search(request: Request, query: str):
results = _search(query, request.args.get('include'))
return json({'texts': results})
# noinspection PyUnusedLocal
@app.exception(InvalidUsage)
def bad_request(request: Request, exception: InvalidUsage):
error = {'error': 'invalid_usage', 'message': exception.message}
return json(error, exception.status_code)
# noinspection PyUnusedLocal
@app.exception(RequestTimeout)
def timeout(request: Request, exception: RequestTimeout):
error = {'error': 'timeout', 'message': 'The request timed out.'}
return json(error, exception.status_code)
# noinspection PyUnusedLocal
@app.exception(Exception)
def on_exception(request: Request, exception: Exception):
error = {'error': exception.__class__.__name__, 'message': str(exception)}
return json(error, getattr(exception, 'status_code', 500))
| Python | 0.000002 |
c33ffffb0f85e7db96f9856640ad0efdd9dc145f | Wrap all exceptions in JSON | gutenberg_http/views.py | gutenberg_http/views.py | from sanic.exceptions import RequestTimeout
from sanic.request import Request
from sanic.response import json
from gutenberg_http import app
from gutenberg_http.errors import InvalidUsage
from gutenberg_http.logic import body as _body
from gutenberg_http.logic import metadata as _metadata
from gutenberg_http.logic import search as _search
@app.route('/texts/<text_id:int>')
def metadata(request: Request, text_id: int):
include = _metadata(text_id, request.args.get('include'))
return json({'text_id': text_id, 'metadata': include})
# noinspection PyUnusedLocal
@app.route('/texts/<text_id:int>/body')
def body(request: Request, text_id: int):
fulltext = _body(text_id)
return json({'text_id': text_id, 'body': fulltext})
# noinspection PyUnusedLocal
@app.route('/search/<query>')
def search(request: Request, query: str):
results = _search(query, request.args.get('include'))
return json({'texts': results})
# noinspection PyUnusedLocal
@app.exception(InvalidUsage)
def bad_request(request: Request, exception: InvalidUsage):
error = {'error': 'invalid_usage', 'message': exception.message}
return json(error, exception.status_code)
# noinspection PyUnusedLocal
@app.exception(RequestTimeout)
def timeout(request: Request, exception: RequestTimeout):
error = {'error': 'timeout', 'message': 'The request timed out.'}
return json(error, exception.status_code)
# noinspection PyUnusedLocal
@app.exception(Exception)
def on_exception(request: Request, exception: Exception):
error = {'error': exception.__class__.__name__, 'message': str(exception)}
return json(error, getattr(exception, 'status_code', 500))
| from sanic.exceptions import RequestTimeout
from sanic.request import Request
from sanic.response import json
from gutenberg_http import app
from gutenberg_http.errors import InvalidUsage
from gutenberg_http.logic import body as _body
from gutenberg_http.logic import metadata as _metadata
from gutenberg_http.logic import search as _search
@app.route('/texts/<text_id:int>')
def metadata(request: Request, text_id: int):
include = _metadata(text_id, request.args.get('include'))
return json({'text_id': text_id, 'metadata': include})
# noinspection PyUnusedLocal
@app.route('/texts/<text_id:int>/body')
def body(request: Request, text_id: int):
fulltext = _body(text_id)
return json({'text_id': text_id, 'body': fulltext})
# noinspection PyUnusedLocal
@app.route('/search/<query>')
def search(request: Request, query: str):
results = _search(query, request.args.get('include'))
return json({'texts': results})
# noinspection PyUnusedLocal
@app.exception(InvalidUsage)
def bad_request(request: Request, exception: InvalidUsage):
error = {'error': 'invalid_usage', 'message': exception.message}
return json(error, exception.status_code)
# noinspection PyUnusedLocal
@app.exception(RequestTimeout)
def timeout(request: Request, exception: RequestTimeout):
error = {'error': 'timeout', 'message': 'The request timed out.'}
return json(error, exception.status_code)
| Python | 0.999992 |
39f0e8ed68bea6318bcb58801b5368428582f549 | remove SF checks from user api endpoint | api/views.py | api/views.py | from django.core.management import call_command
from django.utils.six import StringIO
from rest_framework import viewsets
from salesforce.models import Adopter
from salesforce.functions import check_if_faculty_pending
from social.apps.django_app.default.models import \
DjangoStorage as SocialAuthStorage
from wagtail.wagtailimages.models import Image
from .serializers import AdopterSerializer, ImageSerializer, UserSerializer
class AdopterViewSet(viewsets.ModelViewSet):
queryset = Adopter.objects.all()
serializer_class = AdopterSerializer
class ImageViewSet(viewsets.ModelViewSet):
queryset = Image.objects.all()
serializer_class = ImageSerializer
class UserView(viewsets.ModelViewSet):
serializer_class = UserSerializer
def get_queryset(self):
user = self.request.user
return [user]
| from django.core.management import call_command
from django.utils.six import StringIO
from rest_framework import viewsets
from salesforce.models import Adopter
from salesforce.functions import check_if_faculty_pending
from social.apps.django_app.default.models import \
DjangoStorage as SocialAuthStorage
from wagtail.wagtailimages.models import Image
from .serializers import AdopterSerializer, ImageSerializer, UserSerializer
class AdopterViewSet(viewsets.ModelViewSet):
queryset = Adopter.objects.all()
serializer_class = AdopterSerializer
class ImageViewSet(viewsets.ModelViewSet):
queryset = Image.objects.all()
serializer_class = ImageSerializer
class UserView(viewsets.ModelViewSet):
serializer_class = UserSerializer
def get_queryset(self):
user = self.request.user
try:
social_auth = SocialAuthStorage.user.get_social_auth_for_user(user)
user.accounts_id = social_auth[0].uid
except:
user.accounts_id = None
try:
out = StringIO()
call_command('update_faculty_status', str(user.pk), stdout=out)
except:
pass
# check if there is a record in salesforce for this user - if so, they are pending verification
user.pending_verification = check_if_faculty_pending(user.pk)
return [user]
| Python | 0 |
d994337007eb9cfe41edef591cbd30765660a822 | Prepare for next development iteration | yarn_api_client/__init__.py | yarn_api_client/__init__.py | # -*- coding: utf-8 -*-
__version__ = '0.3.8.dev'
__all__ = ['ApplicationMaster', 'HistoryServer', 'NodeManager', 'ResourceManager']
from .application_master import ApplicationMaster
from .history_server import HistoryServer
from .node_manager import NodeManager
from .resource_manager import ResourceManager
| # -*- coding: utf-8 -*-
__version__ = '0.3.7'
__all__ = ['ApplicationMaster', 'HistoryServer', 'NodeManager', 'ResourceManager']
from .application_master import ApplicationMaster
from .history_server import HistoryServer
from .node_manager import NodeManager
from .resource_manager import ResourceManager
| Python | 0 |
18e3746969f9a2a4c10553cd7c59793fafd955aa | uniform multi-website public users handling | addons/website_sale/models/res_partner.py | addons/website_sale/models/res_partner.py | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
from odoo.http import request
class ResPartner(models.Model):
_inherit = 'res.partner'
last_website_so_id = fields.Many2one('sale.order', compute='_compute_last_website_so_id', string='Last Online Sales Order')
@api.multi
def _compute_last_website_so_id(self):
SaleOrder = self.env['sale.order']
for partner in self:
is_public = any([u._is_public()
for u in partner.with_context(active_test=False).user_ids])
if request and hasattr(request, 'website') and not is_public:
partner.last_website_so_id = SaleOrder.search([
('partner_id', '=', partner.id),
('team_id.team_type', '=', 'website'),
('website_id', '=', request.website.id),
('state', '=', 'draft'),
], order='write_date desc', limit=1)
else:
partner.last_website_so_id = SaleOrder # Not in a website context or public User
| # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
from odoo.http import request
class ResPartner(models.Model):
_inherit = 'res.partner'
last_website_so_id = fields.Many2one('sale.order', compute='_compute_last_website_so_id', string='Last Online Sales Order')
@api.multi
def _compute_last_website_so_id(self):
SaleOrder = self.env['sale.order']
for partner in self:
if request and hasattr(request, 'website') and not any([u._is_public() for u in partner.user_ids]):
partner.last_website_so_id = SaleOrder.search([
('partner_id', '=', partner.id),
('team_id.team_type', '=', 'website'),
('website_id', '=', request.website.id),
('state', '=', 'draft'),
], order='write_date desc', limit=1)
else:
partner.last_website_so_id = SaleOrder # Not in a website context or public User
| Python | 0.999999 |
e3ff630917d6bb0f744330abace7c41d2b48ba8f | Add "pip_install" task | fabfile.py | fabfile.py | from fabric.api import env, task, local, cd, run, sudo, put
from tempfile import NamedTemporaryFile
env.use_ssh_config = True
env.hosts = ['skylines@skylines']
APP_DIR = '/home/skylines'
SRC_DIR = '%s/src' % APP_DIR
@task
def deploy(branch='master', force=False):
push(branch, force)
restart()
@task
def push(branch='master', force=False):
cmd = 'git push %s:%s %s:master' % (env.host_string, SRC_DIR, branch)
if force:
cmd += ' --force'
local(cmd)
@task
def restart():
with cd(SRC_DIR):
run('git reset --hard')
# compile i18n .mo files
manage('babel compile')
# generate JS/CSS assets
manage('assets build')
# do database migrations
manage('migrate upgrade')
# restart services
restart_service('skylines-api')
restart_service('skylines')
restart_service('mapserver')
restart_service('tracking')
restart_service('celery')
restart_service('mapproxy')
@task
def restart_service(service):
# Using the sudo() command somehow always provokes a password prompt,
# even if NOPASSWD is specified in the sudoers file...
run('sudo supervisorctl restart %s' % service)
@task
def manage(cmd, user=None):
with cd(SRC_DIR):
if user:
sudo('./manage.py %s' % cmd, user=user)
else:
run('./manage.py %s' % cmd)
@task
def update_mapproxy():
with NamedTemporaryFile() as f:
content = open('mapserver/mapproxy/mapproxy.yaml').read()
content = content.replace(
'base_dir: \'/tmp/cache_data\'',
'base_dir: \'%s/cache/mapproxy\'' % APP_DIR,
)
content = content.replace(
'lock_dir: \'/tmp/cache_data/tile_locks\'',
'lock_dir: \'%s/cache/mapproxy/tile_locks\'' % APP_DIR,
)
f.write(content)
f.flush()
put(f.name, '%s/config/mapproxy.yaml' % APP_DIR)
@task
def pip_install():
with cd(SRC_DIR):
run('git reset --hard')
run('pip install -e .')
@task
def clean_mapproxy_cache():
with cd('/home/skylines/cache/mapproxy'):
run('rm -rv *')
| from fabric.api import env, task, local, cd, run, sudo, put
from tempfile import NamedTemporaryFile
env.use_ssh_config = True
env.hosts = ['skylines@skylines']
APP_DIR = '/home/skylines'
SRC_DIR = '%s/src' % APP_DIR
@task
def deploy(branch='master', force=False):
push(branch, force)
restart()
@task
def push(branch='master', force=False):
cmd = 'git push %s:%s %s:master' % (env.host_string, SRC_DIR, branch)
if force:
cmd += ' --force'
local(cmd)
@task
def restart():
with cd(SRC_DIR):
run('git reset --hard')
# compile i18n .mo files
manage('babel compile')
# generate JS/CSS assets
manage('assets build')
# do database migrations
manage('migrate upgrade')
# restart services
restart_service('skylines-api')
restart_service('skylines')
restart_service('mapserver')
restart_service('tracking')
restart_service('celery')
restart_service('mapproxy')
@task
def restart_service(service):
# Using the sudo() command somehow always provokes a password prompt,
# even if NOPASSWD is specified in the sudoers file...
run('sudo supervisorctl restart %s' % service)
@task
def manage(cmd, user=None):
with cd(SRC_DIR):
if user:
sudo('./manage.py %s' % cmd, user=user)
else:
run('./manage.py %s' % cmd)
@task
def update_mapproxy():
with NamedTemporaryFile() as f:
content = open('mapserver/mapproxy/mapproxy.yaml').read()
content = content.replace(
'base_dir: \'/tmp/cache_data\'',
'base_dir: \'%s/cache/mapproxy\'' % APP_DIR,
)
content = content.replace(
'lock_dir: \'/tmp/cache_data/tile_locks\'',
'lock_dir: \'%s/cache/mapproxy/tile_locks\'' % APP_DIR,
)
f.write(content)
f.flush()
put(f.name, '%s/config/mapproxy.yaml' % APP_DIR)
@task
def clean_mapproxy_cache():
with cd('/home/skylines/cache/mapproxy'):
run('rm -rv *')
| Python | 0.000011 |
2d6e3beb7ea4fb5448cba6a123feb73fe25da2a1 | add kobocat deployment details | fabfile.py | fabfile.py | import os
import sys
from fabric.api import env, run, cd
DEFAULTS = {
'home': '/home/wsgi/srv/',
'repo_name': 'formhub',
}
DEPLOYMENTS = {
'dev': {
'home': '/home/ubuntu/src/',
'host_string': 'ubuntu@dev.ona.io',
'project': 'ona',
'key_filename': os.path.expanduser('~/.ssh/ona.pem'),
'virtualenv': '/home/ubuntu/.virtualenvs/ona',
'celeryd': '/etc/init.d/celeryd-ona'
},
'prod': {
'home': '/home/ubuntu/src/',
'host_string': 'ubuntu@ona.io',
'project': 'ona',
'key_filename': os.path.expanduser('~/.ssh/ona.pem'),
'virtualenv': '/home/ubuntu/.virtualenvs/ona',
'celeryd': '/etc/init.d/celeryd-ona'
},
'kobocat': {
'home': '/home/ubuntu/src/',
'host_string':
'ubuntu@ec2-54-200-151-185.us-west-2.compute.amazonaws.com',
'project': 'kobocat',
'key_filename': os.path.expanduser('~/.ssh/kobo01.pem'),
'virtualenv': '/home/ubuntu/.virtualenvs/kobocat',
'celeryd': '/etc/init.d/celeryd'
},
}
def run_in_virtualenv(command):
d = {
'activate': os.path.join(
env.virtualenv, 'bin', 'activate'),
'command': command,
}
run('source %(activate)s && %(command)s' % d)
def check_key_filename(deployment_name):
if 'key_filename' in DEPLOYMENTS[deployment_name] and \
not os.path.exists(DEPLOYMENTS[deployment_name]['key_filename']):
print "Cannot find required permissions file: %s" % \
DEPLOYMENTS[deployment_name]['key_filename']
return False
return True
def setup_env(deployment_name):
env.update(DEFAULTS)
env.update(DEPLOYMENTS[deployment_name])
if not check_key_filename(deployment_name):
sys.exit(1)
env.code_src = os.path.join(env.home, env.project)
env.pip_requirements_file = os.path.join(env.code_src, 'requirements.pip')
def deploy(deployment_name, branch='master'):
setup_env(deployment_name)
with cd(env.code_src):
run("git fetch origin")
run("git checkout origin/%s" % branch)
run("git submodule init")
run("git submodule update")
run('find . -name "*.pyc" -exec rm -rf {} \;')
# numpy pip install from requirements file fails
run_in_virtualenv("pip install numpy")
run_in_virtualenv("pip install -r %s" % env.pip_requirements_file)
with cd(env.code_src):
run_in_virtualenv(
"python manage.py syncdb --settings=formhub.local_settings")
run_in_virtualenv(
"python manage.py migrate --settings=formhub.local_settings")
run_in_virtualenv(
"python manage.py collectstatic --settings=formhub.local_settings "
"--noinput")
run("sudo %s restart" % env.celeryd)
#run("sudo /etc/init.d/celerybeat-ona restart")
run("sudo /usr/local/bin/uwsgi --reload /var/run/ona.pid")
| import os
import sys
from fabric.api import env, run, cd
DEFAULTS = {
'home': '/home/wsgi/srv/',
'repo_name': 'formhub',
}
DEPLOYMENTS = {
'dev': {
'home': '/home/ubuntu/src/',
'host_string': 'ubuntu@dev.ona.io',
'project': 'ona',
'key_filename': os.path.expanduser('~/.ssh/ona.pem'),
'virtualenv': '/home/ubuntu/.virtualenvs/ona'
},
'prod': {
'home': '/home/ubuntu/src/',
'host_string': 'ubuntu@ona.io',
'project': 'ona',
'key_filename': os.path.expanduser('~/.ssh/ona.pem'),
'virtualenv': '/home/ubuntu/.virtualenvs/ona'
},
}
def run_in_virtualenv(command):
d = {
'activate': os.path.join(
env.virtualenv, 'bin', 'activate'),
'command': command,
}
run('source %(activate)s && %(command)s' % d)
def check_key_filename(deployment_name):
if 'key_filename' in DEPLOYMENTS[deployment_name] and \
not os.path.exists(DEPLOYMENTS[deployment_name]['key_filename']):
print "Cannot find required permissions file: %s" % \
DEPLOYMENTS[deployment_name]['key_filename']
return False
return True
def setup_env(deployment_name):
env.update(DEFAULTS)
env.update(DEPLOYMENTS[deployment_name])
if not check_key_filename(deployment_name):
sys.exit(1)
env.code_src = os.path.join(env.home, env.project)
env.pip_requirements_file = os.path.join(env.code_src, 'requirements.pip')
def deploy(deployment_name, branch='master'):
setup_env(deployment_name)
with cd(env.code_src):
run("git fetch origin")
run("git checkout origin/%s" % branch)
run("git submodule init")
run("git submodule update")
run('find . -name "*.pyc" -exec rm -rf {} \;')
# numpy pip install from requirements file fails
run_in_virtualenv("pip install numpy")
run_in_virtualenv("pip install -r %s" % env.pip_requirements_file)
with cd(env.code_src):
run_in_virtualenv(
"python manage.py syncdb --settings=formhub.local_settings")
run_in_virtualenv(
"python manage.py migrate --settings=formhub.local_settings")
run_in_virtualenv(
"python manage.py collectstatic --settings=formhub.local_settings "
"--noinput")
run("sudo /etc/init.d/celeryd-ona restart")
#run("sudo /etc/init.d/celerybeat-ona restart")
run("sudo /usr/local/bin/uwsgi --reload /var/run/ona.pid")
| Python | 0 |
6fae23c1d442880256ed2d4298844a50d6a7968e | Make sure "fab publish" cleans the dist folder | fabfile.py | fabfile.py | import fabric.api as fab
def generate_type_hierarchy():
"""
Generate a document containing the available variable types.
"""
fab.local('./env/bin/python -m puresnmp.types > docs/typetree.rst')
@fab.task
def doc():
generate_type_hierarchy()
fab.local('sphinx-apidoc '
'-o docs/developer_guide/api '
'-f '
'-e '
'puresnmp '
'puresnmp/test')
with fab.lcd('docs'):
fab.local('make html')
@fab.task
def publish():
fab.local('rm -rf dist')
fab.local('python3 setup.py bdist_wheel --universal')
fab.local('python3 setup.py sdist')
fab.local('twine upload dist/*')
| import fabric.api as fab
def generate_type_hierarchy():
"""
Generate a document containing the available variable types.
"""
fab.local('./env/bin/python -m puresnmp.types > docs/typetree.rst')
@fab.task
def doc():
generate_type_hierarchy()
fab.local('sphinx-apidoc '
'-o docs/developer_guide/api '
'-f '
'-e '
'puresnmp '
'puresnmp/test')
with fab.lcd('docs'):
fab.local('make html')
@fab.task
def publish():
fab.local('python3 setup.py bdist_wheel --universal')
fab.local('python3 setup.py sdist')
fab.local('twine upload dist/*')
| Python | 0 |
24c7c61b06ef8f1396d167bc0621d58d7a233fde | Generate local webserver using SimpleHTTPServer | fabfile.py | fabfile.py | from fabric.api import local
import urllib
def clean():
local('rm -rf output')
def update_cv():
"""Get current version of my CV."""
pdf_remote = "https://github.com/dplarson/CV/raw/master/David_Larson.pdf"
pdf_local = "output/David_Larson.pdf"
try:
urllib.urlretrieve(pdf_remote, pdf_local)
except:
print "Error: unable to download newest version of CV."
def gen():
"""Generate website."""
clean()
local('pelican content -s settings.py')
update_cv()
def serve():
"""Server website locally."""
gen()
local('cd output && python -m SimpleHTTPServer')
def push():
"""Push website to server."""
gen()
local('rsync -rav output/ dplarson@ieng6.ucsd.edu:/home/linux/ieng6/oce/60/dplarson/public_html/')
| from fabric.api import local
import urllib
def clean():
local('rm -rf output')
def update_cv():
"""Get current version of my CV."""
pdf_remote = "https://github.com/dplarson/CV/raw/master/David_Larson.pdf"
pdf_local = "output/David_Larson.pdf"
try:
urllib.urlretrieve(pdf_remote, pdf_local)
except:
print "Error: unable to download newest version of CV."
def gen():
clean()
local('pelican content -s settings.py')
update_cv()
def push():
gen()
local('rsync -rav output/ dplarson@ieng6.ucsd.edu:/home/linux/ieng6/oce/60/dplarson/public_html/')
| Python | 0.999891 |
d94af25800a1e0b6d918659ef08d90f596c5a608 | change uwsgi worker reload option to graceful reload | fabfile.py | fabfile.py | # -*- coding: utf-8 -*-
import os
from fabric.api import local, run, cd, prefix, env, sudo, settings, shell_env
from deploy import server
env.host_string = '{user}@{host}:{port}'.format(
user=env.pycon_user,
host=env.pycon_host,
port=env.pycon_port
)
def deploy(target='dev', sha1=None):
if sha1 is None:
# get current working git sha1
sha1 = local('git rev-parse HEAD', capture=True)
# server code reset to current working sha1
home_dir = '/home/pyconkr/{target}.pycon.kr/pyconkr-2016'.format(target=target)
if target == 'dev':
python_env = '/home/pyconkr/.pyenv/versions/pyconkr-2016-dev'
else:
python_env = '/home/pyconkr/.pyenv/versions/pyconkr-2016'
with settings(cd(home_dir), shell_env(DJANGO_SETTINGS_MODULE='pyconkr.settings_prod')):
sudo('git fetch --all -p', user='pyconkr')
sudo('git reset --hard ' + sha1, user='pyconkr')
sudo('bower install', user='pyconkr')
sudo('%s/bin/pip install -r requirements.txt' % python_env, user='pyconkr')
sudo('%s/bin/python manage.py compilemessages' % python_env, user='pyconkr')
sudo('%s/bin/python manage.py migrate' % python_env, user='pyconkr')
sudo('%s/bin/python manage.py collectstatic --noinput' % python_env, user='pyconkr')
# worker reload
run('echo r > /var/run/pyconkr-2016-%s.fifo' % target)
def flatpages_mig(direction='www'):
dev_env = '/home/pyconkr/.pyenv/versions/pyconkr-2016-dev/bin/python'
www_env = '/home/pyconkr/.pyenv/versions/pyconkr-2016/bin/python'
from_env, to_env = (dev_env, www_env) if direction=='www' else (www_env, dev_env)
dev_dir = '/home/pyconkr/dev.pycon.kr/pyconkr-2016'
www_dir = '/home/pyconkr/www.pycon.kr/pyconkr-2016'
from_dir, to_dir = (dev_dir, www_dir) if direction=='www' else (www_dir, dev_dir)
with settings(cd(from_dir),
shell_env(DJANGO_SETTINGS_MODULE='pyconkr.settings_prod')
):
sudo('{python} manage.py dumpdata --indent 2 flatpages -o {fixture_to}'.format(
fixture_to=os.path.join(to_dir, 'pyconkr', 'fixtures', 'flatpages.json'),
python=from_env))
with settings(cd(to_dir),
shell_env(DJANGO_SETTINGS_MODULE='pyconkr.settings_prod')
):
sudo('{python} manage.py loaddata flatpages'.format(
python=to_env))
| # -*- coding: utf-8 -*-
import os
from fabric.api import local, run, cd, prefix, env, sudo, settings, shell_env
from deploy import server
env.host_string = '{user}@{host}:{port}'.format(
user=env.pycon_user,
host=env.pycon_host,
port=env.pycon_port
)
def deploy(target='dev', sha1=None):
if sha1 is None:
# get current working git sha1
sha1 = local('git rev-parse HEAD', capture=True)
# server code reset to current working sha1
home_dir = '/home/pyconkr/{target}.pycon.kr/pyconkr-2016'.format(target=target)
if target == 'dev':
python_env = '/home/pyconkr/.pyenv/versions/pyconkr-2016-dev'
else:
python_env = '/home/pyconkr/.pyenv/versions/pyconkr-2016'
with settings(cd(home_dir), shell_env(DJANGO_SETTINGS_MODULE='pyconkr.settings_prod')):
sudo('git fetch --all -p', user='pyconkr')
sudo('git reset --hard ' + sha1, user='pyconkr')
sudo('bower install', user='pyconkr')
sudo('%s/bin/pip install -r requirements.txt' % python_env, user='pyconkr')
sudo('%s/bin/python manage.py compilemessages' % python_env, user='pyconkr')
sudo('%s/bin/python manage.py migrate' % python_env, user='pyconkr')
sudo('%s/bin/python manage.py collectstatic --noinput' % python_env, user='pyconkr')
# worker reload
if target == 'dev':
sudo('restart pyconkr-2016/uwsgi-%s' % target)
else:
sudo('restart pyconkr-2016/uwsgi')
def flatpages_mig(direction='www'):
dev_env = '/home/pyconkr/.pyenv/versions/pyconkr-2016-dev/bin/python'
www_env = '/home/pyconkr/.pyenv/versions/pyconkr-2016/bin/python'
from_env, to_env = (dev_env, www_env) if direction=='www' else (www_env, dev_env)
dev_dir = '/home/pyconkr/dev.pycon.kr/pyconkr-2016'
www_dir = '/home/pyconkr/www.pycon.kr/pyconkr-2016'
from_dir, to_dir = (dev_dir, www_dir) if direction=='www' else (www_dir, dev_dir)
with settings(cd(from_dir),
shell_env(DJANGO_SETTINGS_MODULE='pyconkr.settings_prod')
):
sudo('{python} manage.py dumpdata --indent 2 flatpages -o {fixture_to}'.format(
fixture_to=os.path.join(to_dir, 'pyconkr', 'fixtures', 'flatpages.json'),
python=from_env))
with settings(cd(to_dir),
shell_env(DJANGO_SETTINGS_MODULE='pyconkr.settings_prod')
):
sudo('{python} manage.py loaddata flatpages'.format(
python=to_env))
| Python | 0 |
fcdb3de7465467e4ea4a50faec1c39ec9a26e15c | set main function | async_crawler.py | async_crawler.py | #!/usr/bin/env python
# python 3.5 async web crawler.
# https://github.com/mehmetkose/python3.5-async-crawler
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2016 Mehmet Kose mehmet@linux.com
import aiohttp
import asyncio
from urllib.parse import urljoin, urldefrag
root_url = "http://python.org"
crawled_urls, url_hub = [], [root_url, "%s/sitemap.xml" % (root_url)]
async def get_body(url):
response = await aiohttp.request('GET', url)
return await response.read()
def remove_fragment(url):
pure_url, frag = urldefrag(url)
return pure_url
def get_links(html):
new_urls = [link.split('"')[0] for link in str(html).replace("'",'"').split('href="')[1:]]
return [urljoin(root_url, remove_fragment(new_url)) for new_url in new_urls]
async def main():
for to_crawl in url_hub:
raw_html = await get_body(to_crawl)
for link in get_links(raw_html):
if root_url in link and not link in crawled_urls:
url_hub.append(link)
url_hub.remove(to_crawl)
crawled_urls.append(to_crawl)
print("url hub: %s | crawled: %s |url : %s" % (len(url_hub), len(crawled_urls), to_crawl))
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main()) | #!/usr/bin/env python
# python 3.5 async web crawler.
# https://github.com/mehmetkose/python3.5-async-crawler
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2016 Mehmet Kose mehmet@linux.com
import aiohttp
import asyncio
from urllib.parse import urljoin, urldefrag
root_url = "http://python.org"
crawled_urls = []
url_hub = [root_url, "%s/robots.txt" % (root_url), "%s/sitemap.xml" % (root_url)]
async def get_body(url):
response = await aiohttp.request('GET', url)
return await response.read()
def remove_fragment(url):
pure_url, frag = urldefrag(url)
return pure_url
def get_links(html):
new_urls = [link.split('"')[0] for link in str(html).replace("'",'"').split('href="')[1:]]
return [urljoin(root_url, remove_fragment(new_url)) for new_url in new_urls]
if __name__ == '__main__':
loop = asyncio.get_event_loop()
client = aiohttp.ClientSession(loop=loop)
for to_crawl in url_hub:
raw_html = loop.run_until_complete(get_body(to_crawl))
for link in get_links(raw_html):
if root_url in link and not link in crawled_urls:
url_hub.append(link)
url_hub.remove(to_crawl)
crawled_urls.append(to_crawl)
print("url hub: %s | crawled: %s |url : %s" % (len(url_hub), len(crawled_urls), to_crawl))
client.close()
| Python | 0.000019 |
604d610d7d7e5d883c643daeba09f5b65db943ce | Make aws template use non-interactive apt-get (#1098) | parsl/providers/aws/template.py | parsl/providers/aws/template.py | template_string = """#!/bin/bash
#sed -i 's/us-east-2\.ec2\.//g' /etc/apt/sources.list
cd ~
export DEBIAN_FRONTEND=noninteractive
apt-get update -y
apt-get install -y python3 python3-pip libffi-dev g++ libssl-dev
pip3 install numpy scipy parsl
$worker_init
$user_script
# Shutdown the instance as soon as the worker scripts exits
# or times out to avoid EC2 costs.
if ! $linger
then
halt
fi
"""
| template_string = """#!/bin/bash
#sed -i 's/us-east-2\.ec2\.//g' /etc/apt/sources.list
cd ~
apt-get update -y
apt-get install -y python3 python3-pip libffi-dev g++ libssl-dev
pip3 install numpy scipy parsl
$worker_init
$user_script
# Shutdown the instance as soon as the worker scripts exits
# or times out to avoid EC2 costs.
if ! $linger
then
halt
fi
"""
| Python | 0 |
406987b860987894e297d5172a2a3eb0f4e082f8 | Fix a bug in the data resizer. | core/data/DataResizer.py | core/data/DataResizer.py | """
DataResizer
:Authors:
Berend Klein Haneveld
"""
from vtk import vtkImageResample
from vtk import vtkVersion
VTK_MAJOR_VERSION = vtkVersion.GetVTKMajorVersion()
class DataResizer(object):
"""
DataResizer is a tool that will resize a given image dataset.
You can specify a certain magnification factor or you can use a maximum
number of voxels that it should contain. If the image is larger than the
maximum amount of voxels, it will resize the volume to just below the
specified maximum.
It will never upscale a volume! So factor value that are higher than 1.0
will not have any result.
"""
def __init__(self):
super(DataResizer, self).__init__()
def ResizeData(self, imageData, factor=1.0, maximum=0):
self.imageResampler = vtkImageResample()
self.imageResampler.SetInterpolationModeToLinear()
if VTK_MAJOR_VERSION <= 5:
self.imageResampler.SetInput(imageData)
else:
self.imageResampler.SetInputData(imageData)
# If a maximum has been set: calculate the right factor
if maximum > 0:
factor = self.calculateFactor(imageData.GetDimensions(), maximum)
# Make sure that we are never upscaling the data
if factor > 1.0:
factor = 1.0
# The factor is now only in amount of pixels. This has to be translated
# to each of the dimensions: factor^(1/3)
axisMagnificationFactor = pow(factor, 1.0/3.0)
self.resampledImageData = None
if factor != 1.0:
self.imageResampler.SetAxisMagnificationFactor(0, axisMagnificationFactor)
self.imageResampler.SetAxisMagnificationFactor(1, axisMagnificationFactor)
self.imageResampler.SetAxisMagnificationFactor(2, axisMagnificationFactor)
self.imageResampler.Update()
self.resampledImageData = self.imageResampler.GetOutput()
else:
self.resampledImageData = imageData
return self.resampledImageData
# Private methods
def calculateFactor(self, dimensions, maximum):
voxels = dimensions[0] * dimensions[1] * dimensions[2]
factor = float(maximum) / float(voxels)
return factor
| """
DataResizer
:Authors:
Berend Klein Haneveld
"""
from vtk import vtkImageResample
from vtk import vtkVersion
VTK_MAJOR_VERSION = vtkVersion.GetVTKMajorVersion()
class DataResizer(object):
"""
DataResizer is a tool that will resize a given image dataset.
You can specify a certain magnification factor or you can use a maximum
number of voxels that it should contain. If the image is larger than the
maximum amount of voxels, it will resize the volume to just below the
specified maximum.
It will never upscale a volume! So factor value that are higher than 1.0
will not have any result.
"""
def __init__(self):
super(DataResizer, self).__init__()
def ResizeData(self, imageData, factor=1.0, maximum=0):
self.imageResampler = vtkImageResample()
self.imageResampler.SetInterpolationModeToLinear()
if VTK_MAJOR_VERSION <= 5:
self.imageResampler.SetInput(imageData)
else:
self.imageResampler.SetInputData(imageData)
# If a maximum has been set: calculate the right factor
if maximum > 0:
factor = self.calculateFactor(imageData.GetDimensions(), maximum)
# Make sure that we are never upscaling the data
if factor > 1.0:
factor = 1.0
self.resampledImageData = None
if factor != 1.0:
self.imageResampler.SetAxisMagnificationFactor(0, factor)
self.imageResampler.SetAxisMagnificationFactor(1, factor)
self.imageResampler.SetAxisMagnificationFactor(2, factor)
self.imageResampler.Update()
self.resampledImageData = self.imageResampler.GetOutput()
else:
self.resampledImageData = imageData
return self.resampledImageData
# Private methods
def calculateFactor(self, dimensions, maximum):
voxels = dimensions[0] * dimensions[1] * dimensions[2]
factor = float(maximum) / float(voxels)
return factor
| Python | 0 |
31b0b97590ce496ba22a39c396ff868c6f511637 | install pre-commit | dacsspace/client.py | dacsspace/client.py | #!/usr/bin/env python3
from configparser import ConfigParser
from asnake.aspace import ASpace
class ArchivesSpaceClient:
"""Handles communication with ArchivesSpace."""
def __init__(self):
config = ConfigParser()
config.read("local_settings.cfg")
self.aspace = ASpace(baseurl=config.get('ArchivesSpace', 'baseURL'),
username=config.get('ArchivesSpace', 'user'),
password=config.get('ArchivesSpace', 'password'))
self.repo = self.aspace.repositories(config.get('ArchivesSpace', 'repository'))
def get_resources(self, published_only):
"""Returns data about resource records from AS.
Args:
published_only (boolean): Fetch only published records from AS
Returns:
resources (list): Full JSON of AS resource records
"""
if published_only is True:
for resource in self.repo.search.with_params(q='publish:true AND primary_type:resource'):
resource_json = resource.json()
return resource_json
else:
for resource in self.repo.search.with_params(q='primary_type:resource'):
resource_json = resource.json()
return resource_json
# ArchivesSpaceClient().get_resources(published_only)
| #!/usr/bin/env python3
import argparse
from configparser import ConfigParser
from asnake.aspace import ASpace
#published_only = False
class ArchivesSpaceClient:
"""Handles communication with ArchivesSpace."""
def __init__(self):
config = ConfigParser()
config.read("local_settings.cfg")
self.aspace = ASpace(baseurl=config.get('ArchivesSpace', 'baseURL'),
username=config.get('ArchivesSpace', 'user'),
password=config.get('ArchivesSpace', 'password'))
self.repo = self.aspace.repositories(config.get('ArchivesSpace', 'repository'))
def get_resources(self, published_only):
"""Returns data about resource records from AS.
Args:
published_only (boolean): Fetch only published records from AS
Returns:
resources (list): Full JSON of AS resource records
"""
if published_only is True:
for resource in self.repo.search.with_params(q='publish:true AND primary_type:resource'):
resource_json = resource.json()
return resource_json
else:
for resource in self.repo.search.with_params(q='primary_type:resource'):
resource_json = resource.json()
return resource_json
#return resource.publish
#return resource_json
#return resource.publish
#build in tests
#ArchivesSpaceClient().get_resources(published_only)
| Python | 0 |
7cfc16d016906e1437580acff42e503d3b2fa188 | Change %s to .format | src/pip/_internal/distributions/source/legacy.py | src/pip/_internal/distributions/source/legacy.py | # The following comment should be removed at some point in the future.
# mypy: disallow-untyped-defs=False
import logging
from pip._internal.build_env import BuildEnvironment
from pip._internal.distributions.base import AbstractDistribution
from pip._internal.exceptions import InstallationError
from pip._internal.utils.subprocess import runner_with_spinner_message
logger = logging.getLogger(__name__)
class SourceDistribution(AbstractDistribution):
"""Represents a source distribution.
The preparation step for these needs metadata for the packages to be
generated, either using PEP 517 or using the legacy `setup.py egg_info`.
NOTE from @pradyunsg (14 June 2019)
I expect SourceDistribution class will need to be split into
`legacy_source` (setup.py based) and `source` (PEP 517 based) when we start
bringing logic for preparation out of InstallRequirement into this class.
"""
def get_pkg_resources_distribution(self):
return self.req.get_dist()
def prepare_distribution_metadata(self, finder, build_isolation):
# Prepare for building. We need to:
# 1. Load pyproject.toml (if it exists)
# 2. Set up the build environment
self.req.load_pyproject_toml()
should_isolate = self.req.use_pep517 and build_isolation
if should_isolate:
self._setup_isolation(finder)
self.req.prepare_metadata()
self.req.assert_source_matches_version()
def _setup_isolation(self, finder):
def _raise_conflicts(conflicting_with, conflicting_reqs):
format_string = (
"Some build dependencies for {requirement} "
"conflict with {conflicting_with}: {description}."
)
error_message = format_string.format(
requirement=self.req,
conflicting_with=conflicting_with,
description=', '.join(
'{} is incompatible with {}'.format(installed, wanted)
for installed, wanted in sorted(conflicting)
)
)
raise InstallationError(error_message)
# Isolate in a BuildEnvironment and install the build-time
# requirements.
self.req.build_env = BuildEnvironment()
self.req.build_env.install_requirements(
finder, self.req.pyproject_requires, 'overlay',
"Installing build dependencies"
)
conflicting, missing = self.req.build_env.check_requirements(
self.req.requirements_to_check
)
if conflicting:
_raise_conflicts("PEP 517/518 supported requirements",
conflicting)
if missing:
logger.warning(
"Missing build requirements in pyproject.toml for %s.",
self.req,
)
logger.warning(
"The project does not specify a build backend, and "
"pip cannot fall back to setuptools without %s.",
" and ".join(map(repr, sorted(missing)))
)
# Install any extra build dependencies that the backend requests.
# This must be done in a second pass, as the pyproject.toml
# dependencies must be installed before we can call the backend.
with self.req.build_env:
runner = runner_with_spinner_message(
"Getting requirements to build wheel"
)
backend = self.req.pep517_backend
with backend.subprocess_runner(runner):
reqs = backend.get_requires_for_build_wheel()
conflicting, missing = self.req.build_env.check_requirements(reqs)
if conflicting:
_raise_conflicts("the backend dependencies", conflicting)
self.req.build_env.install_requirements(
finder, missing, 'normal',
"Installing backend dependencies"
)
| # The following comment should be removed at some point in the future.
# mypy: disallow-untyped-defs=False
import logging
from pip._internal.build_env import BuildEnvironment
from pip._internal.distributions.base import AbstractDistribution
from pip._internal.exceptions import InstallationError
from pip._internal.utils.subprocess import runner_with_spinner_message
logger = logging.getLogger(__name__)
class SourceDistribution(AbstractDistribution):
"""Represents a source distribution.
The preparation step for these needs metadata for the packages to be
generated, either using PEP 517 or using the legacy `setup.py egg_info`.
NOTE from @pradyunsg (14 June 2019)
I expect SourceDistribution class will need to be split into
`legacy_source` (setup.py based) and `source` (PEP 517 based) when we start
bringing logic for preparation out of InstallRequirement into this class.
"""
def get_pkg_resources_distribution(self):
return self.req.get_dist()
def prepare_distribution_metadata(self, finder, build_isolation):
# Prepare for building. We need to:
# 1. Load pyproject.toml (if it exists)
# 2. Set up the build environment
self.req.load_pyproject_toml()
should_isolate = self.req.use_pep517 and build_isolation
if should_isolate:
self._setup_isolation(finder)
self.req.prepare_metadata()
self.req.assert_source_matches_version()
def _setup_isolation(self, finder):
def _raise_conflicts(conflicting_with, conflicting_reqs):
format_string = (
"Some build dependencies for {requirement} "
"conflict with {conflicting_with}: {description}."
)
error_message = format_string.format(
requirement=self.req,
conflicting_with=conflicting_with,
description=', '.join(
'%s is incompatible with %s' % (installed, wanted)
for installed, wanted in sorted(conflicting)
)
)
raise InstallationError(error_message)
# Isolate in a BuildEnvironment and install the build-time
# requirements.
self.req.build_env = BuildEnvironment()
self.req.build_env.install_requirements(
finder, self.req.pyproject_requires, 'overlay',
"Installing build dependencies"
)
conflicting, missing = self.req.build_env.check_requirements(
self.req.requirements_to_check
)
if conflicting:
_raise_conflicts("PEP 517/518 supported requirements",
conflicting)
if missing:
logger.warning(
"Missing build requirements in pyproject.toml for %s.",
self.req,
)
logger.warning(
"The project does not specify a build backend, and "
"pip cannot fall back to setuptools without %s.",
" and ".join(map(repr, sorted(missing)))
)
# Install any extra build dependencies that the backend requests.
# This must be done in a second pass, as the pyproject.toml
# dependencies must be installed before we can call the backend.
with self.req.build_env:
runner = runner_with_spinner_message(
"Getting requirements to build wheel"
)
backend = self.req.pep517_backend
with backend.subprocess_runner(runner):
reqs = backend.get_requires_for_build_wheel()
conflicting, missing = self.req.build_env.check_requirements(reqs)
if conflicting:
_raise_conflicts("the backend dependencies", conflicting)
self.req.build_env.install_requirements(
finder, missing, 'normal',
"Installing backend dependencies"
)
| Python | 0.000007 |
0ca727f0ce5877ba2ca3ef74c9309c752a51fbf6 | Fix enable action on plugins | src/sentry/web/frontend/project_plugin_enable.py | src/sentry/web/frontend/project_plugin_enable.py | from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.plugins import plugins
from sentry.web.frontend.base import ProjectView
class ProjectPluginEnableView(ProjectView):
required_scope = 'project:write'
def post(self, request, organization, team, project, slug):
try:
plugin = plugins.get(slug)
except KeyError:
return self.redirect(reverse('sentry-configure-project-plugin', args=[project.organization.slug, project.slug, slug]))
if plugin.is_enabled(project):
return self.redirect(reverse('sentry-configure-project-plugin', args=[project.organization.slug, project.slug, slug]))
plugin.enable(project=project)
return self.redirect(reverse('sentry-configure-project-plugin', args=[project.organization.slug, project.slug, slug]))
| from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.plugins import plugins
from sentry.web.frontend.base import ProjectView
class ProjectPluginEnableView(ProjectView):
required_scope = 'project:write'
def post(self, request, organization, team, project, slug):
try:
plugin = plugins.get(slug)
except KeyError:
return self.redirect(reverse('sentry-configure-project-plugin', args=[project.organization.slug, project.slug, slug]))
if not plugin.is_enabled(project):
return self.redirect(reverse('sentry-configure-project-plugin', args=[project.organization.slug, project.slug, slug]))
plugin.enable(project=project)
return self.redirect(reverse('sentry-configure-project-plugin', args=[project.organization.slug, project.slug, slug]))
| Python | 0 |
0599b259ed08121160196734f7212dc7fa33149f | Remove execute_auth_api_request method | devicehive/token.py | devicehive/token.py | from devicehive.api_request import ApiRequest
class Token(object):
"""Token class."""
AUTH_HEADER_NAME = 'Authorization'
AUTH_HEADER_VALUE_PREFIX = 'Bearer '
def __init__(self, transport, auth):
self._transport = transport
self._login = auth.get('login')
self._password = auth.get('password')
self._refresh_token = auth.get('refresh_token')
self._access_token = auth.get('access_token')
def _login(self):
# TODO: implement token/login request.
# Set self._refresh_token and self._access_token after success login.
pass
def _auth(self):
api_request = ApiRequest(self._transport)
if not api_request.websocket_transport:
return
api_request.action('authenticate')
api_request.set('token', self._access_token)
api_request.execute('Authentication failure')
@property
def access_token(self):
return self._access_token
@property
def auth_header(self):
auth_header_name = self.AUTH_HEADER_NAME
auth_header_value = self.AUTH_HEADER_VALUE_PREFIX + self._access_token
return auth_header_name, auth_header_value
def refresh(self):
api_request = ApiRequest(self._transport)
api_request.method('POST')
api_request.url('token/refresh')
api_request.action('token/refresh')
api_request.set('refreshToken', self._refresh_token)
tokens = api_request.execute('Token refresh failure')
self._access_token = tokens['accessToken']
def auth(self):
if self._refresh_token:
self.refresh()
else:
self._login()
self._auth()
| from devicehive.api_request import ApiRequest
from devicehive.api_response import ApiResponseError
class Token(object):
"""Token class."""
AUTH_HEADER_NAME = 'Authorization'
AUTH_HEADER_VALUE_PREFIX = 'Bearer '
def __init__(self, transport, auth):
self._transport = transport
self._login = auth.get('login')
self._password = auth.get('password')
self._refresh_token = auth.get('refresh_token')
self._access_token = auth.get('access_token')
def _login(self):
# TODO: implement token/login request.
# Set self._refresh_token and self._access_token after success login.
pass
def _auth(self):
api_request = ApiRequest(self._transport)
if not api_request.websocket_transport:
return
api_request.action('authenticate')
api_request.set('token', self._access_token)
api_request.execute('Authentication failure')
@property
def access_token(self):
return self._access_token
@property
def auth_header(self):
auth_header_name = self.AUTH_HEADER_NAME
auth_header_value = self.AUTH_HEADER_VALUE_PREFIX + self._access_token
return auth_header_name, auth_header_value
def execute_auth_api_request(self, api_request, error_message):
api_request.header(*self.auth_header)
try:
return api_request.execute(error_message)
except ApiResponseError as api_response_error:
if api_response_error.code != 401:
raise
self.auth()
api_request.header(*self.auth_header)
return api_request.execute(error_message)
def refresh(self):
api_request = ApiRequest(self._transport)
api_request.method('POST')
api_request.url('token/refresh')
api_request.action('token/refresh')
api_request.set('refreshToken', self._refresh_token)
tokens = api_request.execute('Token refresh failure')
self._access_token = tokens['accessToken']
def auth(self):
if self._refresh_token:
self.refresh()
else:
self._login()
self._auth()
| Python | 0.000025 |
e32acfcfa14ec785a3d716f60b61cc66d6c496ea | add celery | app/tasks.py | app/tasks.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: Kun Jia
# date: 20/06/2017
# email: me@jack003.com
from celery.schedules import crontab
from celery.task import periodic_task
from celery.utils.log import get_task_logger
from flask import current_app
from pymongo import MongoClient
from app import celery
from .api import get_list, get_content
logger = get_task_logger(__name__)
"""
Example:
crontab() Execute every minute.
crontab(minute=0, hour=0) Execute daily at midnight.
crontab(minute=0, hour='*/3') Execute every three hours: midnight, 3am, 6am, 9am, noon, 3pm, 6pm, 9pm.
crontab(minute=0, hour='0,3,6,9,12,15,18,21') Same as previous.
crontab(minute='*/15') Execute every 15 minutes.
crontab(day_of_week='sunday') Execute every minute (!) at Sundays.
crontab(minute='*', hour='*', day_of_week='sun') Same as previous.
crontab(minute='*/10', hour='3,17,22', day_of_week='thu,fri') Execute every ten minutes, but only between 3-4 am, 5-6 pm, and 10-11 pm on Thursdays or Fridays.
crontab(minute=0, hour='*/2,*/3') Execute every even hour, and every hour divisible by three. This means: at every hour except: 1am, 5am, 7am, 11am, 1pm, 5pm, 7pm, 11pm
crontab(minute=0, hour='*/5') Execute hour divisible by 5. This means that it is triggered at 3pm, not 5pm (since 3pm equals the 24-hour clock value of �~@~\15�~@~], which is divisible by 5).
crontab(minute=0, hour='*/3,8-17') Execute every hour divisible by 3, and every hour during office hours (8am-5pm).
crontab(0, 0, day_of_month='2') Execute on the second day of every month.
crontab(0, 0, day_of_month='2-30/3') Execute on every even numbered day.
crontab(0, 0, day_of_month='1-7,15-21') Execute on the first and third weeks of the month.
crontab(0, 0, day_of_month='11', month_of_year='5') Execute on the eleventh of May every year.
crontab(0, 0, month_of_year='*/3') Execute on the first month of every quarter.
"""
# @periodic_task(run_every=crontab())
# def test_beat():
# return 'beat ok'
#
#
# @celery.task
# def test_add(a, b):
# return a + b
@periodic_task(run_every=crontab(minute='*/2'))
def cache_data():
app = current_app._get_current_object()
client = MongoClient(app.config['MONGODB_SETTINGS']['host'], app.config['MONGODB_SETTINGS']['port'])
db = client.hacker_news
types = ['top', 'new', 'best', 'ask', 'show', 'job']
for i, t in enumerate(types):
dlist = get_list(t)
dcontent = get_content(dlist)
data = {'_id': i + 1, 'stype': t, 'slist': dlist, 'scontent': dcontent}
db.cache.update({'_id': data['_id']}, data, True)
client.close()
return True
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: Kun Jia
# date: 20/06/2017
# email: me@jack003.com
from celery.schedules import crontab
from celery.task import periodic_task
from celery.utils.log import get_task_logger
from flask import current_app
from pymongo import MongoClient
from app import celery
from .api import get_list, get_content
logger = get_task_logger(__name__)
"""
Example:
crontab() Execute every minute.
crontab(minute=0, hour=0) Execute daily at midnight.
crontab(minute=0, hour='*/3') Execute every three hours: midnight, 3am, 6am, 9am, noon, 3pm, 6pm, 9pm.
crontab(minute=0, hour='0,3,6,9,12,15,18,21') Same as previous.
crontab(minute='*/15') Execute every 15 minutes.
crontab(day_of_week='sunday') Execute every minute (!) at Sundays.
crontab(minute='*', hour='*', day_of_week='sun') Same as previous.
crontab(minute='*/10', hour='3,17,22', day_of_week='thu,fri') Execute every ten minutes, but only between 3-4 am, 5-6 pm, and 10-11 pm on Thursdays or Fridays.
crontab(minute=0, hour='*/2,*/3') Execute every even hour, and every hour divisible by three. This means: at every hour except: 1am, 5am, 7am, 11am, 1pm, 5pm, 7pm, 11pm
crontab(minute=0, hour='*/5') Execute hour divisible by 5. This means that it is triggered at 3pm, not 5pm (since 3pm equals the 24-hour clock value of �~@~\15�~@~], which is divisible by 5).
crontab(minute=0, hour='*/3,8-17') Execute every hour divisible by 3, and every hour during office hours (8am-5pm).
crontab(0, 0, day_of_month='2') Execute on the second day of every month.
crontab(0, 0, day_of_month='2-30/3') Execute on every even numbered day.
crontab(0, 0, day_of_month='1-7,15-21') Execute on the first and third weeks of the month.
crontab(0, 0, day_of_month='11', month_of_year='5') Execute on the eleventh of May every year.
crontab(0, 0, month_of_year='*/3') Execute on the first month of every quarter.
"""
# @periodic_task(run_every=crontab())
# def test_beat():
# return 'beat ok'
#
#
# @celery.task
# def test_add(a, b):
# return a + b
@periodic_task(run_every=crontab(minute='*/2'))
def cache_data():
app = current_app._get_current_object()
client = MongoClient(app.config['MONGODB_SETTINGS']['host'], app.config['MONGODB_SETTINGS']['port'])
db = client.hacker_news
types = ['top', 'new', 'best', 'ask', 'show', 'job']
for i, t in enumerate(types):
dlist = get_list(t)
dcontent = get_content(dlist)
data = {'_id': i + 1, 'stype': t, 'dlist': dlist, 'dcontent': dcontent}
db.cache.update({'_id': data['_id']}, data, True)
client.close()
return True
| Python | 0.999861 |
0237fb8114f5a8423d39f44b2882d5dbf10954d7 | make .seen replies for CTCP ACTIONs say "doing nick message"; leaves .seen replies for PRIVMSG to channel the same ("saying message"). | willie/modules/seen.py | willie/modules/seen.py | # coding=utf8
"""
seen.py - Willie Seen Module
Copyright 2008, Sean B. Palmer, inamidst.com
Copyright © 2012, Elad Alfassa <elad@fedoraproject.org>
Licensed under the Eiffel Forum License 2.
http://willie.dftba.net
"""
from __future__ import unicode_literals
import time
import datetime
from willie.tools import Identifier
from willie.tools.time import get_timezone, format_time
from willie.module import commands, rule, priority, thread
@commands('seen')
def seen(bot, trigger):
"""Reports when and where the user was last seen."""
if not trigger.group(2):
bot.say(".seen <nick> - Reports when <nick> was last seen.")
return
nick = trigger.group(2).strip()
timestamp = bot.db.get_nick_value(nick, 'seen_timestamp')
if timestamp:
channel = bot.db.get_nick_value(nick, 'seen_channel')
message = bot.db.get_nick_value(nick, 'seen_message')
action = bot.db.get_nick_value(nick, 'seen_action')
tz = get_timezone(bot.db, bot.config, None, trigger.nick,
trigger.sender)
saw = datetime.datetime.utcfromtimestamp(timestamp)
timestamp = format_time(bot.db, bot.config, tz, trigger.nick,
trigger.sender, saw)
msg = "I last saw {} at {}".format(nick, timestamp)
if Identifier(channel) == trigger.sender:
if action:
msg = msg + " in here, doing " + nick + " " + message
else:
msg = msg + " in here, saying " + message
else:
msg += " in another channel."
bot.say(str(trigger.nick) + ': ' + msg)
else:
bot.say("Sorry, I haven't seen {} around.".format(nick))
@thread(False)
@rule('(.*)')
@priority('low')
def note(bot, trigger):
if not trigger.is_privmsg:
bot.db.set_nick_value(trigger.nick, 'seen_timestamp', time.time())
bot.db.set_nick_value(trigger.nick, 'seen_channel', trigger.sender)
bot.db.set_nick_value(trigger.nick, 'seen_message', trigger)
bot.db.set_nick_value(trigger.nick, 'seen_action', 'intent' in trigger.tags)
| # coding=utf8
"""
seen.py - Willie Seen Module
Copyright 2008, Sean B. Palmer, inamidst.com
Copyright © 2012, Elad Alfassa <elad@fedoraproject.org>
Licensed under the Eiffel Forum License 2.
http://willie.dftba.net
"""
from __future__ import unicode_literals
import time
import datetime
from willie.tools import Identifier
from willie.tools.time import get_timezone, format_time
from willie.module import commands, rule, priority, thread
@commands('seen')
def seen(bot, trigger):
"""Reports when and where the user was last seen."""
if not trigger.group(2):
bot.say(".seen <nick> - Reports when <nick> was last seen.")
return
nick = trigger.group(2).strip()
timestamp = bot.db.get_nick_value(nick, 'seen_timestamp')
if timestamp:
channel = bot.db.get_nick_value(nick, 'seen_channel')
message = bot.db.get_nick_value(nick, 'seen_message')
tz = get_timezone(bot.db, bot.config, None, trigger.nick,
trigger.sender)
saw = datetime.datetime.utcfromtimestamp(timestamp)
timestamp = format_time(bot.db, bot.config, tz, trigger.nick,
trigger.sender, saw)
msg = "I last saw {} at {}".format(nick, timestamp)
if Identifier(channel) == trigger.sender:
msg = msg + " in here, saying " + message
else:
msg += " in another channel."
bot.say(str(trigger.nick) + ': ' + msg)
else:
bot.say("Sorry, I haven't seen {} around.".format(nick))
@thread(False)
@rule('(.*)')
@priority('low')
def note(bot, trigger):
if not trigger.is_privmsg:
bot.db.set_nick_value(trigger.nick, 'seen_timestamp', time.time())
bot.db.set_nick_value(trigger.nick, 'seen_channel', trigger.sender)
bot.db.set_nick_value(trigger.nick, 'seen_message', trigger)
| Python | 0 |
b4af07754c64e915fcfc5fbec00389dee6a11020 | disable one unit test on travis | _unittests/ut_helpgen/test_notebooks_api.py | _unittests/ut_helpgen/test_notebooks_api.py | """
@brief test log(time=8s)
@author Xavier Dupre
"""
import sys
import os
import unittest
try:
import src
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..")))
if path not in sys.path:
sys.path.append(path)
import src
from src.pyquickhelper.loghelper import fLOG
from src.pyquickhelper.pycode import get_temp_folder, is_travis_or_appveyor
from src.pyquickhelper.helpgen import nb2slides, nb2html, nb2rst
from src.pyquickhelper.ipythonhelper import read_nb
class TestNotebookAPI (unittest.TestCase):
def test_convert_slides_api_html(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
if sys.version_info[0] == 2:
return
path = os.path.abspath(os.path.split(__file__)[0])
fold = os.path.normpath(
os.path.join(
path,
"..",
"..",
"_doc",
"notebooks"))
nb = os.path.join(fold, "example_pyquickhelper.ipynb")
self.assertTrue(os.path.exists(nb))
nbr = read_nb(nb, kernel=False)
temp = get_temp_folder(__file__, "temp_nb_api_html")
outfile = os.path.join(temp, "out_nb_slides.slides.html")
res = nb2slides(nbr, outfile)
self.assertTrue(len(res) > 1)
for r in res:
self.assertTrue(os.path.exists(r))
outfile = os.path.join(temp, "out_nb_slides.html")
res = nb2html(nbr, outfile)
self.assertEqual(len(res), 1)
for r in res:
self.assertTrue(os.path.exists(r))
def test_convert_slides_api_rst(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
if sys.version_info[0] == 2:
return
if is_travis_or_appveyor() in ('travis', 'appveyor'):
# no latex, no pandoc
return
path = os.path.abspath(os.path.split(__file__)[0])
fold = os.path.normpath(
os.path.join(
path,
"..",
"..",
"_doc",
"notebooks"))
nb = os.path.join(fold, "example_pyquickhelper.ipynb")
self.assertTrue(os.path.exists(nb))
nbr = read_nb(nb, kernel=False)
temp = get_temp_folder(__file__, "temp_nb_api_rst")
outfile = os.path.join(temp, "out_nb_slides.rst")
res = nb2rst(nbr, outfile)
self.assertEqual(len(res), 1)
for r in res:
self.assertTrue(os.path.exists(r))
if __name__ == "__main__":
unittest.main()
| """
@brief test log(time=8s)
@author Xavier Dupre
"""
import sys
import os
import unittest
try:
import src
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..")))
if path not in sys.path:
sys.path.append(path)
import src
from src.pyquickhelper.loghelper import fLOG
from src.pyquickhelper.pycode import get_temp_folder
from src.pyquickhelper.helpgen import nb2slides, nb2html, nb2rst
from src.pyquickhelper.ipythonhelper import read_nb
class TestNotebookAPI (unittest.TestCase):
def test_convert_slides_api_html(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
if sys.version_info[0] == 2:
return
path = os.path.abspath(os.path.split(__file__)[0])
fold = os.path.normpath(
os.path.join(
path,
"..",
"..",
"_doc",
"notebooks"))
nb = os.path.join(fold, "example_pyquickhelper.ipynb")
self.assertTrue(os.path.exists(nb))
nbr = read_nb(nb, kernel=False)
temp = get_temp_folder(__file__, "temp_nb_api_html")
outfile = os.path.join(temp, "out_nb_slides.slides.html")
res = nb2slides(nbr, outfile)
self.assertTrue(len(res) > 1)
for r in res:
self.assertTrue(os.path.exists(r))
outfile = os.path.join(temp, "out_nb_slides.html")
res = nb2html(nbr, outfile)
self.assertEqual(len(res), 1)
for r in res:
self.assertTrue(os.path.exists(r))
def test_convert_slides_api_rst(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
if sys.version_info[0] == 2:
return
path = os.path.abspath(os.path.split(__file__)[0])
fold = os.path.normpath(
os.path.join(
path,
"..",
"..",
"_doc",
"notebooks"))
nb = os.path.join(fold, "example_pyquickhelper.ipynb")
self.assertTrue(os.path.exists(nb))
nbr = read_nb(nb, kernel=False)
temp = get_temp_folder(__file__, "temp_nb_api_rst")
outfile = os.path.join(temp, "out_nb_slides.rst")
res = nb2rst(nbr, outfile)
self.assertEqual(len(res), 1)
for r in res:
self.assertTrue(os.path.exists(r))
if __name__ == "__main__":
unittest.main()
| Python | 0 |
b03b9276e48edfa53a70a46dc5779cd1de2299e0 | add priv_dns_name sub command | aws_utils/ec2.py | aws_utils/ec2.py | #!/usr/bin/env python3
import sys
import argparse
import boto3
# --------------------------------------------------------------------------------
# arg parse
# --------------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='ec2 utils')
parser.set_defaults(target='')
sub_parsers = parser.add_subparsers(title='sub commands')
# --------------------------------------------------------------------------------
# other functions
# --------------------------------------------------------------------------------
def getInstanceName(instance):
for tag in instance.tags:
if tag['Key'] == 'Name':
return tag['Value']
return None
def printIfMatchOrEmpty(ins, cond, out):
if cond == None or ins == cond:
print(out)
# --------------------------------------------------------------------------------
# sub commands
# --------------------------------------------------------------------------------
def ids(ns):
ec2 = boto3.resource('ec2')
for instance in ec2.instances.all():
printIfMatchOrEmpty(getInstanceName(instance), ns.name, instance.instance_id)
sub_parser = sub_parsers.add_parser('ids')
sub_parser.set_defaults(target='ids')
sub_parser.set_defaults(func=ids)
sub_parser.add_argument('--name')
def names(ns):
ec2 = boto3.resource('ec2')
for instance in ec2.instances.all():
name = ''
for tag in instance.tags:
if tag['Key'] == 'Name':
name = tag['Value']
break
print(name)
sub_parser = sub_parsers.add_parser('names')
sub_parser.set_defaults(target='names')
sub_parser.set_defaults(func=names)
def status(ns):
ec2 = boto3.resource('ec2')
for instance in ec2.instances.all():
name = getInstanceName(instance)
print("id : {0}, {2} ({1})".format(
instance.instance_id,
name,
instance.state['Name']
))
sub_parser = sub_parsers.add_parser('status')
sub_parser.set_defaults(target='status')
sub_parser.set_defaults(func=status)
def ip_pub(ns):
ec2 = boto3.resource('ec2')
for instance in ec2.instances.all():
printIfMatchOrEmpty(getInstanceName(instance), ns.name, instance.public_ip_address)
sub_parser = sub_parsers.add_parser('ip_pub')
sub_parser.set_defaults(target='ip_pub')
sub_parser.add_argument('--name')
sub_parser.set_defaults(func=ip_pub)
def ip_pub(ns):
ec2 = boto3.resource('ec2')
for instance in ec2.instances.all():
printIfMatchOrEmpty(getInstanceName(instance), ns.name, instance.private_dns_name)
sub_parser = sub_parsers.add_parser('priv_dns_name')
sub_parser.set_defaults(target='priv_dns_name')
sub_parser.add_argument('--name')
sub_parser.set_defaults(func=ip_pub)
def start(ns):
ec2 = boto3.resource('ec2')
for instance in ec2.instances.all():
if ns.name and getInstanceName(instance) in ns.args:
instance.start()
elif instance.id in ns.args:
instance.start()
sub_parser = sub_parsers.add_parser('start')
sub_parser.set_defaults(target='start')
sub_parser.add_argument('--name', action='store_true')
sub_parser.add_argument('args')
sub_parser.set_defaults(func=start)
def stop(ns):
ec2 = boto3.resource('ec2')
for instance in ec2.instances.all():
if ns.name and getInstanceName(instance) in ns.args:
instance.stop()
elif instance.id in ns.args:
instance.stop()
sub_parser = sub_parsers.add_parser('stop')
sub_parser.set_defaults(target='stop')
sub_parser.add_argument('--name', action='store_true')
sub_parser.add_argument('args')
sub_parser.set_defaults(func=stop)
# --------------------------------------------------------------------------------
# main
# --------------------------------------------------------------------------------
def main():
namespace = parser.parse_args()
if namespace.target is not None and namespace.target:
namespace.func(namespace)
else:
parser.print_help()
if __name__ == '__main__':
main()
| #!/usr/bin/env python3
import sys
import argparse
import boto3
# --------------------------------------------------------------------------------
# arg parse
# --------------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='ec2 utils')
parser.set_defaults(target='')
sub_parsers = parser.add_subparsers(title='sub commands')
# --------------------------------------------------------------------------------
# other functions
# --------------------------------------------------------------------------------
def getInstanceName(instance):
for tag in instance.tags:
if tag['Key'] == 'Name':
return tag['Value']
return None
def printIfMatchOrEmpty(ins, cond, out):
if cond == None or ins == cond:
print(out)
# --------------------------------------------------------------------------------
# sub commands
# --------------------------------------------------------------------------------
def ids(ns):
ec2 = boto3.resource('ec2')
for instance in ec2.instances.all():
printIfMatchOrEmpty(getInstanceName(instance), ns.name, instance.instance_id)
sub_parser = sub_parsers.add_parser('ids')
sub_parser.set_defaults(target='ids')
sub_parser.set_defaults(func=ids)
sub_parser.add_argument('--name')
def names(ns):
ec2 = boto3.resource('ec2')
for instance in ec2.instances.all():
name = ''
for tag in instance.tags:
if tag['Key'] == 'Name':
name = tag['Value']
break
print(name)
sub_parser = sub_parsers.add_parser('names')
sub_parser.set_defaults(target='names')
sub_parser.set_defaults(func=names)
def status(ns):
ec2 = boto3.resource('ec2')
for instance in ec2.instances.all():
name = getInstanceName(instance)
print("id : {0}, {2} ({1})".format(
instance.instance_id,
name,
instance.state['Name']
))
sub_parser = sub_parsers.add_parser('status')
sub_parser.set_defaults(target='status')
sub_parser.set_defaults(func=status)
def ip_pub(ns):
ec2 = boto3.resource('ec2')
for instance in ec2.instances.all():
printIfMatchOrEmpty(getInstanceName(instance), ns.name, instance.public_ip_address)
sub_parser = sub_parsers.add_parser('ip_pub')
sub_parser.set_defaults(target='ip_pub')
sub_parser.add_argument('--name')
sub_parser.set_defaults(func=ip_pub)
def start(ns):
ec2 = boto3.resource('ec2')
for instance in ec2.instances.all():
if ns.name and getInstanceName(instance) in ns.args:
instance.start()
elif instance.id in ns.args:
instance.start()
sub_parser = sub_parsers.add_parser('start')
sub_parser.set_defaults(target='start')
sub_parser.add_argument('--name', action='store_true')
sub_parser.add_argument('args')
sub_parser.set_defaults(func=start)
def stop(ns):
ec2 = boto3.resource('ec2')
for instance in ec2.instances.all():
if ns.name and getInstanceName(instance) in ns.args:
instance.stop()
elif instance.id in ns.args:
instance.stop()
sub_parser = sub_parsers.add_parser('stop')
sub_parser.set_defaults(target='stop')
sub_parser.add_argument('--name', action='store_true')
sub_parser.add_argument('args')
sub_parser.set_defaults(func=stop)
# --------------------------------------------------------------------------------
# main
# --------------------------------------------------------------------------------
def main():
namespace = parser.parse_args()
if namespace.target is not None and namespace.target:
namespace.func(namespace)
else:
parser.print_help()
if __name__ == '__main__':
main()
| Python | 0.000003 |
107b72da8629d97452dc6b7ee0f44eeb7d9e351c | remove x-based matplotlib rendering | gamma_limits_sensitivity/__init__.py | gamma_limits_sensitivity/__init__.py | '''
This is the hard working code in order to calculate ULs, sensitivities,
and time to detections.
'''
import matplotlib.pyplot as plt
matplotlib.use('Agg')
def upper_limit(N_on, N_off, alpha, l_lim, A_eff):
figures = [plt.figure()]
dictionary = {
'plots': figures
}
return dictionary
def sensitivity(s_bg, alpha, t_obs, A_eff):
figures = [plt.figure()]
dictionary = {
'plots': figures
}
return dictionary
def predict(s_bg, alpha, f_0, df_0, Gamma, dGamma, E_0, A_eff):
figures = [plt.figure()]
times = [1., 2., 3.]
dictionary = {
'times': times,
'plots': figures
}
return dictionary
| '''
This is the hard working code in order to calculate ULs, sensitivities,
and time to detections.
'''
import matplotlib.pyplot as plt
def upper_limit(N_on, N_off, alpha, l_lim, A_eff):
figures = [plt.figure()]
dictionary = {
'plots': figures
}
return dictionary
def sensitivity(s_bg, alpha, t_obs, A_eff):
figures = [plt.figure()]
dictionary = {
'plots': figures
}
return dictionary
def predict(s_bg, alpha, f_0, df_0, Gamma, dGamma, E_0, A_eff):
figures = [plt.figure()]
times = [1., 2., 3.]
dictionary = {
'times': times,
'plots': figures
}
return dictionary
| Python | 0.000001 |
2a67ef989fa79aefccb9bcefe543715346642f91 | Normalize time to UTC. | abusehelper/contrib/autoshun/autoshunbot.py | abusehelper/contrib/autoshun/autoshunbot.py | import idiokit
import time as _time
import calendar
from abusehelper.core import utils, cymruwhois, bot, events
AUTOSHUN_CSV_URL = "http://www.autoshun.org/files/shunlist.csv"
class AutoshunBot(bot.PollingBot):
COLUMNS = ["ip", "time", "type"]
feed_url = bot.Param(default=AUTOSHUN_CSV_URL)
use_cymru_whois = bot.BoolParam(default=True)
def poll(self):
pipe = self._poll(url=self.feed_url)
if self.use_cymru_whois:
pipe = pipe | cymruwhois.augment("ip")
return pipe | self._normalize()
@idiokit.stream
def _poll(self, url):
self.log.info("Downloading %s" % url)
try:
info, fileobj = yield utils.fetch_url(url)
except utils.FetchUrlFailed, fuf:
self.log.error("Download failed: %r", fuf)
idiokit.stop()
self.log.info("Downloaded")
# Skip first line
fileobj.readline()
yield utils.csv_to_events(fileobj,
columns=self.COLUMNS,
charset=info.get_param("charset"))
@idiokit.stream
def _normalize(self):
while True:
event = yield idiokit.next()
event.add("feed", "autoshun")
event.add("source url", self.feed_url)
times = event.values("time")
event.clear("time")
for time in times:
event.add("time", self._normalize_time(time))
yield idiokit.send(event)
def _normalize_time(self, time):
parsed = _time.strptime(time, "%Y-%m-%d %H:%M:%S")
seconds = calendar.timegm(parsed)
seconds += 5 * 3600 # UTC-5 to UTC
time_tuple = _time.gmtime(seconds)
return _time.strftime("%Y-%m-%d %H:%M:%S UTC", time_tuple)
if __name__ == "__main__":
AutoshunBot.from_command_line().execute()
| import idiokit
from abusehelper.core import utils, cymruwhois, bot, events
AUTOSHUN_CSV_URL = "http://www.autoshun.org/files/shunlist.csv"
class AutoshunBot(bot.PollingBot):
COLUMNS = ["ip", "time", "type"]
feed_url = bot.Param(default=AUTOSHUN_CSV_URL)
use_cymru_whois = bot.BoolParam(default=True)
def poll(self):
pipe = self._poll(url=self.feed_url)
if self.use_cymru_whois:
pipe = pipe | cymruwhois.augment("ip")
return pipe | self._normalize()
@idiokit.stream
def _poll(self, url):
self.log.info("Downloading %s" % url)
try:
info, fileobj = yield utils.fetch_url(url)
except utils.FetchUrlFailed, fuf:
self.log.error("Download failed: %r", fuf)
idiokit.stop()
self.log.info("Downloaded")
# Skip first line
fileobj.readline()
yield utils.csv_to_events(fileobj,
columns=self.COLUMNS,
charset=info.get_param("charset"))
@idiokit.stream
def _normalize(self):
while True:
event = yield idiokit.next()
event.add("feed", "autoshun")
event.add("source url", self.feed_url)
yield idiokit.send(event)
if __name__ == "__main__":
AutoshunBot.from_command_line().execute()
| Python | 0.960634 |
86af85e46b0b313ecd0804916539d18556fba84a | Use api.constrains | account_analytic_required/models/account.py | account_analytic_required/models/account.py | # -*- coding: utf-8 -*-
# © 2011 Akretion
# © 2016 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl)
from openerp import _, api, exceptions, fields, models
from openerp.tools import float_is_zero
class AccountAccountType(models.Model):
_inherit = "account.account.type"
@api.model
def _get_policies(self):
"""This is the method to be inherited for adding policies"""
return [('optional', 'Optional'),
('always', 'Always'),
('never', 'Never')]
analytic_policy = fields.Selection(
_get_policies,
'Policy for analytic account',
required=True,
default='optional',
help="Set the policy for analytic accounts : if you select "
"'Optional', the accountant is free to put an analytic account "
"on an account move line with this type of account ; if you "
"select 'Always', the accountant will get an error message if "
"there is no analytic account ; if you select 'Never', the "
"accountant will get an error message if an analytic account "
"is present.")
class AccountMoveLine(models.Model):
_inherit = "account.move.line"
@api.model
def _get_analytic_policy(self, account):
""" Extension point to obtain analytic policy for an account """
return account.user_type_id.analytic_policy
@api.multi
def _check_analytic_required_msg(self):
for move_line in self:
prec = move_line.company_currency_id.rounding
if (float_is_zero(move_line.debit, precision_rounding=prec) and
float_is_zero(move_line.credit, precision_rounding=prec)):
continue
analytic_policy = self._get_analytic_policy(move_line.account_id)
if (analytic_policy == 'always' and
not move_line.analytic_account_id):
return _("Analytic policy is set to 'Always' with account "
"%s '%s' but the analytic account is missing in "
"the account move line with label '%s'."
) % (move_line.account_id.code,
move_line.account_id.name,
move_line.name)
elif (analytic_policy == 'never' and
move_line.analytic_account_id):
return _("Analytic policy is set to 'Never' with account %s "
"'%s' but the account move line with label '%s' "
"has an analytic account %s '%s'."
) % (move_line.account_id.code,
move_line.account_id.name,
move_line.name,
move_line.analytic_account_id.code,
move_line.analytic_account_id.name)
@api.constrains('analytic_account_id', 'account_id', 'debit', 'credit')
def _check_analytic_required(self):
for rec in self:
message = rec._check_analytic_required_msg()
if message:
raise exceptions.ValidationError(message)
| # -*- coding: utf-8 -*-
# © 2011 Akretion
# © 2016 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl)
from openerp import _, api, fields, models
from openerp.tools import float_is_zero
class AccountAccountType(models.Model):
_inherit = "account.account.type"
@api.model
def _get_policies(self):
"""This is the method to be inherited for adding policies"""
return [('optional', 'Optional'),
('always', 'Always'),
('never', 'Never')]
analytic_policy = fields.Selection(
_get_policies,
'Policy for analytic account',
required=True,
default='optional',
help="Set the policy for analytic accounts : if you select "
"'Optional', the accountant is free to put an analytic account "
"on an account move line with this type of account ; if you "
"select 'Always', the accountant will get an error message if "
"there is no analytic account ; if you select 'Never', the "
"accountant will get an error message if an analytic account "
"is present.")
class AccountMoveLine(models.Model):
_inherit = "account.move.line"
@api.model
def _get_analytic_policy(self, account):
""" Extension point to obtain analytic policy for an account """
return account.user_type_id.analytic_policy
@api.multi
def _check_analytic_required_msg(self):
for move_line in self:
prec = move_line.company_currency_id.rounding
if (float_is_zero(move_line.debit, precision_rounding=prec) and
float_is_zero(move_line.credit, precision_rounding=prec)):
continue
analytic_policy = self._get_analytic_policy(move_line.account_id)
if (analytic_policy == 'always' and
not move_line.analytic_account_id):
return _("Analytic policy is set to 'Always' with account "
"%s '%s' but the analytic account is missing in "
"the account move line with label '%s'."
) % (move_line.account_id.code,
move_line.account_id.name,
move_line.name)
elif (analytic_policy == 'never' and
move_line.analytic_account_id):
return _("Analytic policy is set to 'Never' with account %s "
"'%s' but the account move line with label '%s' "
"has an analytic account %s '%s'."
) % (move_line.account_id.code,
move_line.account_id.name,
move_line.name,
move_line.analytic_account_id.code,
move_line.analytic_account_id.name)
@api.multi
def _check_analytic_required(self):
return not self._check_analytic_required_msg()
_constraints = [(_check_analytic_required,
_check_analytic_required_msg,
['analytic_account_id', 'account_id', 'debit', 'credit'])]
| Python | 0.000001 |
19afe973bffe1bb90942757fcbf81f3630ffddda | Update code formatting. | crawler/args.py | crawler/args.py | #!/usr/bin/env python3
# chameleon-crawler
#
# Copyright 2015 ghostwords.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from os import path
import argparse
def is_valid_file(f, parser):
if path.isfile(f):
return f
raise argparse.ArgumentTypeError("%s does not exist!" % f)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"crx", metavar='CHAMELEON_CRX_FILE_PATH',
type=lambda x: is_valid_file(x, parser),
help="path to Chameleon CRX package"
)
group = parser.add_mutually_exclusive_group()
group.add_argument(
"--headless", action="store_true", default=True,
help="use a virtual display (default)"
)
group.add_argument("--no-headless", dest='headless', action="store_false")
parser.add_argument(
"-n", dest='num_crawlers', type=int,
choices=range(1, 9), default=4,
help="how many browsers to use in parallel "
"(default: %(default)s)"
)
parser.add_argument(
"-q", "--quiet", action="store_true", default=False,
help="turn off standard output"
)
parser.add_argument(
"-t", "--timeout", metavar='SECONDS',
type=int, default=20,
help="how many seconds to wait for pages to finish "
"loading before timing out (default: %(default)s)"
)
parser.add_argument(
"--urls", metavar='URL_FILE_PATH',
type=argparse.FileType('r'), default='urls.txt',
help="path to URL list file (default: %(default)s)"
)
return parser.parse_args()
| #!/usr/bin/env python3
# chameleon-crawler
#
# Copyright 2015 ghostwords.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from os import path
import argparse
def is_valid_file(f, parser):
if path.isfile(f):
return f
raise argparse.ArgumentTypeError("%s does not exist!" % f)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("crx", metavar='CHAMELEON_CRX_FILE_PATH',
type=lambda x: is_valid_file(x, parser),
help="path to Chameleon CRX package")
group = parser.add_mutually_exclusive_group()
group.add_argument("--headless", action="store_true", default=True,
help="use a virtual display (default)")
group.add_argument("--no-headless", dest='headless', action="store_false")
parser.add_argument("-n", dest='num_crawlers', type=int,
choices=range(1, 9), default=4,
help="how many browsers to use in parallel "
"(default: %(default)s)")
parser.add_argument("-q", "--quiet", action="store_true", default=False,
help="turn off standard output")
parser.add_argument("-t", "--timeout", metavar='SECONDS',
type=int, default=20,
help="how many seconds to wait for pages to finish "
"loading before timing out (default: %(default)s)")
parser.add_argument("--urls", metavar='URL_FILE_PATH',
type=argparse.FileType('r'), default='urls.txt',
help="path to URL list file (default: %(default)s)")
return parser.parse_args()
| Python | 0 |
696b4e093171e9d6f17502650f15c9299438b874 | Drop Py2 and six on tests/integration/modules/test_virtualenv_mod.py | tests/integration/modules/test_virtualenv_mod.py | tests/integration/modules/test_virtualenv_mod.py | import os
import tempfile
import salt.utils.path
from salt.modules.virtualenv_mod import KNOWN_BINARY_NAMES
from tests.support.case import ModuleCase
from tests.support.helpers import slowTest
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import skipIf
@skipIf(
salt.utils.path.which_bin(KNOWN_BINARY_NAMES) is None, "virtualenv not installed"
)
class VirtualenvModuleTest(ModuleCase):
"""
Validate the virtualenv module
"""
def setUp(self):
super().setUp()
self.venv_test_dir = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
self.venv_dir = os.path.join(self.venv_test_dir, "venv")
@slowTest
def test_create_defaults(self):
"""
virtualenv.managed
"""
self.run_function("virtualenv.create", [self.venv_dir])
pip_file = os.path.join(self.venv_dir, "bin", "pip")
self.assertTrue(os.path.exists(pip_file))
@slowTest
def test_site_packages(self):
pip_bin = os.path.join(self.venv_dir, "bin", "pip")
self.run_function(
"virtualenv.create", [self.venv_dir], system_site_packages=True
)
with_site = self.run_function("pip.freeze", bin_env=pip_bin)
self.run_function("file.remove", [self.venv_dir])
self.run_function("virtualenv.create", [self.venv_dir])
without_site = self.run_function("pip.freeze", bin_env=pip_bin)
self.assertFalse(with_site == without_site)
@slowTest
def test_clear(self):
pip_bin = os.path.join(self.venv_dir, "bin", "pip")
self.run_function("virtualenv.create", [self.venv_dir])
self.run_function("pip.install", [], pkgs="pep8", bin_env=pip_bin)
self.run_function("virtualenv.create", [self.venv_dir], clear=True)
packages = self.run_function("pip.list", prefix="pep8", bin_env=pip_bin)
self.assertFalse("pep8" in packages)
def test_virtualenv_ver(self):
ret = self.run_function("virtualenv.virtualenv_ver", [self.venv_dir])
assert isinstance(ret, list)
assert all([isinstance(x, int) for x in ret])
def tearDown(self):
self.run_function("file.remove", [self.venv_test_dir])
| # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import os
import tempfile
import salt.utils.path
from salt.modules.virtualenv_mod import KNOWN_BINARY_NAMES
from tests.support.case import ModuleCase
from tests.support.helpers import slowTest
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import skipIf
@skipIf(
salt.utils.path.which_bin(KNOWN_BINARY_NAMES) is None, "virtualenv not installed"
)
class VirtualenvModuleTest(ModuleCase):
"""
Validate the virtualenv module
"""
def setUp(self):
super(VirtualenvModuleTest, self).setUp()
self.venv_test_dir = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
self.venv_dir = os.path.join(self.venv_test_dir, "venv")
@slowTest
def test_create_defaults(self):
"""
virtualenv.managed
"""
self.run_function("virtualenv.create", [self.venv_dir])
pip_file = os.path.join(self.venv_dir, "bin", "pip")
self.assertTrue(os.path.exists(pip_file))
@slowTest
def test_site_packages(self):
pip_bin = os.path.join(self.venv_dir, "bin", "pip")
self.run_function(
"virtualenv.create", [self.venv_dir], system_site_packages=True
)
with_site = self.run_function("pip.freeze", bin_env=pip_bin)
self.run_function("file.remove", [self.venv_dir])
self.run_function("virtualenv.create", [self.venv_dir])
without_site = self.run_function("pip.freeze", bin_env=pip_bin)
self.assertFalse(with_site == without_site)
@slowTest
def test_clear(self):
pip_bin = os.path.join(self.venv_dir, "bin", "pip")
self.run_function("virtualenv.create", [self.venv_dir])
self.run_function("pip.install", [], pkgs="pep8", bin_env=pip_bin)
self.run_function("virtualenv.create", [self.venv_dir], clear=True)
packages = self.run_function("pip.list", prefix="pep8", bin_env=pip_bin)
self.assertFalse("pep8" in packages)
def test_virtualenv_ver(self):
ret = self.run_function("virtualenv.virtualenv_ver", [self.venv_dir])
assert isinstance(ret, list)
assert all([isinstance(x, int) for x in ret])
def tearDown(self):
self.run_function("file.remove", [self.venv_test_dir])
| Python | 0 |
d8b144c3142534714ecff90cf88749a6b8ed347d | Remove django 1.8 workaround no longer needed | pucas/management/commands/createcasuser.py | pucas/management/commands/createcasuser.py | from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand
from pucas.ldap import LDAPSearch, LDAPSearchException, \
user_info_from_ldap
class Command(BaseCommand):
help = 'Initialize a new CAS user account'
def add_arguments(self, parser):
parser.add_argument('netids', nargs='+')
parser.add_argument(
'--admin',
help='Give the specified user(s) superuser permissions (equivalent to createsuperuser)',
action='store_true',
default=False
)
parser.add_argument(
'--staff',
help='Give the specified user(s) staff permissions',
action='store_true',
default=False
)
def handle(self, *args, **options):
User = get_user_model()
ldap_search = LDAPSearch()
netids = options['netids']
admin = options['admin']
staff = options['staff']
for netid in netids:
try:
# make sure we can find the netid in LDAP first
ldap_search.find_user(netid)
user, created = User.objects.get_or_create(username=netid)
# NOTE: should we re-init data from ldap even if user
# already exists, or error?
user_info_from_ldap(user)
# If admin flag is set, make the user an admin
if admin or staff:
user.is_staff = True
if admin:
user.is_superuser = True
user.save()
self.stdout.write(
self.style.SUCCESS(
"%s user '%s'"
% ('Created' if created else 'Updated', netid)))
except LDAPSearchException:
self.stderr.write(
self.style.ERROR("LDAP information for '%s' not found"
% netid))
| from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand
from pucas.ldap import LDAPSearch, LDAPSearchException, \
user_info_from_ldap
class Command(BaseCommand):
help = 'Initialize a new CAS user account'
def add_arguments(self, parser):
parser.add_argument('netids', nargs='+')
parser.add_argument(
'--admin',
help='Give the specified user(s) superuser permissions (equivalent to createsuperuser)',
action='store_true',
default=False
)
parser.add_argument(
'--staff',
help='Give the specified user(s) staff permissions',
action='store_true',
default=False
)
def handle(self, *args, **options):
User = get_user_model()
ldap_search = LDAPSearch()
netids = options['netids']
admin = options['admin']
staff = options['staff']
for netid in netids:
try:
# make sure we can find the netid in LDAP first
ldap_search.find_user(netid)
user, created = User.objects.get_or_create(username=netid)
# NOTE: should we re-init data from ldap even if user
# already exists, or error?
user_info_from_ldap(user)
# If admin flag is set, make the user an admin
if admin or staff:
user.is_staff = True
if admin:
user.is_superuser = True
user.save()
self.stdout.write(
self.style_success("%s user '%s'" \
% ('Created' if created else 'Updated', netid)))
except LDAPSearchException:
self.stderr.write(
self.style.ERROR("LDAP information for '%s' not found" \
% netid))
def style_success(self, msg):
# workaround to support django 1.8 - style.SUCCESS
# only added in django 1.9
if hasattr(self.style, 'SUCCESS'):
return self.style.SUCCESS(msg)
else:
return msg
| Python | 0 |
872320e02d5c922e177434f6b9fa70af8cf822b9 | Revert "RT-26" | wkhtmltopdf/__init__.py | wkhtmltopdf/__init__.py | import os
if 'DJANGO_SETTINGS_MODULE' in os.environ:
from .utils import *
__author__ = 'Incuna Ltd'
__version__ = '2.0.3'
| # Have to comment this import to perfrom pip install at the same time as django install
# import os
# if 'DJANGO_SETTINGS_MODULE' in os.environ:
# from .utils import *
__author__ = 'Incuna Ltd'
__version__ = '2.0.3'
| Python | 0.000001 |
563a82246180d949917bcd444411bbeb82604e97 | Add an assertion in search.py | recipe_modules/buildbucket/tests/search.py | recipe_modules/buildbucket/tests/search.py | # Copyright 2019 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
from recipe_engine import types
from google.protobuf import json_format
from PB.go.chromium.org.luci.buildbucket.proto import build as build_pb2
from PB.go.chromium.org.luci.buildbucket.proto import common as common_pb2
from PB.go.chromium.org.luci.buildbucket.proto import rpc as rpc_pb2
DEPS = [
'buildbucket',
'json',
'properties',
'runtime',
'step'
]
def RunSteps(api):
limit = api.properties.get('limit')
builds = api.buildbucket.search(
rpc_pb2.BuildPredicate(
gerrit_changes=list(api.buildbucket.build.input.gerrit_changes),
),
limit=limit,
)
assert limit is None or len(builds) <= limit
pres = api.step.active_result.presentation
for b in builds:
pres.logs['build %s' % b.id] = json_format.MessageToJson(b).splitlines()
def GenTests(api):
def test(test_name, tags=None, **req):
return (
api.test(test_name) +
api.runtime(is_luci=True, is_experimental=False) +
api.buildbucket.try_build(
project='chromium',
builder='Builder',
git_repo='https://chromium.googlesource.com/chromium/src',
)
)
yield (
test('basic')
)
yield (
test('two builds') +
api.buildbucket.simulated_search_results([
build_pb2.Build(id=1, status=common_pb2.SUCCESS),
build_pb2.Build(id=2, status=common_pb2.FAILURE),
])
)
yield (
test('search failed') +
api.step_data(
'buildbucket.search',
api.json.output_stream(
json_format.MessageToDict(rpc_pb2.BatchResponse(
responses=[dict(error=dict(message='there was a problem'))],
)),
),
)
)
yield (
test('limit') +
api.properties(limit=5) +
api.buildbucket.simulated_search_results([
build_pb2.Build(id=i+1, status=common_pb2.SUCCESS)
# Returning more to test trimming of the returned list.
for i in xrange(10)
])
)
| # Copyright 2019 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
from recipe_engine import types
from google.protobuf import json_format
from PB.go.chromium.org.luci.buildbucket.proto import build as build_pb2
from PB.go.chromium.org.luci.buildbucket.proto import common as common_pb2
from PB.go.chromium.org.luci.buildbucket.proto import rpc as rpc_pb2
DEPS = [
'buildbucket',
'json',
'properties',
'runtime',
'step'
]
def RunSteps(api):
builds = api.buildbucket.search(
rpc_pb2.BuildPredicate(
gerrit_changes=list(api.buildbucket.build.input.gerrit_changes),
),
limit=api.properties.get('limit'),
)
pres = api.step.active_result.presentation
for b in builds:
pres.logs['build %s' % b.id] = json_format.MessageToJson(b).splitlines()
def GenTests(api):
def test(test_name, tags=None, **req):
return (
api.test(test_name) +
api.runtime(is_luci=True, is_experimental=False) +
api.buildbucket.try_build(
project='chromium',
builder='Builder',
git_repo='https://chromium.googlesource.com/chromium/src',
)
)
yield (
test('basic')
)
yield (
test('two builds') +
api.buildbucket.simulated_search_results([
build_pb2.Build(id=1, status=common_pb2.SUCCESS),
build_pb2.Build(id=2, status=common_pb2.FAILURE),
])
)
yield (
test('search failed') +
api.step_data(
'buildbucket.search',
api.json.output_stream(
json_format.MessageToDict(rpc_pb2.BatchResponse(
responses=[dict(error=dict(message='there was a problem'))],
)),
),
)
)
yield (
test('limit') +
api.properties(limit=5) +
api.buildbucket.simulated_search_results([
build_pb2.Build(id=i+1, status=common_pb2.SUCCESS)
for i in xrange(10)
])
)
| Python | 0 |
ebe5a4ce8c12489bceb8991f627fdea29329e854 | Enable discovery server to reply also when started w/o net connection | xfd_discovery_server.py | xfd_discovery_server.py | #!/usr/bin/env python
#
# Author Aske Olsson aske.olsson@switch-gears.dk
#
import socket
import struct
import time
#MCAST_GRP = '224.1.1.1'
#MCAST_PORT = 5007
MCAST_ADDR = "239.77.124.213"
MCAST_PORT = 19418
MCAST_ANS_PORT = 19419
def socket_setup():
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('', MCAST_PORT))
mreq = struct.pack("4sl", socket.inet_aton(MCAST_ADDR), socket.INADDR_ANY)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
except socket.error, e:
sock = None
return sock
def listen():
#ip = socket.gethostbyname(socket.gethostname())
myMAC = open('/sys/class/net/eth0/address').read()
print "listen loop"
sock = socket_setup()
while True:
print "lock aquired", sock
try:
if sock:
data, sender_addr = sock.recvfrom(1024)
print data, sender_addr
# Answer back
ans_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
ans_sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)
ans_sock.sendto("MAC=" + myMAC, (sender_addr[0], MCAST_ANS_PORT))
else:
print "setup socket"
sock = socket_setup()
time.sleep(1)
except socket.error, e:
sock = None
def main():
listen()
if __name__ == "__main__":
main()
| #!/usr/bin/env python
#
# Author Aske Olsson aske.olsson@switch-gears.dk
#
import socket
import struct
#MCAST_GRP = '224.1.1.1'
#MCAST_PORT = 5007
MCAST_ADDR = "239.77.124.213"
MCAST_PORT = 19418
MCAST_ANS_PORT = 19419
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('', MCAST_PORT))
mreq = struct.pack("4sl", socket.inet_aton(MCAST_ADDR), socket.INADDR_ANY)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
#ip = socket.gethostbyname(socket.gethostname())
myMAC = open('/sys/class/net/eth0/address').read()
while True:
try:
data, sender_addr = sock.recvfrom(1024)
# print data, sender_addr
# Answer back
ans_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
ans_sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)
ans_sock.sendto("MAC=" + myMAC, (sender_addr[0], MCAST_ANS_PORT))
except Exception:
pass
| Python | 0 |
2cf4a0b93db423207798ffd93b2e91cdb73b6d2b | Add identifier for UT Brownsville | tx_salaries/utils/transformers/ut_brownsville.py | tx_salaries/utils/transformers/ut_brownsville.py | from . import base
from . import mixins
class TransformedRecord(mixins.GenericCompensationMixin,
mixins.GenericDepartmentMixin, mixins.GenericIdentifierMixin,
mixins.GenericJobTitleMixin, mixins.GenericPersonMixin,
mixins.MembershipMixin, mixins.OrganizationMixin, mixins.PostMixin,
mixins.RaceMixin, base.BaseTransformedRecord):
MAP = {
'last_name': 'Last Name',
'first_name': 'First Name',
'middle_name': 'Middle Name',
'department': 'Department',
'job_title': 'Title',
'hire_date': 'Hire Date',
'compensation': 'Annualized',
'race': 'Race',
'gender': 'Gender'
}
NAME_FIELDS = ('first_name', 'last_name', )
ORGANIZATION_NAME = 'University of Texas at Brownsville'
ORGANIZATION_CLASSIFICATION = 'University'
# TODO not given on spreadsheet, but they appear to give part time
compensation_type = 'Full Time'
@property
def is_valid(self):
# Adjust to return False on invalid fields. For example:
return self.last_name.strip() != ''
@property
def identifier(self):
"""
Identifier for UT Brownsville
"""
excluded = [self.department_key, self.job_title_key,
self.hire_date_key, self.compensation_key]
return {
'scheme': 'tx_salaries_hash',
'identifier': base.create_hash_for_record(self.data,
exclude=excluded)
}
transform = base.transform_factory(TransformedRecord)
| from . import base
from . import mixins
class TransformedRecord(mixins.GenericCompensationMixin,
mixins.GenericDepartmentMixin, mixins.GenericIdentifierMixin,
mixins.GenericJobTitleMixin, mixins.GenericPersonMixin,
mixins.MembershipMixin, mixins.OrganizationMixin, mixins.PostMixin,
mixins.RaceMixin, base.BaseTransformedRecord):
MAP = {
'last_name': 'Last Name',
'first_name': 'First Name',
'department': 'Department',
'job_title': 'Title',
'hire_date': 'Hire Date',
'status': 'LABEL FOR FT/PT STATUS',
'compensation': 'Annualized',
'race': 'Race',
'gender': 'Gender'
}
NAME_FIELDS = ('first_name', 'last_name', )
ORGANIZATION_NAME = 'University of Texas at Brownsville'
ORGANIZATION_CLASSIFICATION = 'University'
# TODO not given on spreadsheet, but they appear to give part time
compensation_type = 'Full Time'
@property
def is_valid(self):
# Adjust to return False on invalid fields. For example:
return self.last_name.strip() != ''
transform = base.transform_factory(TransformedRecord)
| Python | 0.00011 |
c3df6a10d008441c79eb07b889f52fe0de22538b | Fix the default prefix | powerline_vaulted_segment/vaulted.py | powerline_vaulted_segment/vaulted.py | from __future__ import (unicode_literals, division, absolute_import, print_function)
from powerline.theme import requires_segment_info
@requires_segment_info
def vaulted(pl, segment_info, prefix=''):
'''Return the current vaulted vault
:param string prefix:
The prefix to use in front of the vault name
'''
vault = segment_info['environ'].get('VAULTED_ENV', None)
if vault:
return '{0}{1}'.format(prefix, vault)
| from __future__ import (unicode_literals, division, absolute_import, print_function)
from powerline.theme import requires_segment_info
@requires_segment_info
def vaulted(pl, segment_info, prefix=None):
'''Return the current vaulted vault
:param string prefix:
The prefix to use in front of the vault name
'''
vault = segment_info['environ'].get('VAULTED_ENV', None)
if vault:
return '{0}{1}'.format(prefix, vault)
| Python | 0.998784 |
1ba440ca24b0108d4dcf911f1f3c967ff7de1dc4 | Create a unique filename when uploading a team file. | hackday/teams/models.py | hackday/teams/models.py | import time
from django.contrib.auth.models import User
from django.db import models
from django import forms
from django.template.defaultfilters import slugify
from assets.models import Attachment, ImageAttachment, Link
from charities.models import Charity
from voting.moremodels import Category
class STATUS(object):
"""
Status of the team
"""
ACTIVE = 'A'
DISQUALIFIED = 'D'
DELETED = 'X'
CHOICES = (
(ACTIVE, 'Active'),
(DISQUALIFIED, 'Disqualified'),
(DELETED, 'Deleted'),
)
class PROJECT_TYPE(object):
"""
Type of project -- 'implemented' (working code) or 'concept' (smoke and
Powerpoint mirrors)
"""
# I honestly came really close to calling these 'SMOKE' and 'MIRRORS' but
# couldn't decide which to assign to which. - mpirnat
IMPLEMENTED = 'I'
CONCEPT = 'C'
CHOICES = (
(IMPLEMENTED, 'Implemented'),
(CONCEPT, 'Concept'),
)
def create_unique_team_filename(instance, filename):
""" Return a uniqque filename for an uploaded team file.
-- called when saving a Team to the DB
"""
filename_parts = filename.split('.')
return 'teams/{team_slug}/{file_prefix}-{stamp}.{file_suffix}'.format(
team_slug=instance.slug,
file_prefix='.'.join(filename_parts[:-1]),
stamp=time.time(),
file_suffix=filename_parts[-1])
class Team(models.Model):
"""
A team of participants that will work on a project and compete for fabulous
prizes, fame, and glory.
Upon creation, a team needs:
* a name--hopefully an awesome one
* a slug, to be used for the URL of the team's page
* a project description
* a project type, so that we can differentiate "real" hacks vs. thought
experiments (aka "code vs. ppt")
* a creator
* a captain
* team members
* a judged category
* a charity that the team is supporting
The creator and captain may have management powers above and beyond
those of a mere member.
"""
name = models.CharField('name of team', max_length=255, db_index=True,
unique=True)
slug = models.SlugField('slugified team name', db_index=True, unique=True,
editable=False)
project = models.TextField('description of project')
logo = models.ImageField('team logo image', blank=True,
upload_to=create_unique_team_filename)
project_type = models.CharField('type of project', max_length=1,
db_index=True, choices=PROJECT_TYPE.CHOICES)
status = models.CharField(max_length=1, db_index=True,
choices=STATUS.CHOICES)
creator = models.ForeignKey(User,
related_name="%(app_label)s_%(class)s_creator")
captain = models.ForeignKey(User,
related_name="%(app_label)s_%(class)s_captain")
members = models.ManyToManyField(User,
related_name="%(app_label)s_%(class)s_members")
attachments = models.ManyToManyField(Attachment, blank=True,
related_name="%(app_label)s_%(class)s_attachments")
images = models.ManyToManyField(ImageAttachment, blank=True,
related_name="%(app_label)s_%(class)s_images")
links = models.ManyToManyField(Link, blank=True,
related_name="%(app_label)s_%(class)s_links")
category = models.ForeignKey(Category)
charity = models.ForeignKey(Charity)
create_date = models.DateTimeField('date created', auto_now_add=True)
mod_date = models.DateTimeField('date modified', auto_now=True)
def save(self, *args, **kwargs):
#TODO: check if slug exists in DB
if not self.slug:
self.slug = slugify(self.name)
return super(Team, self).save()
def __unicode__(self):
return self.name
class Meta:
ordering = ['name']
| from django.contrib.auth.models import User
from django.db import models
from django import forms
from django.template.defaultfilters import slugify
from assets.models import Attachment, ImageAttachment, Link
from charities.models import Charity
from voting.moremodels import Category
class STATUS(object):
"""
Status of the team
"""
ACTIVE = 'A'
DISQUALIFIED = 'D'
DELETED = 'X'
CHOICES = (
(ACTIVE, 'Active'),
(DISQUALIFIED, 'Disqualified'),
(DELETED, 'Deleted'),
)
class PROJECT_TYPE(object):
"""
Type of project -- 'implemented' (working code) or 'concept' (smoke and
Powerpoint mirrors)
"""
# I honestly came really close to calling these 'SMOKE' and 'MIRRORS' but
# couldn't decide which to assign to which. - mpirnat
IMPLEMENTED = 'I'
CONCEPT = 'C'
CHOICES = (
(IMPLEMENTED, 'Implemented'),
(CONCEPT, 'Concept'),
)
class Team(models.Model):
"""
A team of participants that will work on a project and compete for fabulous
prizes, fame, and glory.
Upon creation, a team needs:
* a name--hopefully an awesome one
* a slug, to be used for the URL of the team's page
* a project description
* a project type, so that we can differentiate "real" hacks vs. thought
experiments (aka "code vs. ppt")
* a creator
* a captain
* team members
* a judged category
* a charity that the team is supporting
The creator and captain may have management powers above and beyond
those of a mere member.
"""
name = models.CharField('name of team', max_length=255, db_index=True,
unique=True)
slug = models.SlugField('slugified team name', db_index=True, unique=True,
editable=False)
project = models.TextField('description of project')
logo = models.ImageField('team logo image', blank=True, upload_to='teams')
project_type = models.CharField('type of project', max_length=1,
db_index=True, choices=PROJECT_TYPE.CHOICES)
status = models.CharField(max_length=1, db_index=True,
choices=STATUS.CHOICES)
creator = models.ForeignKey(User,
related_name="%(app_label)s_%(class)s_creator")
captain = models.ForeignKey(User,
related_name="%(app_label)s_%(class)s_captain")
members = models.ManyToManyField(User,
related_name="%(app_label)s_%(class)s_members")
attachments = models.ManyToManyField(Attachment, blank=True,
related_name="%(app_label)s_%(class)s_attachments")
images = models.ManyToManyField(ImageAttachment, blank=True,
related_name="%(app_label)s_%(class)s_images")
links = models.ManyToManyField(Link, blank=True,
related_name="%(app_label)s_%(class)s_links")
category = models.ForeignKey(Category)
charity = models.ForeignKey(Charity)
create_date = models.DateTimeField('date created', auto_now_add=True)
mod_date = models.DateTimeField('date modified', auto_now=True)
def save(self, *args, **kwargs):
#TODO: check if slug exists in DB
if not self.slug:
self.slug = slugify(self.name)
return super(Team, self).save()
def __unicode__(self):
return self.name
class Meta:
ordering = ['name']
| Python | 0 |
e4cc4447bf9aca4f579eef34baccd3aaf73939c3 | Print statement verwijderd uit functie | hamming-code/hamming.py | hamming-code/hamming.py | from matrix import Matrix
#The encoding matrix
encoding_matrix = Matrix([
[1, 1, 0, 1],
[1, 0, 1, 1],
[1, 0, 0, 0],
[0, 1, 1, 1],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]
])
#The parity checking matrix
checking_matrix = Matrix([
[1, 0, 1, 0, 1, 0, 1],
[0, 1, 1, 0, 0, 1, 1],
[0, 0, 0, 1, 1, 1, 1]
])
#Takes binary vector of length 4 and adds the parity bits
#Returns result as vector
def encodemessage(message):
vector_with_paritybits = encoding_matrix*(message.transpose())
return Matrix(vector_with_paritybits.getbinary())
#repairs message, may not work, can't test it yet
#Takes a matrix
def repairmessage(message):
vector = checking_matrix*message
checker = True
#checks if the return vector is the zero vector. If this is the case
#checker = True, and there is no mistake
for element in vector.values[0]:
if element == 1:
checker = False
if checker == False:
#finds out at what position the mistake is and saves it as
#counter
counter = 0
for i, element in enumerate(vector.values[0]):
counter += element * 2 ** i
else:
#in this case checker = True, so it returns the message
return message
new_message = message.values[0]
#fixes the message
if new_message[counter - 1] == 0:
new_message[counter - 1] = 1
else:
new_message[counter - 1] = 0
return Matrix(new_message)
#Example:
#boodschap = input('Vul hier je boodschap in: ')
#testvector = Matrix([[1, 0, 1, 1]])
#print(repairmessage(encodemessage(testvector)))
| from matrix import Matrix
#The encoding matrix
encoding_matrix = Matrix([
[1, 1, 0, 1],
[1, 0, 1, 1],
[1, 0, 0, 0],
[0, 1, 1, 1],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]
])
#The parity checking matrix
checking_matrix = Matrix([
[1, 0, 1, 0, 1, 0, 1],
[0, 1, 1, 0, 0, 1, 1],
[0, 0, 0, 1, 1, 1, 1]
])
#Takes binary vector of length 4 and adds the parity bits
#Returns result as vector
def encodemessage(message):
print(message.transpose())
vector_with_paritybits = encoding_matrix*(message.transpose())
return Matrix(vector_with_paritybits.getbinary())
#repairs message, may not work, can't test it yet
#Takes a matrix
def repairmessage(message):
vector = checking_matrix*message
checker = True
#checks if the return vector is the zero vector. If this is the case
#checker = True, and there is no mistake
for element in vector.values[0]:
if element == 1:
checker = False
if checker == False:
#finds out at what position the mistake is and saves it as
#counter
counter = 0
for i, element in enumerate(vector.values[0]):
counter += element * 2 ** i
else:
#in this case checker = True, so it returns the message
return message
new_message = message.values[0]
#fixes the message
if new_message[counter - 1] == 0:
new_message[counter - 1] = 1
else:
new_message[counter - 1] = 0
return Matrix(new_message)
#Example:
#boodschap = input('Vul hier je boodschap in: ')
#testvector = Matrix([[1, 0, 1, 1]])
#print(repairmessage(encodemessage(testvector)))
| Python | 0.999972 |
825eb37e15e2fb08ac205b7495e93a91acb79c26 | Add function for flashing all form errors | app/utils.py | app/utils.py | import re
from flask import url_for, flash
def register_template_utils(app):
"""Register Jinja 2 helpers (called from __init__.py)."""
@app.template_test()
def equalto(value, other):
return value == other
@app.template_global()
def is_hidden_field(field):
from wtforms.fields import HiddenField
return isinstance(field, HiddenField)
app.add_template_global(index_for_role)
def index_for_role(role):
return url_for(role.index)
def parse_phone_number(phone_number):
"""Make phone number conform to E.164 (https://en.wikipedia.org/wiki/E.164)
"""
stripped = re.sub(r'\D', '', phone_number)
if len(stripped) == 10:
stripped = '1' + stripped
stripped = '+' + stripped
return stripped
def flash_errors(form):
for field, errors in form.errors.items():
for error in errors:
flash(u"Error in the %s field - %s" % (
getattr(form, field).label.text,
error
))
| import re
from flask import url_for
def register_template_utils(app):
"""Register Jinja 2 helpers (called from __init__.py)."""
@app.template_test()
def equalto(value, other):
return value == other
@app.template_global()
def is_hidden_field(field):
from wtforms.fields import HiddenField
return isinstance(field, HiddenField)
app.add_template_global(index_for_role)
def index_for_role(role):
return url_for(role.index)
def parse_phone_number(phone_number):
"""Make phone number conform to E.164 (https://en.wikipedia.org/wiki/E.164)
"""
stripped = re.sub(r'\D', '', phone_number)
if len(stripped) == 10:
stripped = '1' + stripped
stripped = '+' + stripped
return stripped
| Python | 0 |
3289a259a7fa9ed959eb18d012c2d2e52198b033 | Update malshare.py | plugins/analytics/public/malshare.py | plugins/analytics/public/malshare.py | import json
import requests
import logging
from core.analytics import OneShotAnalytics
from core.errors import ObservableValidationError
from core.observables import Url, Hash
class MalshareAPI(object):
"""Base class for querying the Malshare API.
This is the public API, 1000 samples per day.
Limit rejection, as it could cause api key deactivation.
"""
settings = {
'malshare_api_key': {
'name': 'Malshare API Key',
'description': 'API Key provided by malshare.com'
}
}
@staticmethod
def fetch(observable, api_key):
"""
:param observable: The extended observable klass
:param api_key: The api key obtained from Malshare
:return: malshare json response or None if error
"""
try:
params = {
'hash': observable.value,
'api_key': api_key,
'action': 'details'
}
response = requests.get('https://malshare.com/api.php', params=params)
if response.ok:
return response.json()
else:
return None
except Exception as e:
# TODO(sebdraven): Catch a better exception
print 'Exception while getting ip report {}'.format(e.message)
return None
class MalshareQuery(OneShotAnalytics, MalshareAPI):
default_values = {
'name': 'MalShare',
'description': 'Perform a MalShare query.',
}
ACTS_ON = ['Hash']
@staticmethod
def analyze(observable, results):
links = set()
json_result = MalshareAPI.fetch(
observable, results.settings['malshare_api_key'])
if json_result is not None:
json_string = json.dumps(
json_result, sort_keys=True, indent=4, separators=(',', ': '))
results.update(raw=json_string)
result = {'raw': json_string}
if 'SOURCES' in json_result:
for source in json_result['SOURCES']:
new_url = None
try:
new_url = Url.get_or_create(value=source.strip())
links.update(
observable.active_link_to(
new_url, 'c2', 'malshare_query'))
except ObservableValidationError:
logging.error(
"An error occurred when trying to add {} to the database".
format(source.strip()))
result['nb C2'] = len(json_result['SOURCES'])
try:
new_hash = Hash.get_or_create(value=json_result['MD5'])
links.update(
new_hash.active_link_to(observable, 'md5', 'malshare_query'))
new_hash = Hash.get_or_create(value=json_result['SHA1'])
links.update(
new_hash.active_link_to(observable, 'sha1', 'malshare_query'))
new_hash = Hash.get_or_create(value=json_result['SHA256'])
links.update(
new_hash.active_link_to(observable, 'sha256', 'malshare_query'))
except ObservableValidationError:
logging.error(
"An error occurred when trying to add hashes {} to the database".
format(json_string))
return list(links)
| import json
import requests
import logging
from core.analytics import OneShotAnalytics
from core.errors import ObservableValidationError
from core.observables import Url, Hash
class MalshareAPI(object):
"""Base class for querying the Malshare API.
This is the public API, 1000 samples per day.
Limit rejection, as it could cause api key deactivation.
"""
settings = {
'malshare_api_key': {
'name': 'Malshare API Key',
'description': 'API Key provided by malshare.com'
}
}
@staticmethod
def fetch(observable, api_key):
"""
:param observable: The extended observable klass
:param api_key: The api key obtained from Malshare
:return: malshare json response or None if error
"""
try:
params = {
'hash': observable.value,
'api_key': api_key,
'action': 'details'
}
response = requests.get('https://malshare.com/api.php', params, verify=False)
if response.ok:
return response.json()
else:
return None
except Exception as e:
# TODO(sebdraven): Catch a better exception
print 'Exception while getting ip report {}'.format(e.message)
return None
class MalshareQuery(OneShotAnalytics, MalshareAPI):
default_values = {
'name': 'MalShare',
'description': 'Perform a MalShare query.',
}
ACTS_ON = ['Hash']
@staticmethod
def analyze(observable, results):
links = set()
json_result = MalshareAPI.fetch(
observable, results.settings['malshare_api_key'])
if json_result is not None:
json_string = json.dumps(
json_result, sort_keys=True, indent=4, separators=(',', ': '))
results.update(raw=json_string)
result = {'raw': json_string}
if 'SOURCES' in json_result:
for source in json_result['SOURCES']:
new_url = None
try:
new_url = Url.get_or_create(value=source.strip())
links.update(
observable.active_link_to(
new_url, 'c2', 'malshare_query'))
except ObservableValidationError:
logging.error(
"An error occurred when trying to add {} to the database".
format(source.strip()))
result['nb C2'] = len(json_result['SOURCES'])
try:
new_hash = Hash.get_or_create(value=json_result['MD5'])
links.update(
new_hash.active_link_to(observable, 'md5', 'malshare_query'))
new_hash = Hash.get_or_create(value=json_result['SHA1'])
links.update(
new_hash.active_link_to(observable, 'sha1', 'malshare_query'))
new_hash = Hash.get_or_create(value=json_result['SHA256'])
links.update(
new_hash.active_link_to(observable, 'sha256', 'malshare_query'))
except ObservableValidationError:
logging.error(
"An error occurred when trying to add hashes {} to the database".
format(json_string))
return list(links)
| Python | 0 |
c0e1bed70bc331041622e0db06871d4f3e3277f3 | Update activate-devices.py | cron/activate-devices.py | cron/activate-devices.py | #!/usr/bin/env python
import MySQLdb
#import datetime
#import urllib2
#import os
import datetime
import RPi.GPIO as GPIO
try:
import RPi.GPIO as GPIO
except RuntimeError:
print("Error importing RPi.GPIO!")
servername = "localhost"
username = "pi"
password = "password"
dbname = "pi_heating_db"
GPIO.setmode(GPIO.BOARD)
cnx = MySQLdb.connect(host=servername, user=username, passwd=password, db=dbname)
cursorselect = cnx.cursor()
query = ("SELECT * FROM devices;")
cursorselect.execute(query)
results_devices =cursorselect.fetchall()
cursorselect.close()
for result in results_devices:
print("* * * * * *")
DEVICE_PIN = int( result[2] )
DEVICE_VALUE = int( result[3] )
GPIO.setup(DEVICE_PIN, GPIO.OUT, initial=GPIO.LOW)
GPIO.output(DEVICE_PIN, DEVICE_VALUE)
print( DEVICE_PIN, DEVICE_VALUE )
print("- - -")
cnx.close()
| #!/usr/bin/env python
import MySQLdb
#import datetime
#import urllib2
#import os
import datetime
import RPi.GPIO as GPIO
try:
import RPi.GPIO as GPIO
except RuntimeError:
print("Error importing RPi.GPIO!")
servername = "localhost"
username = "pi"
password = "password"
dbname = "pi_heating_db"
GPIO.setmode(GPIO.BOARD)
cnx = MySQLdb.connect(host=servername, user=username, passwd=password, db=dbname)
cursorselect = cnx.cursor()
query = ("SELECT * FROM devices;")
cursorselect.execute(query)
results_devices =cursorselect.fetchall()
cursorselect.close()
for result in results_devices:
print("* * * * * *")
DEVICE_PIN = result[2]
DEVICE_VALUE = result[3]
GPIO.setup(DEVICE_PIN, GPIO.OUT, initial=GPIO.LOW)
GPIO.output(DEVICE_PIN, DEVICE_VALUE)
print( DEVICE_PIN, DEVICE_VALUE )
print("- - -")
cnx.close()
| Python | 0.000001 |
8701318037b9d425149f0689fa137be78a782aa7 | return the name of the face found | app/views.py | app/views.py | from app import app
from flask import Flask, request, jsonify
import kairos
DEFAULT_GALLERY = 'default_gallery'
# App Logic
@app.route('/', methods=['GET'])
def index():
return 'yo'
@app.route('/upload/<name>', methods=['POST'])
def upload(name):
img_url = request.form['img_url']
success = kairos.add_face_url(img_url, name, DEFAULT_GALLERY)
return jsonify({'success': success})
@app.route('/verify', methods=['GET'])
def verify():
link = request.args.get('img_url')
name = kairos.identify_face_url(img_url, DEFAULT_GALLERY)
allowed = name is not None
# TODO: open the door.
return jsonify({'allowed': allowed,
'name': name})
| from app import app
from flask import Flask, request, jsonify
import kairos
DEFAULT_GALLERY = 'default_gallery'
# App Logic
@app.route('/', methods=['GET'])
def index():
return 'yo'
@app.route('/upload/<name>', methods=['POST'])
def upload(name):
img_url = request.form['img_url']
success = kairos.add_face_url(img_url, name, DEFAULT_GALLERY)
return jsonify({'success': success})
@app.route('/verify', methods=['GET'])
def verify():
link = request.args.get('img_url')
allowed = kairos.check_face_url(img_url, DEFAULT_GALLERY)
return jsonify({'allowed': allowed})
| Python | 0.999999 |
b3b489fb8b476a17e8d9e08d70f90aec38756c8a | Allow use of a custom user model | cuser/fields.py | cuser/fields.py | # Copyright (c) 2009-2011 Dennis Kaarsemaker <dennis@kaarsemaker.net>
# 2011 Atamert Olcgen <muhuk@muhuk.com>
# 2012 Alireza Savand <alireza.savand@gmail.com>
#
# Small piece of middleware to be able to access authentication data from
# everywhere in the django code.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Django nor the names of its contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from django.conf import settings
if hasattr(settings, 'AUTH_USER_MODEL'):
User = settings.AUTH_USER_MODEL
else:
from django.contrib.auth.models import User
from django.db.models.fields.related import ForeignKey, ManyToOneRel
from cuser.middleware import CuserMiddleware
if 'cuser' not in settings.INSTALLED_APPS:
raise ValueError("Cuser middleware is not enabled")
# Register fields with south, if installed
if 'south' in settings.INSTALLED_APPS:
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], ["^cuser\.fields\.CurrentUserField"])
class CurrentUserField(ForeignKey):
def __init__(self, to_field=None, rel_class=ManyToOneRel, **kwargs):
self.add_only = kwargs.pop('add_only', False)
kwargs.update({
'editable': False,
'null': True,
'rel_class': rel_class,
'to': User,
'to_field': to_field,
})
super(CurrentUserField, self).__init__(**kwargs)
def pre_save(self, model_instance, add):
if add or not self.add_only:
user = CuserMiddleware.get_user()
if user:
setattr(model_instance, self.attname, user.pk)
return user.pk
return super(CurrentUserField, self).pre_save(model_instance, add)
| # Copyright (c) 2009-2011 Dennis Kaarsemaker <dennis@kaarsemaker.net>
# 2011 Atamert Olcgen <muhuk@muhuk.com>
# 2012 Alireza Savand <alireza.savand@gmail.com>
#
# Small piece of middleware to be able to access authentication data from
# everywhere in the django code.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Django nor the names of its contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from django.conf import settings
from django.contrib.auth.models import User
from django.db.models.fields.related import ForeignKey, ManyToOneRel
from cuser.middleware import CuserMiddleware
if 'cuser' not in settings.INSTALLED_APPS:
raise ValueError("Cuser middleware is not enabled")
# Register fields with south, if installed
if 'south' in settings.INSTALLED_APPS:
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], ["^cuser\.fields\.CurrentUserField"])
class CurrentUserField(ForeignKey):
def __init__(self, to_field=None, rel_class=ManyToOneRel, **kwargs):
self.add_only = kwargs.pop('add_only', False)
kwargs.update({
'editable': False,
'null': True,
'rel_class': rel_class,
'to': User,
'to_field': to_field,
})
super(CurrentUserField, self).__init__(**kwargs)
def pre_save(self, model_instance, add):
if add or not self.add_only:
user = CuserMiddleware.get_user()
if user:
setattr(model_instance, self.attname, user.pk)
return user.pk
return super(CurrentUserField, self).pre_save(model_instance, add)
| Python | 0 |
194dd71de22e34e5f262b8fe0735347d6b7f1bd8 | Support title page (-1) | portfolio/pdf-scripts/do-page-generate.py | portfolio/pdf-scripts/do-page-generate.py | import subprocess
from music21 import *
from pyPdf import PdfFileReader, PdfFileWriter
from reportlab.pdfgen import canvas
from reportlab.lib import pagesizes
from reportlab.lib.units import inch
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
# some important constants
MUSIC_XML_DIR = "C:\\wamp\\www\\syntaxblitz.github.io\\portfolio\\music-xml\\"
MUSIC_LY_DIR = "C:\\wamp\\www\\syntaxblitz.github.io\\portfolio\\music-ly\\"
MUSIC_PDF_DIR = "C:\\wamp\\www\\syntaxblitz.github.io\\portfolio\\music-pdf\\"
PAGENUM_PDF_DIR = "C:\\wamp\\www\\syntaxblitz.github.io\\portfolio\\pagenum-pdf\\"
PAGE_PDF_DIR = "C:\\wamp\\www\\syntaxblitz.github.io\\portfolio\\page-pdf\\"
OUTPUT_DIR = "C:\\wamp\\www\\syntaxblitz.github.io\\portfolio\\pdf-output\\"
PATH_TO_CAMBRIA = "C:\\Windows\\Fonts\\CAMBRIA.TTC"
LILYPOND_EXE_LOCATION = r"c:\Program Files (x86)\lilypond\usr\bin\lilypond.exe"
pageNum = 0
pageNumber = str(pageNum)
numOfParts = 1
# generate .ly file in music21
music = converter.parse(MUSIC_XML_DIR + pageNumber + ".xml")
numOfParts = len(music.getElementsByClass(stream.Part))
music.write("lily", MUSIC_LY_DIR + pageNumber + ".ly")
# add styling information to .ly file
outFile = open(MUSIC_LY_DIR + pageNumber + ".ly", "a") # 'a' opens for appending
if numOfParts == 1:
outFile.write(file("ly-one-line.txt","r").read()) # 'r' opens for just reading
else:
outFile.write(file("ly-two-lines.txt","r").read()) # 'r' opens for just reading
outFile.close()
# turn .ly into .pdf
subprocess.call([ #will wait for finish exec
LILYPOND_EXE_LOCATION,
"-o", MUSIC_PDF_DIR,
MUSIC_LY_DIR + pageNumber + ".ly"
])
# merge pages and add page number:
musicLine = PdfFileReader(file(MUSIC_PDF_DIR + pageNumber + ".pdf", "rb"))
page = PdfFileReader(file(PAGE_PDF_DIR + pageNumber + ".pdf", "rb"))
page.getPage(0).mergePage(musicLine.getPage(0))
hexPageNumber = str(hex(pageNum))[2:]
pageNumberPdfCanvas = canvas.Canvas(PAGENUM_PDF_DIR + pageNumber + ".pdf", pagesize=pagesizes.letter)
pdfmetrics.registerFont(TTFont("Cambria", PATH_TO_CAMBRIA))
pageNumberPdfCanvas.setFont("Cambria", 12)
if pageNum != -1: # title page is -1, and we don't want a page number there.
if pageNum % 2 == 0: # even pages are on left, so put text on right
widthOfText = pageNumberPdfCanvas.stringWidth(hexPageNumber, "Cambria", 12)
pageNumberPdfCanvas.drawString(inch * 8.5 - inch * .5 - widthOfText, inch * 11 - inch * .5, hexPageNumber)
else: # put number on left
pageNumberPdfCanvas.drawString(inch * .5, inch * 11 - inch * .5, hexPageNumber)
pageNumberPdfCanvas.showPage()
pageNumberPdfCanvas.save()
pageNumberPdf = PdfFileReader(file(PAGENUM_PDF_DIR + pageNumber + ".pdf", "rb"))
page.getPage(0).mergePage(pageNumberPdf.getPage(0))
output = PdfFileWriter()
output.addPage(page.getPage(0))
outStream = file(OUTPUT_DIR + pageNumber + ".pdf", "wb")
output.write(outStream)
outStream.close() | import subprocess
from music21 import *
from pyPdf import PdfFileReader, PdfFileWriter
from reportlab.pdfgen import canvas
from reportlab.lib import pagesizes
from reportlab.lib.units import inch
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
# some important constants
MUSIC_XML_DIR = "C:\\wamp\\www\\syntaxblitz.github.io\\portfolio\\music-xml\\"
MUSIC_LY_DIR = "C:\\wamp\\www\\syntaxblitz.github.io\\portfolio\\music-ly\\"
MUSIC_PDF_DIR = "C:\\wamp\\www\\syntaxblitz.github.io\\portfolio\\music-pdf\\"
PAGENUM_PDF_DIR = "C:\\wamp\\www\\syntaxblitz.github.io\\portfolio\\pagenum-pdf\\"
PAGE_PDF_DIR = "C:\\wamp\\www\\syntaxblitz.github.io\\portfolio\\page-pdf\\"
OUTPUT_DIR = "C:\\wamp\\www\\syntaxblitz.github.io\\portfolio\\pdf-output\\"
PATH_TO_CAMBRIA = "C:\\Windows\\Fonts\\CAMBRIA.TTC"
LILYPOND_EXE_LOCATION = r"c:\Program Files (x86)\lilypond\usr\bin\lilypond.exe"
pageNum = 0
pageNumber = str(pageNum)
numOfParts = 1
# generate .ly file in music21
music = converter.parse(MUSIC_XML_DIR + pageNumber + ".xml")
numOfParts = len(music.getElementsByClass(stream.Part))
music.write("lily", MUSIC_LY_DIR + pageNumber + ".ly")
# add styling information to .ly file
outFile = open(MUSIC_LY_DIR + pageNumber + ".ly", "a") # 'a' opens for appending
if numOfParts == 1:
outFile.write(file("ly-one-line.txt","r").read()) # 'r' opens for just reading
else:
outFile.write(file("ly-two-lines.txt","r").read()) # 'r' opens for just reading
outFile.close()
# turn .ly into .pdf
subprocess.call([ #will wait for finish exec
LILYPOND_EXE_LOCATION,
"-o", MUSIC_PDF_DIR,
MUSIC_LY_DIR + pageNumber + ".ly"
])
# merge pages and add page number:
musicLine = PdfFileReader(file(MUSIC_PDF_DIR + pageNumber + ".pdf", "rb"))
page = PdfFileReader(file(PAGE_PDF_DIR + pageNumber + ".pdf", "rb"))
page.getPage(0).mergePage(musicLine.getPage(0))
hexPageNumber = str(hex(pageNum))[2:]
pageNumberPdfCanvas = canvas.Canvas(PAGENUM_PDF_DIR + pageNumber + ".pdf", pagesize=pagesizes.letter)
pdfmetrics.registerFont(TTFont("Cambria", PATH_TO_CAMBRIA))
pageNumberPdfCanvas.setFont("Cambria", 12)
if pageNum % 2 == 0: # even pages are on left, so put text on right
widthOfText = pageNumberPdfCanvas.stringWidth(hexPageNumber, "Cambria", 12)
pageNumberPdfCanvas.drawString(inch * 8.5 - inch * .5 - widthOfText, inch * 11 - inch * .5, hexPageNumber)
else: # put number on left
pageNumberPdfCanvas.drawString(inch * .5, inch * 11 - inch * .5, hexPageNumber)
pageNumberPdfCanvas.showPage()
pageNumberPdfCanvas.save()
pageNumberPdf = PdfFileReader(file(PAGENUM_PDF_DIR + pageNumber + ".pdf", "rb"))
page.getPage(0).mergePage(pageNumberPdf.getPage(0))
output = PdfFileWriter()
output.addPage(page.getPage(0))
outStream = file(OUTPUT_DIR + pageNumber + ".pdf", "wb")
output.write(outStream)
outStream.close() | Python | 0 |
f7ce07f6775fd88a0d8e5bf0f980eb22050f2d92 | Update file_regression.py | src/pytest_regressions/file_regression.py | src/pytest_regressions/file_regression.py | # encoding: UTF-8
from functools import partial
import six
from .common import perform_regression_check, check_text_files
class FileRegressionFixture(object):
"""
Implementation of `file_regression` fixture.
"""
def __init__(self, datadir, original_datadir, request):
"""
:type datadir: Path
:type original_datadir: Path
:type request: FixtureRequest
"""
self.request = request
self.datadir = datadir
self.original_datadir = original_datadir
self.force_regen = False
def check(
self,
contents,
encoding=None,
extension=".txt",
newline=None,
basename=None,
fullpath=None,
binary=False,
obtained_filename=None,
check_fn=None,
):
"""
Checks the contents against a previously recorded version, or generate a new file.
:param str contents: content to be verified.
:param str|None encoding: Encoding used to write file, if any.
:param str extension: Extension of file.
:param str|None newline: See `io.open` docs.
:param bool binary: If the file is binary or text.
:param obtained_filename: ..see:: FileRegressionCheck
:param check_fn: a function with signature ``(obtained_filename, expected_filename)`` that should raise
AssertionError if both files differ.
If not given, use internal function which compares text using :py:mod:`difflib`.
"""
__tracebackhide__ = True
if binary and encoding:
raise ValueError(
"Only binary ({!r}) or encoding ({!r}) parameters must be passed at the same time.".format(
binary, encoding
)
)
if binary:
assert isinstance(
contents, six.binary_type
), "Expected bytes contents but received type {}".format(
type(contents).__name__
)
else:
assert isinstance(
contents, six.text_type
), "Expected text/unicode contents but received type {}".format(
type(contents).__name__
)
import io
if check_fn is None:
if binary:
def check_fn(obtained_filename, expected_filename):
if obtained_filename.read_bytes() != expected_filename.read_bytes():
raise AssertionError(
"Binary files {} and {} differ.".format(
obtained_filename, expected_filename
)
)
else:
check_fn = partial(check_text_files, encoding=encoding)
def dump_fn(filename):
mode = "wb" if binary else "w"
with io.open(
six.text_type(filename), mode, encoding=encoding, newline=newline
) as f:
f.write(contents)
perform_regression_check(
datadir=self.datadir,
original_datadir=self.original_datadir,
request=self.request,
check_fn=check_fn,
dump_fn=dump_fn,
extension=extension,
basename=basename,
fullpath=fullpath,
force_regen=self.force_regen,
obtained_filename=obtained_filename,
)
# non-PEP 8 alias used internally at ESSS
Check = check
| # encoding: UTF-8
from functools import partial
import six
from .common import perform_regression_check, check_text_files
class FileRegressionFixture(object):
"""
Implementation of `file_regression` fixture.
"""
def __init__(self, datadir, original_datadir, request):
"""
:type datadir: Path
:type original_datadir: Path
:type request: FixtureRequest
"""
self.request = request
self.datadir = datadir
self.original_datadir = original_datadir
self.force_regen = False
def check(
self,
contents,
encoding=None,
extension=".txt",
newline=None,
basename=None,
fullpath=None,
binary=False,
obtained_filename=None,
check_fn=None,
):
"""
Checks the contents against a previously recorded version, or generate a new file.
:param str contents: contents to write to the file
:param str|None encoding: Encoding used to write file, if any.
:param str extension: Extension of file.
:param str|None newline: See `io.open` docs.
:param bool binary: If the file is binary or text.
:param obtained_filename: ..see:: FileRegressionCheck
:param check_fn: a function with signature ``(obtained_filename, expected_filename)`` that should raise
AssertionError if both files differ.
If not given, use internal function which compares text using :py:mod:`difflib`.
"""
__tracebackhide__ = True
if binary and encoding:
raise ValueError(
"Only binary ({!r}) or encoding ({!r}) parameters must be passed at the same time.".format(
binary, encoding
)
)
if binary:
assert isinstance(
contents, six.binary_type
), "Expected bytes contents but received type {}".format(
type(contents).__name__
)
else:
assert isinstance(
contents, six.text_type
), "Expected text/unicode contents but received type {}".format(
type(contents).__name__
)
import io
if check_fn is None:
if binary:
def check_fn(obtained_filename, expected_filename):
if obtained_filename.read_bytes() != expected_filename.read_bytes():
raise AssertionError(
"Binary files {} and {} differ.".format(
obtained_filename, expected_filename
)
)
else:
check_fn = partial(check_text_files, encoding=encoding)
def dump_fn(filename):
mode = "wb" if binary else "w"
with io.open(
six.text_type(filename), mode, encoding=encoding, newline=newline
) as f:
f.write(contents)
perform_regression_check(
datadir=self.datadir,
original_datadir=self.original_datadir,
request=self.request,
check_fn=check_fn,
dump_fn=dump_fn,
extension=extension,
basename=basename,
fullpath=fullpath,
force_regen=self.force_regen,
obtained_filename=obtained_filename,
)
# non-PEP 8 alias used internally at ESSS
Check = check
| Python | 0.000001 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.