text
stringlengths 4
1.02M
| meta
dict |
|---|---|
from corehq.apps.reports.dont_use.fields import ReportField, ReportSelectField
from corehq.apps.reports.dont_use.fields import SelectFilteredMobileWorkerField
from corehq.apps.fixtures.models import FixtureDataType, FixtureDataItem
from corehq.apps.reports.filters.base import (BaseSingleOptionFilter,
BaseDrilldownOptionFilter)
from corehq.apps.reports.filters.users import LinkedUserFilter
import hsph.const as const
class SiteField(ReportField):
    """Drilldown filter for selecting an HSPH region / district / site.

    Renders a three-level selector backed by the 'site' fixture table of
    the domain (template: hsph/fields/sites.html).
    """
    slug = "hsph_site"
    domain = 'hsph'
    # GET-parameter names used for each drilldown level.
    slugs = dict(site="hsph_site",
                 district="hsph_district",
                 region="hsph_region")
    template = "hsph/fields/sites.html"

    def update_context(self):
        """Populate the template context with the site tree and current selection."""
        self.context['sites'] = self.getFacilities(domain=self.domain)
        self.context['selected'] = dict(region=self.request.GET.get(self.slugs['region'], ''),
                                        district=self.request.GET.get(self.slugs['district'], ''),
                                        siteNum=self.request.GET.get(self.slugs['site'], ''))
        self.context['slugs'] = self.slugs

    @classmethod
    def getFacilities(cls, domain=None):
        """Build the nested region -> district -> site dict from the 'site' fixture.

        Returns {region_id: {'name': ..., 'districts': {district_id:
        {'name': ..., 'sites': {site_number: {'name': ...}}}}}}.
        """
        cls.domain = domain or cls.domain
        facs = dict()
        data_type = FixtureDataType.by_domain_tag(cls.domain, 'site').first()
        # Guard against a missing fixture type instead of crashing on
        # .get_id — consistent with NameOfDCTLField.get_dctl_list.
        if data_type is None:
            return facs
        fixtures = FixtureDataItem.by_data_type(cls.domain, data_type.get_id)
        for fix in fixtures:
            # Hoist the (potentially computed) fields mapping once per item.
            fields = fix.fields_without_attributes
            region = fields.get("region_id")
            district = fields.get("district_id")
            site = fields.get("site_number")
            if region not in facs:
                facs[region] = dict(name=fields.get("region_name"), districts=dict())
            districts = facs[region]["districts"]
            if district not in districts:
                districts[district] = dict(name=fields.get("district_name"), sites=dict())
            if site not in districts[district]["sites"]:
                districts[district]["sites"][site] = dict(name=fields.get("site_name"))
        return facs
class NameOfFADAField(SelectFilteredMobileWorkerField):
    """Mobile-worker dropdown limited to members of the FADA group."""
    slug = "fada_name"
    name = "Name of FADA"
    group_names = [const.FADA_GROUP_NAME]
    cssId = "fada_name"
    show_only_group_option = False
    default_option = "All FADAs"
class NameOfFIDAField(SelectFilteredMobileWorkerField):
    """Mobile-worker dropdown limited to members of the FIDA group."""
    slug = "fida_name"
    name = "Name of FIDA"
    group_names = [const.FIDA_GROUP_NAME]
    cssId = "fida_name"
    show_only_group_option = False
    default_option = "All FIDAs"
class NameOfCATIField(SelectFilteredMobileWorkerField):
    """Mobile-worker dropdown limited to members of the CATI group."""
    slug = "cati_name"
    name = "Name of CATI"
    cssId = "cati_name"
    group_names = [const.CATI_GROUP_NAME]
    show_only_group_option = False
    default_option = "All CATIs"
class NameOfCATITLField(SelectFilteredMobileWorkerField):
    """Mobile-worker dropdown limited to members of the CATI TL group."""
    slug = "cati_tl_name"
    name = "Name of CATI TL"
    cssId = "cati_tl_name"
    group_names = [const.CATI_TL_GROUP_NAME]
    show_only_group_option = False
    default_option = "All CATI TLs"
class NameOfCITLField(SelectFilteredMobileWorkerField):
    """Mobile-worker dropdown limited to members of the "CITL" group.

    NOTE(review): unlike the sibling fields above, this one hard-codes the
    group name (no hsph.const constant) and sets no show_only_group_option
    or default_option — presumably relying on the base class defaults;
    confirm this is intentional.
    """
    slug = "citl_name"
    name = "Name of CITL"
    cssId = "citl_name"
    group_names = ["CITL"]
class NameOfDCTLField(ReportSelectField):
    """Dropdown of DCTLs, sourced from the 'dctl' fixture table."""
    slug = "dctl_name"
    name = "Name of DCTL"
    cssId = "dctl_name"
    domain = 'hsph'
    default_option = "All DCTLs..."
    cssClasses = "span3"

    def update_params(self):
        super(NameOfDCTLField, self).update_params()
        self.options = self.get_dctl_list()

    @classmethod
    def _dctl_data_items(cls):
        # Shared fixture lookup for the classmethods below (was duplicated
        # in get_dctl_list and get_users_per_dctl).
        data_type = FixtureDataType.by_domain_tag(cls.domain, 'dctl').first()
        return FixtureDataItem.by_data_type(
            cls.domain, data_type.get_id if data_type else None)

    @classmethod
    def get_dctl_list(cls):
        """Return [{'text': dctl_name, 'val': dctl_id}, ...] for every DCTL row."""
        return [dict(text=item.fields_without_attributes.get("name"),
                     val=item.fields_without_attributes.get("id"))
                for item in cls._dctl_data_items()]

    @classmethod
    def get_users_per_dctl(cls):
        """Return {dctl_id: users} mapping each DCTL fixture row to its users."""
        dctls = dict()
        for item in cls._dctl_data_items():
            dctls[item.fields_without_attributes.get("id")] = item.get_users(wrap=False)
        return dctls
class DCTLToFIDAFilter(LinkedUserFilter):
    """Two-level linked user filter over the DCTL and FIDA user types."""
    domain = 'hsph'
    user_types = ("DCTL", "FIDA")
class AllocatedToFilter(BaseSingleOptionFilter):
    """Single-option filter: restrict to CATI- or field-allocated records."""
    slug = "allocated_to"
    label = "Allocated To"
    cssId = 'allocated_to'
    cssClasses = "span2"
    options = [
        ('cati', 'CATI'),
        ('field', 'Field')
    ]
    default_text = "All"
class SelectReferredInStatusField(ReportSelectField):
    """Filter toggling between all birth data and referred-in births only."""
    slug = "referred_in_status"
    name = "Referred In Status"
    cssId = "hsph_referred_in_status"
    cssClasses = "span3"
    options = [dict(val="referred", text="Only Referred In Births")]
    default_option = "All Birth Data"
class SelectCaseStatusField(ReportSelectField):
    """Filter on home-visit case status (open vs. closed)."""
    slug = "case_status"
    name = "Home Visit Status"
    cssId = "hsph_case_status"
    cssClasses = "span2"
    options = [dict(val="closed", text="CLOSED"),
               dict(val="open", text="OPEN")]
    default_option = "Select Status..."
class IHForCHFField(ReportSelectField):
    """Filter restricting reports to IHF-only, CHF-only, or both facility types.

    Facility classification comes from the 'ihf_chf' field of the 'site'
    fixture table.
    """
    slug = "ihf_or_chf"
    name = "IHF/CHF"
    domain = 'hsph'
    cssId = "hsph_ihf_or_chf"
    cssClasses = "span2"
    options = [dict(val="IHF", text="IHF only"),
               dict(val="CHF", text="CHF only")]
    default_option = "IHF and CHF"

    @classmethod
    def _get_facilities(cls, domain=None):
        # Returns {'ihf': [fields, ...], 'chf': [fields, ...]} where each
        # entry is a fixture item's fields dict, bucketed by its
        # lower-cased 'ihf_chf' value.
        domain = domain or cls.domain
        facilities = dict(ihf=[], chf=[])
        data_type = FixtureDataType.by_domain_tag(domain, 'site').first()
        data_items = FixtureDataItem.by_data_type(domain, data_type.get_id)
        for item in data_items:
            ihf_chf = item.fields_without_attributes.get("ihf_chf", "").lower()
            if ihf_chf == 'ifh':  # typo in some test data
                ihf_chf = 'ihf'
            try:
                facilities[ihf_chf].append(item.fields_without_attributes)
            except KeyError:
                # there's a site fixture item without an IHF/CHF value
                pass
        return facilities

    @classmethod
    def get_facilities(cls, domain=None):
        """Return {'ihf': site_ids, 'chf': site_ids} for the domain.

        NOTE(review): under Python 3 the map() values would be single-use
        iterators rather than lists — confirm callers iterate them only once
        (this is legacy Python 2 code).
        """
        domain = domain or cls.domain
        return dict([(ihf_chf, map(lambda f: f['site_id'], facilities))
                     for (ihf_chf, facilities)
                     in cls._get_facilities(domain).items()])

    @classmethod
    def get_selected_facilities(cls, site_map, domain=None):
        """Return site ids (grouped by 'ihf'/'chf') that appear in site_map.

        site_map is the nested region -> district -> site dict of the form
        produced by SiteField.getFacilities().
        """
        domain = domain or cls.domain

        def filter_by_sitefield(facilities):
            # Keep only facilities whose region, district AND site number
            # are all present in the selected site_map.
            ret = []
            for f in facilities:
                region_id = f['region_id']
                if region_id not in site_map:
                    continue
                district_id = f['district_id']
                districts = site_map[region_id]['districts']
                if district_id not in districts:
                    continue
                site_number = f['site_number']
                if site_number in districts[district_id]['sites']:
                    ret.append(f['site_id'])
            return ret
        return dict([(ihf_chf.lower(), filter_by_sitefield(facilities))
                     for (ihf_chf, facilities)
                     in cls._get_facilities(domain).items()])
class FacilityStatusField(ReportSelectField):
    """Filter on a facility's program stage.

    Values are ordinal strings ("-1".."2") matching the stage codes stored
    on the facility data.
    """
    slug = "facility_status"
    name = "Facility Status"
    cssId = "hsph_facility_status"
    cssClasses = "span4"
    options = [dict(val="-1", text="On Board"),
               dict(val="0", text="S.B.R. Deployed"),
               dict(val="1", text="Baseline"),
               dict(val="2", text="Trial Data")]
    default_option = "Select Status..."
class FacilityField(ReportSelectField):
    """Dropdown of all facilities (sites) for the hsph domain.

    Options are built from the 'site' fixture table: text is the site name,
    value is the site id.
    """
    slug = "facility"
    domain = 'hsph'
    name = "Facility"
    cssId = "hsph_facility_name"
    default_option = "All Facilities..."
    cssClasses = "span3"

    def update_params(self):
        super(FacilityField, self).update_params()
        self.options = self.getFacilities()

    @classmethod
    def getFacilities(cls, domain=None):
        """Return [{'text': site_name, 'val': site_id}, ...] from the fixture."""
        domain = domain or cls.domain
        data_type = FixtureDataType.by_domain_tag(domain, 'site').first()
        # Guard against a missing fixture type instead of crashing on
        # .get_id — consistent with NameOfDCTLField.get_dctl_list.
        data_items = FixtureDataItem.by_data_type(
            domain, data_type.get_id if data_type else None)
        return [dict(text=item.fields_without_attributes.get("site_name"),
                     val=item.fields_without_attributes.get("site_id"))
                for item in data_items]
|
{
"content_hash": "3dcec1083adf7b76d9f96028a39c9e0f",
"timestamp": "",
"source": "github",
"line_count": 240,
"max_line_length": 149,
"avg_line_length": 36.020833333333336,
"alnum_prop": 0.621283979178716,
"repo_name": "SEL-Columbia/commcare-hq",
"id": "258bfb35cc2536c683d4cf99863235301880f41f",
"size": "8645",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "custom/_legacy/hsph/fields.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "768322"
},
{
"name": "JavaScript",
"bytes": "2647080"
},
{
"name": "Python",
"bytes": "7806659"
},
{
"name": "Shell",
"bytes": "28569"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django_extensions.db.fields
class Migration(migrations.Migration):
    # Adds the OpportunityStage lookup table (auto-generated migration —
    # do not edit the schema by hand).

    dependencies = [
        ('djconnectwise', '0023_auto_20170605_0705'),
    ]

    operations = [
        migrations.CreateModel(
            name='OpportunityStage',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
                # created/modified timestamps are maintained automatically
                # by django-extensions.
                ('created', django_extensions.db.fields.CreationDateTimeField(verbose_name='created', auto_now_add=True)),
                ('modified', django_extensions.db.fields.ModificationDateTimeField(verbose_name='modified', auto_now=True)),
                ('name', models.CharField(max_length=50)),
            ],
            options={
                'ordering': ('-modified', '-created'),
                'get_latest_by': 'modified',
                'abstract': False,
            },
        ),
    ]
|
{
"content_hash": "c1c1b310b0893ff79b4aa0fcdbe1ec22",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 124,
"avg_line_length": 34.857142857142854,
"alnum_prop": 0.5799180327868853,
"repo_name": "AparatTechnologies/django-connectwise",
"id": "b072245d281750fc82a9c3fe09ce530627d542d6",
"size": "1000",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "djconnectwise/migrations/0024_opportunitystage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "944"
},
{
"name": "Python",
"bytes": "220964"
}
],
"symlink_target": ""
}
|
"""Support for Homekit switches."""
from aiohomekit.model.characteristics import (
CharacteristicsTypes,
InUseValues,
IsConfiguredValues,
)
from homeassistant.components.switch import SwitchEntity
from homeassistant.core import callback
from . import KNOWN_DEVICES, HomeKitEntity
# Extra state-attribute keys exposed by the entities in this module.
OUTLET_IN_USE = "outlet_in_use"
ATTR_IN_USE = "in_use"
ATTR_IS_CONFIGURED = "is_configured"
ATTR_REMAINING_DURATION = "remaining_duration"
class HomeKitSwitch(HomeKitEntity, SwitchEntity):
    """Representation of a Homekit switch."""

    def get_characteristic_types(self):
        """Define the homekit characteristics the entity cares about."""
        return [CharacteristicsTypes.ON, CharacteristicsTypes.OUTLET_IN_USE]

    @property
    def is_on(self):
        """Return true if device is on."""
        return self.service.value(CharacteristicsTypes.ON)

    async def _set_power(self, state):
        # Shared implementation for async_turn_on / async_turn_off.
        await self.async_put_characteristics({CharacteristicsTypes.ON: state})

    async def async_turn_on(self, **kwargs):
        """Turn the specified switch on."""
        await self._set_power(True)

    async def async_turn_off(self, **kwargs):
        """Turn the specified switch off."""
        await self._set_power(False)

    @property
    def device_state_attributes(self):
        """Return the optional state attributes."""
        in_use = self.service.value(CharacteristicsTypes.OUTLET_IN_USE)
        if in_use is None:
            return None
        return {OUTLET_IN_USE: in_use}
class HomeKitValve(HomeKitEntity, SwitchEntity):
    """Represents a valve in an irrigation system."""

    def get_characteristic_types(self):
        """Define the homekit characteristics the entity cares about."""
        return [
            CharacteristicsTypes.ACTIVE,
            CharacteristicsTypes.IN_USE,
            CharacteristicsTypes.IS_CONFIGURED,
            CharacteristicsTypes.REMAINING_DURATION,
        ]

    async def _set_active(self, state):
        # Shared implementation for async_turn_on / async_turn_off.
        await self.async_put_characteristics({CharacteristicsTypes.ACTIVE: state})

    async def async_turn_on(self, **kwargs):
        """Turn the specified valve on."""
        await self._set_active(True)

    async def async_turn_off(self, **kwargs):
        """Turn the specified valve off."""
        await self._set_active(False)

    @property
    def icon(self) -> str:
        """Return the icon."""
        return "mdi:water"

    @property
    def is_on(self):
        """Return true if device is on."""
        return self.service.value(CharacteristicsTypes.ACTIVE)

    @property
    def device_state_attributes(self):
        """Return the optional state attributes."""
        read = self.service.value
        attrs = {}

        in_use = read(CharacteristicsTypes.IN_USE)
        if in_use is not None:
            attrs[ATTR_IN_USE] = in_use == InUseValues.IN_USE

        configured = read(CharacteristicsTypes.IS_CONFIGURED)
        if configured is not None:
            attrs[ATTR_IS_CONFIGURED] = configured == IsConfiguredValues.CONFIGURED

        duration = read(CharacteristicsTypes.REMAINING_DURATION)
        if duration is not None:
            attrs[ATTR_REMAINING_DURATION] = duration

        return attrs
# Maps a HomeKit service type ("stype") to the entity class implementing it.
ENTITY_TYPES = {
    "switch": HomeKitSwitch,
    "outlet": HomeKitSwitch,
    "valve": HomeKitValve,
}
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up Homekit switches."""
    conn = hass.data[KNOWN_DEVICES][config_entry.data["AccessoryPairingID"]]

    @callback
    def async_add_service(aid, service):
        # Unsupported service types are simply ignored.
        klass = ENTITY_TYPES.get(service["stype"])
        if klass is None:
            return False
        async_add_entities([klass(conn, {"aid": aid, "iid": service["iid"]})], True)
        return True

    conn.add_listener(async_add_service)
|
{
"content_hash": "c0fb3679794795538667eb76f0bd8b1b",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 86,
"avg_line_length": 32.08403361344538,
"alnum_prop": 0.6639601885804086,
"repo_name": "balloob/home-assistant",
"id": "3408d036b584936a779bd4c568d641d518e2758b",
"size": "3818",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/homekit_controller/switch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1099"
},
{
"name": "Python",
"bytes": "12903869"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17137"
}
],
"symlink_target": ""
}
|
from __future__ import print_function # Python 2/3 compatibility
"""
Example of script to parcellate mutli-subject data.
May take some time to complete.
Author: Bertrand Thirion, 2005-2009
"""
from os import mkdir, getcwd, path
from numpy import array
from nipy.labs.spatial_models.parcel_io import parcel_input, \
write_parcellation_images, parcellation_based_analysis
from nipy.labs.spatial_models.hierarchical_parcellation import hparcel
# Local import
from get_data_light import DATA_DIR, get_second_level_dataset
# Get the data
nb_subj = 12
subj_id = ['subj_%02d' % s for s in range(nb_subj)]
nbeta = '0029'
data_dir = path.join(DATA_DIR, 'group_t_images')
mask_images = [path.join(data_dir, 'mask_subj%02d.nii' % n)
               for n in range(nb_subj)]
learn_images = [path.join(data_dir, 'spmT_%s_subj_%02d.nii' % (nbeta, n))
                for n in range(nb_subj)]
missing_file = array(
    [not path.exists(m) for m in mask_images + learn_images]).any()
# parcel_input expects one *list* of learn images per subject.
learn_images = [[m] for m in learn_images]

# Download the demo dataset if any input volume is missing.
if missing_file:
    get_second_level_dataset()

# parameter for the intersection of the mask
ths = .5

# number of parcels
nbparcel = 200

# write directory
write_dir = path.join(getcwd(), 'results')
if not path.exists(write_dir):
    mkdir(write_dir)

# prepare the parcel structure
domain, ldata = parcel_input(mask_images, learn_images, ths)

# run the algorithm (hierarchical parcellation across subjects)
fpa = hparcel(domain, ldata, nbparcel, verbose=1)

# produce some output images
write_parcellation_images(fpa, subject_id=subj_id, swd=write_dir)

# do some parcellation-based analysis:
# take some test images whose parcel-based signal needs to be assessed
test_images = [path.join(data_dir, 'spmT_%s_subj_%02d.nii' % (nbeta, n))
               for n in range(nb_subj)]

# compute and write the parcel-based statistics
rfx_path = path.join(write_dir, 'prfx_%s.nii' % nbeta)
parcellation_based_analysis(fpa, test_images, 'one_sample', rfx_path=rfx_path)
print("Wrote everything in %s" % write_dir)
|
{
"content_hash": "fba2aa1791bedbd19847f6d5ee52fba6",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 78,
"avg_line_length": 30.106060606060606,
"alnum_prop": 0.7106190236537494,
"repo_name": "arokem/nipy",
"id": "c304554feee53f49423487336dee51b5e044e0fc",
"size": "2123",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/labs/need_data/parcel_multisubj.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1601255"
},
{
"name": "C++",
"bytes": "999"
},
{
"name": "Makefile",
"bytes": "2786"
},
{
"name": "Matlab",
"bytes": "5508"
},
{
"name": "Python",
"bytes": "3047221"
}
],
"symlink_target": ""
}
|
from azure.identity import DefaultAzureCredential
from azure.mgmt.apimanagement import ApiManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-apimanagement
# USAGE
python api_management_list_issues.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """List all issues of an API Management service and print each one."""
    credential = DefaultAzureCredential()
    client = ApiManagementClient(
        credential=credential,
        subscription_id="subid",
    )

    issues = client.issue.list_by_service(
        resource_group_name="rg1",
        service_name="apimService1",
    )
    for issue in issues:
        print(issue)


# x-ms-original-file: specification/apimanagement/resource-manager/Microsoft.ApiManagement/stable/2021-08-01/examples/ApiManagementListIssues.json
if __name__ == "__main__":
    main()
|
{
"content_hash": "1000fcb7ce8bccb1343a919476f7bbc8",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 146,
"avg_line_length": 32.470588235294116,
"alnum_prop": 0.7273550724637681,
"repo_name": "Azure/azure-sdk-for-python",
"id": "9d69bec09967c999734bc992f6f19e98808ebf19",
"size": "1572",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/apimanagement/azure-mgmt-apimanagement/generated_samples/api_management_list_issues.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
import sys, os, time, random, ctypes
#--------------------------------------------------------------------------------------------------
def BytesString(n):
    """Format an integer byte count using the largest unit that divides it evenly.

    E.g. 1024 -> '1KB', but 1536 -> '1536B' (only exact multiples are scaled).
    """
    units = ['B','KB','MB','GB','TB','PB','EB','ZB','YB']
    idx = 0
    while n % 1024 == 0 and idx + 1 < len(units):
        n //= 1024
        idx += 1
    return '{0}{1}'.format(n, units[idx])
def BytesInt(s):
    """Parse a size string ('512', '4KB', '2MB', ...) into an integer byte count.

    Raises ValueError if the string is neither all digits nor suffixed with
    a recognized unit.
    """
    if all(c in '0123456789' for c in s):
        return int(s)
    suffixes = ['B','KB','MB','GB','TB','PB','EB','ZB','YB']
    # Reversed so 'KB'/'MB'/... match before the bare 'B' suffix.
    for power, suffix in reversed(list(enumerate(suffixes))):
        if s.endswith(suffix):
            # Slice off exactly the suffix. The previous str.rstrip(suffix)
            # strips a *set* of characters, not the suffix string — it only
            # worked here by accident because digits are never in the set.
            return int(s[:-len(suffix)]) * 1024 ** power
    raise ValueError('BytesInt requires proper suffix ('+' '.join(suffixes)+').')
def BytesStringFloat(n):
    """Format a byte count as a float with two decimals and the closest unit.

    Scaling only kicks in strictly above 1024, so BytesStringFloat(1024)
    is '1024.00B'.
    """
    value = float(n)
    units = ['B','KB','MB','GB','TB','PB','EB','ZB','YB']
    idx = 0
    while value > 1024.0 and idx + 1 < len(units):
        value /= 1024.0
        idx += 1
    return '{0:0.2f}{1}'.format(value, units[idx])
#--------------------------------------------------------------------------------------------------
# Linux-only: bind the C library to reach readahead(2), which has no direct
# Python wrapper.
libc = ctypes.CDLL(None, use_errno=True)
def readahead(fileno, offset, count):
    """Ask the kernel to prefetch `count` bytes at `offset` of `fileno`.

    Thin wrapper over readahead(2); the return value (and errno) is
    deliberately ignored — this is a best-effort hint.
    """
    libc.readahead(ctypes.c_int(fileno), ctypes.c_longlong(offset), ctypes.c_size_t(count))
#--------------------------------------------------------------------------------------------------
def timeit2(resetfunc, func, mintotal=5.0):
    """Repeatedly time func(resetfunc()) until `mintotal` seconds have elapsed.

    Only the func() call is recorded in the returned list of durations;
    the time spent in resetfunc() counts toward the budget but is not
    included in any sample.
    """
    elapsed = 0.0
    samples = []
    while True:
        start = time.perf_counter()
        arg = resetfunc()
        mid = time.perf_counter()
        func(arg)
        end = time.perf_counter()
        samples.append(end - mid)
        elapsed += end - start
        if elapsed >= mintotal:
            return samples
def timeit3(resetfunc, func, arguments):
    """Prime every argument with resetfunc, then time one func(arg) call each.

    Returns the list of per-call durations, in the same order as `arguments`.
    """
    # First pass: priming only (e.g. issuing readahead hints).
    for arg in arguments:
        resetfunc(arg)
    # Second pass: the timed calls.
    samples = []
    for arg in arguments:
        before = time.perf_counter()
        func(arg)
        after = time.perf_counter()
        samples.append(after - before)
    return samples
#--------------------------------------------------------------------------------------------------
import functools
# Make every print() flush immediately so progress is visible through `tee`.
print = functools.partial(print, flush=True)
#--------------------------------------------------------------------------------------------------
if len(sys.argv) < 2:
    print('Syntax: sudo ./test-disk.py /dev/sda | tee log')
    print('Path can also use /dev/disk/by-id/ by-label/ by-path/ by-uuid/')
    print('Redirect to a log file is optional.')
    sys.exit()
dev = os.path.realpath(sys.argv[1]).split('/')[-1]
disk = open('/dev/%s' % dev, 'rb')
# Seeking to the end of the block device yields its size in bytes.
disksize = disk.seek(0, 2)
# Disable the I/O scheduler so measurements see raw device behavior
# (restored to mq-deadline at the end of the script).
os.system('echo none | sudo tee /sys/block/%s/queue/scheduler > /dev/null' % dev)
print('Disk name: {0} Disk size: {1} Scheduler disabled.'.format(disk.name, BytesStringFloat(disksize)))
#--------------------------------------------------------------------------------------------------
# Test 1: read a random sector, then time the read of the sector immediately
# *after* it. The per-iteration resetfunc/func close over the loop variables;
# that is safe because timeit2 runs before the next iteration rebinds them.
bufsize = 512
print()
print('Measuring: Seek times when track buffer is working forwards.')
print('Buffer size: 512 bytes')
for areasize in [BytesInt('1MB')*(2**i) for i in range(0,64)] + [disksize]:
    if areasize > disksize:
        continue
    # Drop the page cache so every read hits the device.
    os.system('echo 3 | sudo tee /proc/sys/vm/drop_caches > /dev/null')
    def resetfunc():
        offset = random.randrange(0, areasize-2*bufsize)
        os.pread(disk.fileno(), bufsize, offset)
        return offset
    def func(offset):
        os.pread(disk.fileno(), bufsize, offset+bufsize)
    times = timeit2(resetfunc, func)
    print('Area size: {:6} Average: {:6.2f} ms Max: {:6.2f} ms'.format(
        BytesString(areasize) if areasize < disksize else BytesStringFloat(areasize),
        sum(times)/len(times)*1000, max(times)*1000 ))
#--------------------------------------------------------------------------------------------------
# Test 2: same as above, but time the sector immediately *before* the primed
# one instead.
bufsize = 512
print()
print('Measuring: Seek times when track buffer is working backwards.')
print('Buffer size: 512 bytes')
for areasize in [BytesInt('1MB')*(2**i) for i in range(0,64)] + [disksize]:
    if areasize > disksize:
        continue
    os.system('echo 3 | sudo tee /proc/sys/vm/drop_caches > /dev/null')
    def resetfunc():
        offset = random.randrange(bufsize, areasize-bufsize)
        os.pread(disk.fileno(), bufsize, offset)
        return offset
    def func(offset):
        os.pread(disk.fileno(), bufsize, offset-bufsize)
    times = timeit2(resetfunc, func)
    print('Area size: {:6} Average: {:6.2f} ms Max: {:6.2f} ms'.format(
        BytesString(areasize) if areasize < disksize else BytesStringFloat(areasize),
        sum(times)/len(times)*1000, max(times)*1000 ))
#--------------------------------------------------------------------------------------------------
# Test 3: plain random-read latency within a growing area (no priming read,
# no readahead hint).
bufsize = 512
print()
print('Measuring: Seek times of random reads made in sequence (no readahead) over some area.')
print('Buffer size: 512 bytes')
for areasize in [BytesInt('1MB')*(2**i) for i in range(0,64)] + [disksize]:
    if areasize > disksize:
        continue
    os.system('echo 3 | sudo tee /proc/sys/vm/drop_caches > /dev/null')
    def resetfunc():
        return random.randrange(0, areasize-bufsize)
    def func(offset):
        os.pread(disk.fileno(), bufsize, offset)
    times = timeit2(resetfunc, func)
    print('Area size: {:6} Average: {:5.2f} ms Min: {:5.2f} ms Max: {:5.2f} ms'.format(
        BytesString(areasize) if areasize < disksize else BytesStringFloat(areasize),
        sum(times)/len(times)*1000, min(times)*1000, max(times)*1000))
#--------------------------------------------------------------------------------------------------
# Test 4: random reads whose offsets are announced to the kernel up front via
# readahead(2) (see timeit3's priming pass), allowing concurrent servicing.
bufsize = 512
bufcount = 100
print()
print('Measuring: Seek times of random reads made concurrently (using readahead) over some area.')
print('Buffer count: {0} Buffer size: {1}'.format(bufcount, bufsize))
for area in [BytesInt('1MB')*(2**i) for i in range(0,64)] + [disksize]:
    if area > disksize:
        continue
    os.system('echo 3 | sudo tee /proc/sys/vm/drop_caches > /dev/null')
    offsets = [random.randrange(0, area-bufsize) for i in range(bufcount)]
    def resetfunc(offset):
        readahead(disk.fileno(), offset, bufsize)
    def func(offset):
        os.pread(disk.fileno(), bufsize, offset)
    times = timeit3(resetfunc, func, offsets)
    print('Area size: {:6} Average: {:5.2f} ms Min: {:5.2f} ms Max: {:5.2f} ms'.format(
        BytesString(area) if area < disksize else BytesStringFloat(area),
        sum(times)/len(times)*1000, min(times)*1000, max(times)*1000))
#--------------------------------------------------------------------------------------------------
# Test 5: random-read throughput as a function of buffer size.
print()
print('Measuring: Random read throughput with various buffer sizes.')
for bs in ['1MB','2MB','4MB','8MB','16MB','32MB','64MB','128MB','256MB']:
    os.system('echo 3 | sudo tee /proc/sys/vm/drop_caches > /dev/null')
    bufsize = BytesInt(bs)
    def resetfunc():
        return random.randrange(0, disksize-bufsize)
    def func(offset):
        os.pread(disk.fileno(), bufsize, offset)
    times = timeit2(resetfunc, func)
    print('Buffer size: {:5} Average: {}/sec Samples: {}'.format(
        BytesString(bufsize), BytesStringFloat(bufsize/(sum(times)/len(times))).rjust(9), len(times), ))
#--------------------------------------------------------------------------------------------------
# Test 6: sequential-read throughput; bufindex walks the device front to back,
# hence the `global` counter shared between iterations of resetfunc.
print()
print('Measuring: Sequential read throughput with various buffer sizes.')
for bs in ['1MB','2MB','4MB','8MB','16MB','32MB','64MB','128MB','256MB']:
    os.system('echo 3 | sudo tee /proc/sys/vm/drop_caches > /dev/null')
    bufsize = BytesInt(bs)
    bufindex = 0
    def resetfunc():
        global bufindex
        bufindex += 1
        return bufindex * bufsize
    def func(offset):
        os.pread(disk.fileno(), bufsize, offset)
    times = timeit2(resetfunc, func)
    print('Buffer size: {:5} Average: {}/sec Samples: {}'.format(
        BytesString(bufsize), BytesStringFloat(bufsize/(sum(times)/len(times))).rjust(9), len(times), ))
#--------------------------------------------------------------------------------------------------
# Restore the scheduler that was disabled during setup.
os.system('echo mq-deadline | sudo tee /sys/block/%s/queue/scheduler > /dev/null' % dev)
print()
print('Returned disk scheduler to mq-deadline.')
|
{
"content_hash": "eae47b47533265362ea9ab98e611e5d4",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 106,
"avg_line_length": 33.8477366255144,
"alnum_prop": 0.5324012158054712,
"repo_name": "arekbulski/Cameleonica",
"id": "ef6d61940dee773ec35a5be54e57390b34efbf1b",
"size": "8244",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/measuring disk performance/test-disk.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "796"
},
{
"name": "Python",
"bytes": "23562"
},
{
"name": "Shell",
"bytes": "116"
}
],
"symlink_target": ""
}
|
import sys
from workflow import Workflow
from lipsum import lipsum
def main(wf):
    """Workflow entry point: emit lorem-ipsum output in "bytes" mode."""
    lipsum(wf, "bytes")


if __name__ == u"__main__":
    wf = Workflow()
    sys.exit(wf.run(main))
|
{
"content_hash": "a7e6b046ea2b1efee7f5aa319ea08177",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 29,
"avg_line_length": 16.636363636363637,
"alnum_prop": 0.6338797814207651,
"repo_name": "lipsumio/alfred-lipsum",
"id": "afe49b771d51855467f945f9608ac9ebb2b461cc",
"size": "202",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lipsum-byte.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "127188"
}
],
"symlink_target": ""
}
|
import argparse
import json
import os
import sys
_CWD = os.getcwd()

# Header of a standalone .grd file; {prefix} and {out_dir} are filled in by
# main() from --grd-prefix / --output-files-base-dir.
GRD_BEGIN_TEMPLATE = '<?xml version="1.0" encoding="UTF-8"?>\n'\
                     '<grit latest_public_release="0" current_release="1" '\
                     'output_all_resource_defines="false">\n'\
                     ' <outputs>\n'\
                     ' <output filename="{out_dir}/{prefix}_resources.h" '\
                     'type="rc_header">\n'\
                     ' <emit emit_type=\'prepend\'></emit>\n'\
                     ' </output>\n'\
                     ' <output filename="{out_dir}/{prefix}_resources_map.cc"\n'\
                     ' type="resource_file_map_source" />\n'\
                     ' <output filename="{out_dir}/{prefix}_resources_map.h"\n'\
                     ' type="resource_map_header" />\n'\
                     ' <output filename="{prefix}_resources.pak" '\
                     'type="data_package" />\n'\
                     ' </outputs>\n'\
                     ' <release seq="1">\n'\
                     ' <includes>\n'

# One <include> row per packaged resource file.
GRD_INCLUDE_TEMPLATE = ' <include name="{name}" ' \
                       'file="{file}" resource_path="{path}" ' \
                       'use_base_dir="false" type="{type}" />\n'

# Variant with preprocess="true", used for files that must be preprocessed.
GRD_INCLUDE_TEMPLATE_PP = ' <include name="{name}" ' \
                          'file="{file}" resource_path="{path}" ' \
                          'use_base_dir="false" preprocess="true" '\
                          'type="{type}" />\n'

GRD_END_TEMPLATE = ' </includes>\n'\
                   ' </release>\n'\
                   '</grit>\n'

# Wrappers used when emitting a .grdp partial instead of a full .grd.
GRDP_BEGIN_TEMPLATE = '<?xml version="1.0" encoding="UTF-8"?>\n'\
                      '<grit-part>\n'

GRDP_END_TEMPLATE = '</grit-part>\n'
# Generates an <include .... /> row for the given file.
def _generate_include_row(grd_prefix, filename, pathname, \
resource_path_rewrites, resource_path_prefix):
name_suffix = filename.upper().replace('/', '_').replace('.', '_'). \
replace('-', '_').replace('@', '_AT_')
name = 'IDR_%s_%s' % (grd_prefix.upper(), name_suffix)
extension = os.path.splitext(filename)[1]
type = 'chrome_html' if extension == '.html' or extension == '.js' \
else 'BINDATA'
resource_path = resource_path_rewrites[filename] \
if filename in resource_path_rewrites else filename
if resource_path_prefix != None:
resource_path = resource_path_prefix + '/' + resource_path
# This is a temporary workaround, since Polymer 2 shared resource files are
# not preprocessed.
# TODO(rbpotter): Remove this once OS Settings has been migrated to Polymer 3.
if ('vulcanized' in pathname or 'crisper' in pathname):
return GRD_INCLUDE_TEMPLATE_PP.format(
file=pathname,
path=resource_path,
name=name,
type=type)
return GRD_INCLUDE_TEMPLATE.format(
file=pathname,
path=resource_path,
name=name,
type=type)
def _generate_part_row(filename):
return ' <part file="%s" />\n' % filename
def main(argv):
    """Generate a .grd (or .grdp) file listing resources to package.

    Rows come from two sources: --input-files (paths relative to
    --input-files-base-dir) and --manifest-files (JSON files that list a
    base_dir plus files). The result is written to --out-grd; a .grdp
    extension switches the output to a grit-part partial.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--manifest-files', nargs="*")
    parser.add_argument('--out-grd', required=True)
    parser.add_argument('--grd-prefix', required=True)
    parser.add_argument('--root-gen-dir', required=True)
    parser.add_argument('--input-files', nargs="*")
    parser.add_argument('--input-files-base-dir')
    parser.add_argument('--output-files-base-dir', default='grit')
    parser.add_argument('--grdp-files', nargs="*")
    parser.add_argument('--resource-path-rewrites', nargs="*")
    parser.add_argument('--resource-path-prefix')
    args = parser.parse_args(argv)

    grd_path = os.path.normpath(os.path.join(_CWD, args.out_grd))
    with open(grd_path, 'w', newline='') as grd_file:
        # A .grdp output gets the grit-part wrapper instead of the full header.
        begin_template = GRDP_BEGIN_TEMPLATE if args.out_grd.endswith('.grdp') else \
            GRD_BEGIN_TEMPLATE
        grd_file.write(begin_template.format(prefix=args.grd_prefix,
                                             out_dir=args.output_files_base_dir))
        if args.grdp_files != None:
            for grdp_file in args.grdp_files:
                # <part> paths are written relative to the parent grd file.
                grdp_path = os.path.relpath(grdp_file, os.path.dirname(args.out_grd)).replace('\\', '/')
                grd_file.write(_generate_part_row(grdp_path))

        # --resource-path-rewrites entries are "original|rewrite" pairs.
        resource_path_rewrites = {}
        if args.resource_path_rewrites != None:
            for r in args.resource_path_rewrites:
                [original, rewrite] = r.split("|")
                resource_path_rewrites[original] = rewrite

        if args.input_files != None:
            assert(args.input_files_base_dir)
            args.input_files_base_dir = args.input_files_base_dir.replace('\\', '/')
            args.root_gen_dir = args.root_gen_dir.replace('\\', '/')
            # Detect whether the input files reside under $root_src_dir or
            # $root_gen_dir.
            base_dir = os.path.join('${root_src_dir}', args.input_files_base_dir)
            if args.input_files_base_dir.startswith(args.root_gen_dir + '/'):
                base_dir = args.input_files_base_dir.replace(
                    args.root_gen_dir + '/', '${root_gen_dir}/')
            for filename in args.input_files:
                filepath = os.path.join(base_dir, filename).replace('\\', '/')
                grd_file.write(_generate_include_row(
                    args.grd_prefix, filename, filepath,
                    resource_path_rewrites, args.resource_path_prefix))

        if args.manifest_files != None:
            for manifest_file in args.manifest_files:
                manifest_path = os.path.normpath(os.path.join(_CWD, manifest_file))
                with open(manifest_path, 'r') as f:
                    data = json.load(f)
                    base_dir= os.path.normpath(os.path.join(_CWD, data['base_dir']))
                    for filename in data['files']:
                        filepath = os.path.join(base_dir, filename)
                        # Manifest file paths are rewritten relative to
                        # root_gen_dir before being emitted.
                        rebased_path = os.path.relpath(filepath, args.root_gen_dir).replace('\\', '/')
                        grd_file.write(_generate_include_row(
                            args.grd_prefix, filename, '${root_gen_dir}/' + rebased_path,
                            resource_path_rewrites, args.resource_path_prefix))
        end_template = GRDP_END_TEMPLATE if args.out_grd.endswith('.grdp') else \
            GRD_END_TEMPLATE
        grd_file.write(end_template)
    return
# Script entry point.
if __name__ == '__main__':
    main(sys.argv[1:])
|
{
"content_hash": "b990e128ca4b6193acbaf7c28c3ed84e",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 96,
"avg_line_length": 41.630872483221474,
"alnum_prop": 0.570530388521683,
"repo_name": "nwjs/chromium.src",
"id": "3f3476d26040d4a81e904a0112f1fc8a0c813303",
"size": "7943",
"binary": false,
"copies": "6",
"ref": "refs/heads/nw70",
"path": "ui/webui/resources/tools/generate_grd.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""Test a Fast R-CNN network on an imdb (image database)."""
from fast_rcnn.config import cfg, get_output_dir
from fast_rcnn.bbox_transform import clip_boxes, bbox_transform_inv
import argparse
from utils.timer import Timer
import numpy as np
import cv2
import caffe
from fast_rcnn.nms_wrapper import nms
import cPickle
from utils.blob import im_list_to_blob
import os
from objectness.map import HeatMap
from objectness.utils import semantic_segment_image
def _get_image_blob(im):
    """Converts an image into a network input.
    Arguments:
        im (ndarray): a color image in BGR order
    Returns:
        blob (ndarray): a data blob holding an image pyramid
        im_scale_factors (list): list of image scales (relative to im) used
            in the image pyramid
    """
    # Work on a float copy and subtract the configured per-channel pixel means.
    im_orig = im.astype(np.float32, copy=True)
    im_orig -= cfg.PIXEL_MEANS
    im_shape = im_orig.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])
    processed_ims = []
    im_scale_factors = []
    # One resized copy per configured test scale: scale the shorter side to
    # target_size, capped so the longer side stays within MAX_SIZE.
    for target_size in cfg.TEST.SCALES:
        im_scale = float(target_size) / float(im_size_min)
        # Prevent the biggest axis from being more than MAX_SIZE
        if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
            im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
        im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
                        interpolation=cv2.INTER_LINEAR)
        im_scale_factors.append(im_scale)
        processed_ims.append(im)
    # Create a blob to hold the input images
    blob = im_list_to_blob(processed_ims)
    return blob, np.array(im_scale_factors)
def _get_rois_blob(im_rois, im_scale_factors):
    """Convert RoIs into network inputs.

    Arguments:
        im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
        im_scale_factors (list): scale factors as returned by _get_image_blob

    Returns:
        blob (ndarray): R x 5 matrix of RoIs in the image pyramid
            (pyramid level prepended to each projected box)
    """
    projected_rois, pyramid_levels = _project_im_rois(im_rois, im_scale_factors)
    stacked = np.hstack((pyramid_levels, projected_rois))
    return stacked.astype(np.float32, copy=False)
def _project_im_rois(im_rois, scales):
"""Project image RoIs into the image pyramid built by _get_image_blob.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
scales (list): scale factors as returned by _get_image_blob
Returns:
rois (ndarray): R x 4 matrix of projected RoI coordinates
levels (list): image pyramid levels used by each projected RoI
"""
im_rois = im_rois.astype(np.float, copy=False)
if len(scales) > 1:
widths = im_rois[:, 2] - im_rois[:, 0] + 1
heights = im_rois[:, 3] - im_rois[:, 1] + 1
areas = widths * heights
scaled_areas = areas[:, np.newaxis] * (scales[np.newaxis, :] ** 2)
diff_areas = np.abs(scaled_areas - 224 * 224)
levels = diff_areas.argmin(axis=1)[:, np.newaxis]
else:
levels = np.zeros((im_rois.shape[0], 1), dtype=np.int)
rois = im_rois * scales[levels]
return rois, levels
def _get_blobs(im, rois):
    """Convert an image and RoIs within that image into network inputs."""
    data_blob, im_scale_factors = _get_image_blob(im)
    blobs = {'data': data_blob, 'rois': None}
    # RoIs are only fed to the network when proposals come from outside
    # (no RPN); otherwise the 'rois' entry stays None.
    if not cfg.TEST.HAS_RPN:
        blobs['rois'] = _get_rois_blob(rois, im_scale_factors)
    return blobs, im_scale_factors
def im_detect(net, im, boxes=None):
    """Detect object classes in an image given object proposals.
    Arguments:
        net (caffe.Net): Fast R-CNN network to use
        im (ndarray): color image to test (in BGR order)
        boxes (ndarray): R x 4 array of object proposals or None (for RPN)
    Returns:
        scores (ndarray): R x K array of object class scores (K includes
            background as object category 0)
        boxes (ndarray): R x (4*K) array of predicted bounding boxes
    """
    blobs, im_scales = _get_blobs(im, boxes)
    # When mapping from image ROIs to feature map ROIs, there's some aliasing
    # (some distinct image ROIs get mapped to the same feature ROI).
    # Here, we identify duplicate feature ROIs, so we only compute features
    # on the unique subset.
    if cfg.DEDUP_BOXES > 0 and not cfg.TEST.HAS_RPN:
        # Hash each ROI by dotting its quantized coordinates with powers of
        # 1000, then dedupe on the hash values.
        v = np.array([1, 1e3, 1e6, 1e9, 1e12])
        hashes = np.round(blobs['rois'] * cfg.DEDUP_BOXES).dot(v)
        _, index, inv_index = np.unique(hashes, return_index=True,
                                        return_inverse=True)
        blobs['rois'] = blobs['rois'][index, :]
        boxes = boxes[index, :]
    if cfg.TEST.HAS_RPN:
        # RPN needs the blob's spatial size and scale in an 'im_info' input.
        im_blob = blobs['data']
        blobs['im_info'] = np.array(
            [[im_blob.shape[2], im_blob.shape[3], im_scales[0]]],
            dtype=np.float32)
    # reshape network inputs
    net.blobs['data'].reshape(*(blobs['data'].shape))
    if cfg.TEST.HAS_RPN:
        net.blobs['im_info'].reshape(*(blobs['im_info'].shape))
    else:
        net.blobs['rois'].reshape(*(blobs['rois'].shape))
    # do forward
    forward_kwargs = {'data': blobs['data'].astype(np.float32, copy=False)}
    if cfg.TEST.HAS_RPN:
        forward_kwargs['im_info'] = blobs['im_info'].astype(np.float32, copy=False)
    else:
        forward_kwargs['rois'] = blobs['rois'].astype(np.float32, copy=False)
    blobs_out = net.forward(**forward_kwargs)
    if cfg.TEST.HAS_RPN:
        assert len(im_scales) == 1, "Only single-image batch implemented"
        rois = net.blobs['rois'].data.copy()
        # unscale back to raw image space
        boxes = rois[:, 1:5] / im_scales[0]
    if cfg.TEST.SVM:
        # use the raw scores before softmax under the assumption they
        # were trained as linear SVMs
        scores = net.blobs['cls_score'].data
    else:
        # use softmax estimated probabilities
        scores = blobs_out['cls_prob']
    if cfg.TEST.BBOX_REG:
        # Apply bounding-box regression deltas
        box_deltas = blobs_out['bbox_pred']
        pred_boxes = bbox_transform_inv(boxes, box_deltas)
        pred_boxes = clip_boxes(pred_boxes, im.shape)
    else:
        # Simply repeat the boxes, once for each class
        pred_boxes = np.tile(boxes, (1, scores.shape[1]))
    if cfg.DEDUP_BOXES > 0 and not cfg.TEST.HAS_RPN:
        # Map scores and predictions back to the original set of boxes
        scores = scores[inv_index, :]
        pred_boxes = pred_boxes[inv_index, :]
    return scores, pred_boxes
def vis_detections(im, class_name, dets, thresh=0.3):
    """Visual debugging of detections.

    Shows one matplotlib figure per detection whose score exceeds
    ``thresh``, limited to the first 10 rows of ``dets``.
    """
    import matplotlib.pyplot as plt
    rgb = im[:, :, (2, 1, 0)]  # BGR -> RGB for matplotlib
    limit = int(np.minimum(10, dets.shape[0]))
    for det in dets[:limit]:
        score = det[-1]
        if score <= thresh:
            continue
        x1, y1, x2, y2 = det[:4]
        plt.cla()
        plt.imshow(rgb)
        plt.gca().add_patch(
            plt.Rectangle((x1, y1), x2 - x1, y2 - y1,
                          fill=False, edgecolor='g', linewidth=3)
        )
        plt.title('{} {:.3f}'.format(class_name, score))
        plt.show()
def apply_nms(all_boxes, thresh):
    """Apply non-maximum suppression to all predicted boxes output by the
    test_net method.

    Arguments:
        all_boxes (list): all_boxes[cls][image] is an N x 5 ndarray of
            detections (x1, y1, x2, y2, score), or an empty list
        thresh (float): NMS overlap threshold

    Returns:
        list with the same [cls][image] layout containing only kept boxes
    """
    num_classes = len(all_boxes)
    num_images = len(all_boxes[0])
    nms_boxes = [[[] for _ in range(num_images)]
                 for _ in range(num_classes)]
    for cls_ind in range(num_classes):
        for im_ind in range(num_images):
            dets = all_boxes[cls_ind][im_ind]
            # FIX: `dets == []` compares a numpy array element-wise, which is
            # ambiguous (raises/deprecation-warns on modern numpy). Test for
            # emptiness by length, which works for both lists and arrays.
            if len(dets) == 0:
                continue
            # CPU NMS is much faster than GPU NMS when the number of boxes
            # is relative small (e.g., < 10k)
            # TODO(rbg): autotune NMS dispatch
            keep = nms(dets, thresh, force_cpu=True)
            if len(keep) == 0:
                continue
            nms_boxes[cls_ind][im_ind] = dets[keep, :].copy()
    return nms_boxes
def test_net(net, imdb, max_per_image=400, thresh=-np.inf, vis=False, refine=False):
"""Test a Fast R-CNN network on an image database."""
num_images = len(imdb.image_index)
# all detections are collected into:
# all_boxes[cls][image] = N x 5 array of detections in
# (x1, y1, x2, y2, score)
all_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(imdb.num_classes)]
output_dir = get_output_dir(imdb, net)
# Heatmap image
hm = HeatMap()
# timers
_t = {'im_detect' : Timer(), 'misc' : Timer()}
if not cfg.TEST.HAS_RPN:
roidb = imdb.roidb
det_file = os.path.join(output_dir, 'detections.pkl')
# if not os.path.exists(det_file):
if True: # Forcing recomputation.
for i in xrange(num_images):
# filter out any ground truth boxes
if cfg.TEST.HAS_RPN:
box_proposals = None
else:
# The roidb may contain ground-truth rois (for example, if the roidb
# comes from the training or val split). We only want to evaluate
# detection on the *non*-ground-truth rois. We select those the rois
# that have the gt_classes field set to 0, which means there's no
# ground truth.
box_proposals = roidb[i]['boxes'][roidb[i]['gt_classes'] == 0]
im = cv2.imread(imdb.image_path_at(i))
_t['im_detect'].tic()
scores, boxes = im_detect(net, im, box_proposals)
_t['im_detect'].toc()
_t['misc'].tic()
# skip j = 0, because it's the background class
skip_counter = 0
add_counter = 0
for j in xrange(1, imdb.num_classes):
inds = np.where(scores[:, j] > thresh)[0]
cls_scores = scores[inds, j]
if cfg.TEST.AGNOSTIC:
cls_boxes = boxes[inds, 4:8]
else:
cls_boxes = boxes[inds, j*4:(j+1)*4]
cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
.astype(np.float32, copy=False)
keep = nms(cls_dets, cfg.TEST.NMS)
cls_dets = cls_dets[keep, :]
if vis:
vis_detections(im, imdb.classes[j], cls_dets)
if refine:
print "cls_dets.shape before", cls_dets.shape
for index in range(0, len(cls_dets)):
bbox = cls_dets[index, :4]
patch = im[int(bbox[1]):int(bbox[3]), int(bbox[0]):int(bbox[2])]
_, iou, obj_score = semantic_segment_image(hm, patch, 'red')
if obj_score > .1 and iou > .2:
add_counter += 1
else:
skip_counter += 1
np.delete(cls_dets, index)
print "cls_dets.shape after", cls_dets.shape
all_boxes[j][i] = cls_dets
# print 'Eleminated ', skip_counter, '. Retained ', add_counter, ' elements.'
# print 'Shape of all_boxes: ', np.array(all_boxes).shape
# Limit to max_per_image detections *over all classes*
if max_per_image > 0:
image_scores = np.hstack([all_boxes[j][i][:, -1]
for j in xrange(1, imdb.num_classes)])
if len(image_scores) > max_per_image:
image_thresh = np.sort(image_scores)[-max_per_image]
for j in xrange(1, imdb.num_classes):
keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
all_boxes[j][i] = all_boxes[j][i][keep, :]
_t['misc'].toc()
print 'im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \
.format(i + 1, num_images, _t['im_detect'].average_time,
_t['misc'].average_time)
with open(det_file, 'wb') as f:
cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)
else:
with open(det_file, 'rb') as fid:
all_boxes = cPickle.load(fid)
print 'Detections loaded from {}'.format(det_file)
print 'Evaluating detections'
imdb.evaluate_detections(all_boxes, output_dir)
|
{
"content_hash": "1cef1ebf259e84abe68739f265c4382b",
"timestamp": "",
"source": "github",
"line_count": 322,
"max_line_length": 89,
"avg_line_length": 38.58385093167702,
"alnum_prop": 0.5643915003219575,
"repo_name": "JosephKJ/SDD-RFCN-python",
"id": "074ba9f0cfaf4a0d2bc2b7f0e5fa7b5bf02eda63",
"size": "12673",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/fast_rcnn/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9632"
},
{
"name": "C++",
"bytes": "146"
},
{
"name": "Cuda",
"bytes": "5064"
},
{
"name": "Makefile",
"bytes": "56"
},
{
"name": "Matlab",
"bytes": "1821"
},
{
"name": "Python",
"bytes": "261900"
},
{
"name": "Shell",
"bytes": "12904"
}
],
"symlink_target": ""
}
|
import numpy as np
from optparse import OptionParser
from pylearn2.models.independent_multiclass_logistic import IndependentMulticlassLogistic
from galatea.s3c.feature_loading import get_features
from pylearn2.utils import serial
from pylearn2.datasets.cifar10 import CIFAR10
from pylearn2.datasets.cifar100 import CIFAR100
import gc
gc.collect()
def train(fold_train_X, fold_train_y, C):
    """Fit an IndependentMulticlassLogistic model with penalty C on the
    given training fold and return the fitted model."""
    model = IndependentMulticlassLogistic(C).fit(fold_train_X, fold_train_y)
    # Free any garbage left over from fitting before returning.
    gc.collect()
    return model
def get_labels_and_fold_indices(cifar10, cifar100, stl10):
    """Load only the training labels (and cross-validation fold indices)
    for exactly one of the three supported datasets.

    Exactly one of the boolean flags must be True. Returns
    (train_y, fold_indices); fold_indices uses 1-based element numbering
    to mimic the STL-10 matlab format.
    """
    assert stl10 or cifar10 or cifar100
    assert stl10+cifar10+cifar100 == 1
    if stl10:
        print 'loading entire stl-10 train set just to get the labels and folds'
        stl10 = serial.load("${PYLEARN2_DATA_PATH}/stl10/stl10_32x32/train.pkl")
        train_y = stl10.y
        fold_indices = stl10.fold_indices
    elif cifar10 or cifar100:
        if cifar10:
            print 'loading entire cifar10 train set just to get the labels'
            cifar = CIFAR10(which_set = 'train')
        else:
            assert cifar100
            print 'loading entire cifar100 train set just to get the labels'
            cifar = CIFAR100(which_set = 'train')
            # CIFAR-100 exposes coarse and fine labels; use the fine ones.
            cifar.y = cifar.y_fine
        train_y = cifar.y
        assert train_y is not None
        # Build 5 folds over the 50000 examples: fold i holds every index
        # EXCEPT the contiguous block [i*10000+1, (i+1)*10000].
        fold_indices = np.zeros((5,40000),dtype='uint16')
        idx_list = np.cast['uint16'](np.arange(1,50001)) #mimic matlab format of stl10
        for i in xrange(5):
            mask = idx_list < i * 10000 + 1
            mask += idx_list >= (i+1) * 10000 + 1
            fold_indices[i,:] = idx_list[mask]
        assert fold_indices.min() == 1
        assert fold_indices.max() == 50000
    return train_y, fold_indices
def main(train_path,
out_path,
dataset,
standardize,
C,
**kwargs):
stl10 = dataset == 'stl10'
cifar10 = dataset == 'cifar10'
cifar100 = dataset == 'cifar100'
assert stl10 + cifar10 + cifar100 == 1
print 'getting labels and oflds'
train_y, fold_indices = get_labels_and_fold_indices(cifar10, cifar100, stl10)
gc.collect()
assert train_y is not None
print 'loading training features'
train_X = get_features(train_path, split = False, standardize = standardize)
assert str(train_X.dtype) == 'float32'
if stl10:
assert train_X.shape[0] == 5000
if cifar10 or cifar100:
assert train_X.shape[0] == 50000
assert train_y.shape == (50000,)
print 'training model'
model = train(train_X, train_y, C)
print 'saving model'
serial.save(out_path, model)
if __name__ == '__main__':
    # Command-line options are declared but currently unused: the call to
    # main() below hard-codes its arguments (see commented-out parsing).
    parser = OptionParser()
    parser.add_option("-d", "--train",
                      action="store", type="string", dest="train")
    parser.add_option("-o", "--out",
                      action="store", type="string", dest="out")
    parser.add_option('-C', type='float', dest='C', action='store', default = None)
    parser.add_option('--dataset', type='string', dest = 'dataset', action='store', default = None)
    parser.add_option('--standardize',action="store_true", dest="standardize", default=False)
    parser.add_option('--fold', action='store', type='int', dest='fold', default = None)
    #(options, args) = parser.parse_args()
    #assert options.dataset is not None
    #assert options.C is not None
    #assert options.out is not None
    #assert options.fold is not None
    #log = open(options.out+'.log.txt','w')
    #log.write('log file started succesfully\n')
    #log.flush()
    print 'parsed the args'
    # NOTE(review): arguments are hard-coded here instead of taken from the
    # parsed options above — confirm this is intentional.
    main(train_path='features.npy',
         out_path = 'final_model.pkl',
         C = .01,
         dataset = 'cifar100',
         standardize = False,
         #fold = options.fold,
         #log = log
         )
    #log.close()
|
{
"content_hash": "20dfd1097976b21b93058fa50f0bc7dc",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 99,
"avg_line_length": 31.33606557377049,
"alnum_prop": 0.6215014386607376,
"repo_name": "shiquanwang/pylearn2",
"id": "1de75bac5dd913ae93f388cdf1808f32495b941e",
"size": "3823",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "pylearn2/scripts/papers/jia_huang_wkshp_11/fit_final_model.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "53316"
},
{
"name": "C++",
"bytes": "46935"
},
{
"name": "CSS",
"bytes": "10655"
},
{
"name": "Cuda",
"bytes": "1267472"
},
{
"name": "Objective-C",
"bytes": "953"
},
{
"name": "Python",
"bytes": "3452538"
},
{
"name": "Shell",
"bytes": "4195"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
class SessionStorage(object):
    """Abstract key-value session storage backend.

    Subclasses implement get/set/delete; the mapping dunder methods
    delegate to them so a storage instance supports dict-style syntax
    (``storage[key]``, ``storage[key] = value``, ``del storage[key]``).
    """

    def get(self, key):
        """Return the value stored under *key* (miss behavior is
        backend-defined)."""
        raise NotImplementedError()

    def set(self, key, value, ttl=None):
        """Store *value* under *key*; *ttl* is a backend-dependent
        time-to-live hint."""
        raise NotImplementedError()

    def delete(self, key):
        """Remove *key* from the storage."""
        raise NotImplementedError()

    def __getitem__(self, key):
        # BUG FIX: the original dropped the result of get(), so
        # storage[key] always evaluated to None.
        return self.get(key)

    def __setitem__(self, key, value):
        self.set(key, value)

    def __delitem__(self, key):
        self.delete(key)
|
{
"content_hash": "8c4daf338a45fe72b87f0ceefecd6ba2",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 56,
"avg_line_length": 21.545454545454547,
"alnum_prop": 0.6118143459915611,
"repo_name": "Luckyseal/wechatpy",
"id": "5a6fd74c9b79c64120158ff5d920d57340ce50a2",
"size": "498",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "wechatpy/session/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "10639"
},
{
"name": "Python",
"bytes": "356111"
},
{
"name": "Shell",
"bytes": "70"
}
],
"symlink_target": ""
}
|
"""
created_by: Aninda Manocha
created_date: 3/5/2015
last_modified_by: Keshav Patel
last_modified_date: 3/6/2015
"""
# imports
import constants
import utils
import json
from sql.user import User
from sql.question import Question
from sql.session import Session
from sql.test_case import Test_Case
#Format of test case -AM
#requestType: addTestCase
#question: question id integer
#weight: integer
#content: string
def iChooseU(form):
    """Create a new test case for a question from a request form.

    Expected form keys (see the header comment): requestType, question
    (question id), weight (integer), content (string).

    Returns a success JSON response echoing the form.
    """
    thisUser = utils.findUser(form)
    question = form["question"]
    theQuestion = Question.get(question)[0]
    weight = form["weight"]
    content = form["content"]
    # BUG FIX: `ACTIVE` was an undefined bare name (NameError at runtime);
    # the flag presumably lives in the imported constants module — confirm
    # against the other controllers.
    newTestCase = Test_Case.noID(None, thisUser, theQuestion, weight, content,
                                 constants.ACTIVE)
    newTestCase.add()
    return utils.successJson(form)
|
{
"content_hash": "00357047a87c80c931b4f301bc7133c6",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 86,
"avg_line_length": 23.529411764705884,
"alnum_prop": 0.695,
"repo_name": "NCSSM-CS/CSAssess",
"id": "76d3ad85d0c743b1e9bc479ea91b2565ee4c8040",
"size": "826",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "controller/addTestCase.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11074"
},
{
"name": "HTML",
"bytes": "602701"
},
{
"name": "JavaScript",
"bytes": "308272"
},
{
"name": "Python",
"bytes": "107267"
},
{
"name": "Shell",
"bytes": "291"
}
],
"symlink_target": ""
}
|
"""Github repo wrapper."""
from __future__ import print_function
# Standard library imports
import datetime
import sys
import time
# Local imports
from loghub.external.github import ApiError, ApiNotFoundError, GitHub
class GitHubRepo(object):
    """Github repository wrapper.

    Wraps the bundled GitHub API client for one `org/name` repository and
    exposes helpers to fetch tags, labels, milestones, issues and PRs.
    Validation failures and rate-limit exhaustion terminate the process
    via sys.exit(1).
    """
    def __init__(self, username=None, password=None, token=None, repo=None):
        """Github repository wrapper.

        `repo` must be an 'organization/name' string; credentials may be a
        username/password pair or an access token.
        """
        self._username = username
        self._password = password
        self._token = token
        self.gh = GitHub(
            username=username,
            password=password,
            access_token=token, )
        repo_organization, repo_name = repo.split('/')
        self._repo_organization = repo_organization
        self._repo_name = repo_name
        self.repo = self.gh.repos(repo_organization)(repo_name)
        # Check username and repo name
        self._check_user()
        self._check_repo_name()
    def _check_user(self):
        """Check if the supplied username is valid."""
        try:
            self.gh.users(self._repo_organization).get()
        except ApiNotFoundError:
            print('LOGHUB: Organization/user `{}` seems to be '
                  'invalid.\n'.format(self._repo_organization))
            sys.exit(1)
        except ApiError:
            self._check_rate()
            print('LOGHUB: The credentials seems to be invalid!\n')
            sys.exit(1)
    def _check_repo_name(self):
        """Check if the supplied repository exists."""
        try:
            self.repo.get()
        except ApiNotFoundError:
            print('LOGHUB: Repository `{0}` for organization/username `{1}` '
                  'seems to be invalid.\n'.format(self._repo_name,
                                                  self._repo_organization))
            sys.exit(1)
        except ApiError:
            self._check_rate()
    def _check_rate(self):
        """Check and handle if api rate limit has been exceeded."""
        if self.gh.x_ratelimit_remaining == 0:
            reset_struct = time.gmtime(self.gh.x_ratelimit_reset)
            reset_format = time.strftime('%Y/%m/%d %H:%M', reset_struct)
            print('LOGHUB: GitHub API rate limit exceeded!')
            print('LOGHUB: GitHub API rate limit resets on '
                  '{}'.format(reset_format))
            # NOTE(review): `not a and not b or not c` parses as
            # `(not a and not b) or not c`, so this hint is also printed
            # when a token *was* supplied — confirm intended precedence.
            if not self._username and not self._password or not self._token:
                print('LOGHUB: Try running loghub with user/password or '
                      'a valid token.\n')
            sys.exit(1)
    def _filter_milestone(self, issues, milestone):
        """Filter out all issues in milestone."""
        # Iterate over a copy (issues[:]) because entries are removed in place.
        if milestone:
            for issue in issues[:]:
                milestone_data = issue.get('milestone', {})
                if milestone_data:
                    issue_milestone_title = milestone_data.get('title')
                else:
                    issue_milestone_title = ''
                if issue_milestone_title != milestone:
                    issues.remove(issue)
        return issues
    def _filter_since(self, issues, since):
        """Filter out all issues before `since` date."""
        if since:
            since_date = self.str_to_date(since)
            for issue in issues[:]:
                close_date = self.str_to_date(issue['closed_at'])
                if close_date < since_date and issue in issues:
                    issues.remove(issue)
        return issues
    def _filter_until(self, issues, until):
        """Filter out all issues after `until` date."""
        if until:
            until_date = self.str_to_date(until)
            for issue in issues[:]:
                close_date = self.str_to_date(issue['closed_at'])
                if close_date > until_date and issue in issues:
                    issues.remove(issue)
        return issues
    def _filter_by_branch(self, issues, issue, branch):
        """Filter prs by the branch they were merged into."""
        number = issue['number']
        # Drop PRs that were closed without being merged.
        if not self.is_merged(number) and issue in issues:
            issues.remove(issue)
        if branch:
            # Get PR info and get base branch
            pr_data = self.pr(number)
            base_ref = pr_data['base']['ref']
            if base_ref != branch and issue in issues:
                issues.remove(issue)
        return issues
    def _filer_closed_prs(self, issues, branch):
        """Filter out closed PRs."""
        for issue in issues[:]:
            pr = issue.get('pull_request', '')
            # Add label names inside additional key
            issue['loghub_label_names'] = [
                l['name'] for l in issue.get('labels')
            ]
            if pr:
                issues = self._filter_by_branch(issues, issue, branch)
        return issues
    def tags(self):
        """Return all tags."""
        self._check_rate()
        return self.repo('git')('refs')('tags').get()
    def tag(self, tag_name):
        """Get tag information."""
        self._check_rate()
        refs = self.repo('git')('refs')('tags').get()
        # Resolve the tag name to the sha of its tag object; -1 = not found.
        sha = -1
        tags = []
        for ref in refs:
            ref_name = 'refs/tags/{tag}'.format(tag=tag_name)
            if 'object' in ref and ref['ref'] == ref_name:
                sha = ref['object']['sha']
            tags.append(ref['ref'].split('/')[-1])
        if sha == -1:
            print("LOGHUB: You didn't pass a valid tag name!")
            print('LOGHUB: The available tags are: {0}\n'.format(tags))
            sys.exit(1)
        try:
            tag = self.repo('git')('tags')(sha).get()
        except ApiNotFoundError:
            print("\nLOGHUB: The Git tags API only supports annotated tag "
                  "objects, not lightweight tags.\n")
            sys.exit(1)
        return tag
    def labels(self):
        """Return labels for the repo."""
        self._check_rate()
        return self.repo.labels.get()
    def set_labels(self, labels):
        """Return labels for the repo."""
        self._check_rate()
        for label in labels:
            new_name = label['new_name']
            old_name = label['old_name']
            color = label['color']
            # Try renaming an existing label first; on failure, create it.
            try:
                self.repo.labels(old_name).patch(name=new_name, color=color)
                print('Updated label: "{0}" -> "{1}" (#{2})'.format(
                    old_name, new_name, color))
            except ApiError:
                try:
                    self.repo.labels.post(name=new_name, color=color)
                    print('Created label: "{0}" (#{1})'.format(new_name,
                                                               color))
                except ApiError:
                    print('\nLabel "{0}" already exists!'.format(new_name))
    def milestones(self):
        """Return all milestones."""
        self._check_rate()
        return self.repo.milestones.get(state='all', direction='desc')
    def milestone(self, milestone_title):
        """Return milestone with given title."""
        self._check_rate()
        milestones = self.milestones()
        milestone_number = -1
        milestone_titles = [milestone['title'] for milestone in milestones]
        for milestone in milestones:
            if milestone['title'] == milestone_title:
                milestone_number = milestone['number']
                break
        if milestone_number == -1:
            print("LOGHUB: You didn't pass a valid milestone name!")
            print('LOGHUB: The available milestones are: {0}\n'
                  ''.format(milestone_titles))
            sys.exit(1)
        return milestone
    def pr(self, pr_number):
        """Get PR information."""
        self._check_rate()
        return self.repo('pulls')(str(pr_number)).get()
    def issue(self, issue_number):
        """Get a specific issue number from repo."""
        # /repos/:owner/:repo/issues/:issue_number
        self._check_rate()
        return self.repo.issues(str(issue_number)).get()
    def issues(self,
               milestone=None,
               state=None,
               assignee=None,
               creator=None,
               mentioned=None,
               labels=None,
               sort=None,
               direction=None,
               since=None,
               until=None,
               branch=None,
               cache=False,
               base_issues=None):
        """Return Issues and Pull Requests.

        Pages through the repo's issues (100 per page) unless a
        pre-fetched `base_issues` list is supplied, then applies the
        since/until/milestone/branch filters locally.
        """
        self._check_rate()
        page = 1
        if not base_issues:
            milestone_number = None
            if milestone:
                milestone_data = self.milestone(milestone)
                milestone_number = milestone_data.get('number')
            issues = []
            while True:
                result = self.repo.issues.get(page=page,
                                              per_page=100,
                                              milestone=milestone_number,
                                              state=state,
                                              assignee=assignee,
                                              creator=creator,
                                              mentioned=mentioned,
                                              labels=labels,
                                              sort=sort,
                                              direction=direction,
                                              since=since)
                if len(result) > 0:
                    issues += result
                    page = page + 1
                else:
                    break
        else:
            issues = base_issues
        # If since was provided, filter the issue
        issues = self._filter_since(issues, since)
        # If until was provided, filter the issue
        issues = self._filter_until(issues, until)
        # If milestone was provided, filter the issue
        issues = self._filter_milestone(issues, milestone)
        # If it is a pr check if it is merged or closed, removed closed ones
        issues = self._filer_closed_prs(issues, branch)
        return issues
    def is_merged(self, pr):
        """
        Return whether a PR was merged, or if it was closed and discarded.
        https://developer.github.com/v3/pulls/#get-if-a-pull-request-has-been-merged
        """
        self._check_rate()
        merged = True
        # The merge endpoint 404s for unmerged PRs; any failure is treated
        # as "not merged" (broad except is deliberate best-effort here).
        try:
            self.repo('pulls')(str(pr))('merge').get()
        except Exception:
            merged = False
        return merged
    @staticmethod
    def str_to_date(string):
        """Convert ISO date string (e.g. 2017-01-02T12:34:56Z) to a
        datetime object."""
        parts = string.split('T')
        date_parts = parts[0]
        time_parts = parts[1][:-1]
        year, month, day = [int(i) for i in date_parts.split('-')]
        hour, minutes, seconds = [int(i) for i in time_parts.split(':')]
        return datetime.datetime(year, month, day, hour, minutes, seconds)
|
{
"content_hash": "1465ea2c9747f2f5239163c8272247b7",
"timestamp": "",
"source": "github",
"line_count": 312,
"max_line_length": 84,
"avg_line_length": 35.1474358974359,
"alnum_prop": 0.5110341054167427,
"repo_name": "spyder-ide/loghub",
"id": "18d2ba528eb3d79d02f3a13afa8219c93c76dff2",
"size": "11274",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "loghub/core/repo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "82198"
}
],
"symlink_target": ""
}
|
import datetime as dt
# from flask_login import UserMixin
from skeleton.extensions import bcrypt
from skeleton.database import (
Column,
db,
Model,
ReferenceCol,
relationship,
SurrogatePK,
)
from itsdangerous import (TimedJSONWebSignatureSerializer
as Serializer, BadSignature, SignatureExpired)
import os
class Role(SurrogatePK, Model):
    """A named role optionally attached to a user (simple access control)."""
    __tablename__ = 'roles'
    # Role name, unique across the table.
    name = Column(db.String(80), unique=True, nullable=False)
    # Optional owner of this role; backref exposes `user.roles`.
    user_id = ReferenceCol('users', nullable=True)
    user = relationship('User', backref='roles')
    def __init__(self, name, **kwargs):
        db.Model.__init__(self, name=name, **kwargs)
    def __repr__(self):
        return '<Role({name})>'.format(name=self.name)
class User(SurrogatePK, Model):
    """An application user with bcrypt-hashed password and token auth."""
    __tablename__ = 'users'
    username = Column(db.String(80), unique=True, nullable=False)
    email = Column(db.String(80), unique=True, nullable=False)
    # Bcrypt hash of the password; None while the account has no password.
    password = Column(db.String(128), nullable=True)
    created_at = Column(db.DateTime, nullable=False, default=dt.datetime.utcnow)
    first_name = Column(db.String(30), nullable=True)
    last_name = Column(db.String(30), nullable=True)
    active = Column(db.Boolean(), default=False)
    is_admin = Column(db.Boolean(), default=False)
    def __init__(self, username, email, password=None, **kwargs):
        db.Model.__init__(self, username=username, email=email, **kwargs)
        if password:
            self.set_password(password)
        else:
            self.password = None
    def set_password(self, password):
        """Hash `password` with bcrypt and store the hash."""
        self.password = bcrypt.generate_password_hash(password)
    def check_password(self, value):
        """Return True if `value` matches the stored password hash."""
        return bcrypt.check_password_hash(self.password, value)
    def generate_auth_token(self, expiration = 600):
        """Return a signed token embedding this user's id, valid for
        `expiration` seconds.

        NOTE(review): the signing secret is hardcoded as 'secret_key';
        it should come from app configuration — confirm and fix together
        with verify_auth_token (tokens signed with the old secret would
        be invalidated).
        """
        s = Serializer('secret_key', expires_in = expiration)
        return s.dumps({ 'id': self.id })
    @staticmethod
    def verify_auth_token(token):
        """Return the User for a valid token, or None if the token is
        expired or invalid."""
        s = Serializer('secret_key')
        try:
            data = s.loads(token)
        except SignatureExpired:
            return None # valid token, but expired
        except BadSignature:
            return None # invalid token
        user = User.query.get(data['id'])
        return user
    @property
    def full_name(self ):
        # Concatenation of first and last name; shows 'None' parts if unset.
        return "{0} {1}".format(self.first_name, self.last_name)
    def __repr__(self):
        return '<User({username!r})>'.format(username=self.username)
|
{
"content_hash": "7e494703de449840d27417180db5ec4e",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 80,
"avg_line_length": 31.164556962025316,
"alnum_prop": 0.6344435418359058,
"repo_name": "outpark/skeleton",
"id": "e945872d32351ec5d7044a8bf60937c0cb1b68a1",
"size": "2486",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skeleton/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "581"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "27331"
}
],
"symlink_target": ""
}
|
import sys
import os
import time
# Absolute (symlink-resolved) path of this launcher; argv0 is kept as typed
# for usage messages.
launcher_path = os.path.realpath(sys.argv[0])
argv0 = sys.argv[0]
def executable_in_path(program):
    """Search os.environ['PATH'] for *program*; return the first full path
    that exists and is executable, or None when nothing matches."""
    for directory in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(directory, program)
        if os.path.exists(candidate) and os.access(candidate, os.X_OK):
            return candidate
    return None
def strip_path(full_path, remove_this="tema"):
    """Walk upward through *full_path* until a component equal to
    *remove_this* is found, and return the prefix above that component.
    If no such component exists, this ends at the filesystem root."""
    remaining, component = os.path.split(full_path)
    while component and component != remove_this:
        remaining, component = os.path.split(remaining)
    return remaining
# Derive installation directories from the launcher's location.
tema_path = strip_path(launcher_path)
modelutils_path = os.path.join(strip_path(tema_path, "TemaLib"), "ModelUtils")
validation_path = os.path.join(strip_path(tema_path, "TemaLib"), "Validation")
mocksut_path = os.path.join(tema_path,"MockSUT")
man_path = os.path.join(os.path.join(strip_path(tema_path, "TemaLib"), "Docs"),"man")
# Prepend tema_path to sys.path (reverse/append/reverse == insert at front).
if tema_path not in sys.path:
    sys.path.reverse()
    sys.path.append(tema_path)
    sys.path.reverse()
# Commands dispatched by importing a module (name -> dotted module path).
command_set = dict()
command_set['testengine'] = "testengine.testengine"
command_set["mdm2svg"] = "eini.mdm2svg"
logtools = ["plotter","logreader","log2srt","sequencer"]
# Commands executed as external scripts.
exec_commands = set([ "xsimulate", "simulate", "validate","analysator","runmodelpackage","help", "mocksut", "model2dot", "actionlist" ])
exec_commands.update(logtools)
modelutils_commands = set(["generatetaskswitcher","gt","rextendedrules","renamerules","composemodel","specialiser","generatetestconf"])
other_commands = set(["modelutils","engine_home","packagereader","ats4appmodel2lsts","variablemodelcreator","filterexpand","model2lsts","do_python","do_make"])
other_commands.update(modelutils_commands)
# Maps a help topic to an alternative man page name (currently none).
help_commands_exceptions = dict()
def print_usage(path,exec_commands,other_commands,command_set):
    """Print the launcher's usage text and the sorted list of all
    known commands to stdout.

    NOTE: this file uses Python 2 print-statement syntax throughout.
    """
    print >> sys.stdout, "Usage:", os.path.basename(path), "<command>"
    print >> sys.stdout, ""
    print >> sys.stdout, "Available commands:"
    # Sort all commands and print them
    commands = []
    commands.extend(exec_commands)
    commands.extend(other_commands)
    commands.extend(command_set.keys())
    commands.sort()
    [ sys.stdout.write(" %s\n" % command ) for command in commands]
    print >> sys.stdout, ""
    print >> sys.stdout, "See 'tema help COMMAND' for more information on a specific command."
    print >> sys.stdout, "Note that all commands don't have help pages."
# No subcommand given: print usage and exit with an error code.
if len(sys.argv) < 2 :
    print_usage(argv0,exec_commands,other_commands,command_set)
    raise SystemExit(1)
# Shift the launcher name off argv so argv[0] is the subcommand.
sys.argv[0:1]=[]
## print >> sys.stderr, "__".join(sys.argv)
try:
    # In-process commands: look up the module to import at the bottom
    # of this script.
    module= command_set[sys.argv[0]]
    # NOTE(review): this rename is unreachable unless "model2lsts" is
    # added to command_set — the lookup above raises KeyError first.
    if sys.argv[0] == "model2lsts":
        sys.argv[0] = "2lsts"
except KeyError:
    # Not an in-process command: resolve to a script and exec it,
    # replacing this process.
    if sys.argv[0] in exec_commands :
        environment = os.environ
        base_command = sys.argv[0]
        exec_path = modelutils_path
        if sys.argv[0] == "simulate" or sys.argv[0] == "xsimulate":
            exec_path = os.path.join(validation_path, "simulation")
            base_command = sys.argv[0] + ".py"
        if sys.argv[0] == "model2dot":
            exec_path = os.path.join(validation_path, "viewer")
            base_command = sys.argv[0] + ".py"
        if sys.argv[0] in ["runmodelpackage","actionlist"]:
            exec_path = modelutils_path
            base_command = sys.argv[0] + ".py"
        if sys.argv[0] == "mocksut":
            exec_path = mocksut_path
            base_command = sys.argv[0] + ".py"
        if sys.argv[0] == "help" :
            # 'help' shells out to man(1) with MANPATH pointed at the
            # bundled manual pages; topics are namespaced under "tema.".
            environment['MANPATH'] = man_path
            if len(sys.argv) == 1:
                print_usage(argv0,exec_commands,other_commands,command_set)
                raise SystemExit(1)
            if not sys.argv[1].startswith("tema."):
                command = "tema.%s" % (sys.argv[1])
            else:
                command = sys.argv[1]
            if command in help_commands_exceptions:
                sys.argv[1] = help_commands_exceptions[command]
            else:
                sys.argv[1] = command
            base_command = "man"
            # Locate the man binary on PATH.
            for dir in os.environ.get('PATH', '').split(os.pathsep):
                candidate = os.path.join(dir,base_command)
                if os.path.isfile(candidate) and not os.path.isdir(candidate):
                    exec_path=dir
                    break
        if sys.argv[0] in logtools:
            exec_path = os.path.join(validation_path, "loghandling")
            base_command = sys.argv[0] + ".py"
        if sys.argv[0] == "validate" or sys.argv[0] == "analysator" :
            exec_path = os.path.join(validation_path, "analysis")
            base_command = sys.argv[0] + ".py"
        exec_path = os.path.join(exec_path, base_command)
        environment['PYTHONPATH'] = ":".join(sys.path)
        # print >> sys.stderr, exec_path
        try:
            # Replace this process with the target script; execve only
            # returns on failure.
            os.execve( exec_path, sys.argv, environment )
        except Exception, e:
            print e
            print >> sys.stderr, exec_path
        raise SystemExit(1)
    elif sys.argv[0] in other_commands :
        # Informational commands print a path; the rest exec a helper
        # script with PYTHONPATH set up for the TEMA packages.
        if sys.argv[0] == "modelutils" :
            print modelutils_path
        elif sys.argv[0] == "engine_home" :
            print tema_path
        elif sys.argv[0] == "filterexpand" :
            environment = os.environ
            environment['PYTHONPATH'] = ":".join(sys.path)
            path = tema_path + "/tema/filter/filterexpand.py"
            args = sys.argv
            args[0] = path
            os.execve( path, args, environment )
        elif sys.argv[0] == "model2lsts" :
            environment = os.environ
            environment['PYTHONPATH'] = ":".join(sys.path)
            path = tema_path + "/tema/model/model2lsts.py"
            args = sys.argv
            args[0] = path
            os.execve( path, args, environment )
        elif sys.argv[0] in ["do_python"]:
            # Start a bare interpreter with the TEMA PYTHONPATH.
            environment = os.environ
            environment['PYTHONPATH'] = ":".join(sys.path)
            path = sys.executable
            args = [path]
            os.execve(path,args,environment)
        elif sys.argv[0] in ["do_make"]:
            # Prefer GNU make ("gmake") where it exists under that name.
            path = executable_in_path("gmake")
            if not path:
                path = executable_in_path("make")
            environment = os.environ
            environment['PYTHONPATH'] = ":".join(sys.path)
            environment['TEMA_MODEL_TOOLS'] = modelutils_path
            args = sys.argv
            args[0] = path
            os.execve(path,args,environment)
        elif sys.argv[0] in modelutils_commands:
            environment = os.environ
            environment['PYTHONPATH'] = ":".join(sys.path)
            path = tema_path + "/tema/modelutils/%s.py" % sys.argv[0]
            args = sys.argv
            args[0] = path
            os.execve( path, args, environment )
        elif sys.argv[0] == "packagereader" :
            environment = os.environ
            environment['PYTHONPATH'] = ":".join(sys.path)
            path = tema_path + "/tema/packagereader/packagereader.py"
            args = sys.argv
            args[0] = path
            os.execve( path, args, environment )
        elif sys.argv[0] == "variablemodelcreator" :
            environment = os.environ
            environment['PYTHONPATH'] = ":".join(sys.path)
            path = tema_path + "/tema/variablemodels/VariableModelCreator.py"
            args = sys.argv
            args[0] = path
            os.execve( path, args, environment )
        elif sys.argv[0] == "ats4appmodel2lsts" :
            environment = os.environ
            environment['PYTHONPATH'] = ":".join(sys.path)
            path = tema_path + "/tema/ats4appmodel/ats4appmodel2lsts.py"
            args = sys.argv
            args[0] = path
            os.execve( path, args, environment )
    else:
        print >> sys.stderr, "Command", sys.argv[0], "not found"
        raise SystemExit(1)
    raise SystemExit(0)
# An in-process command was found: import its module, which runs it.
__import__( module, globals(), locals(), [''])
|
{
"content_hash": "dabf52354762375a30fd411479e76600",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 159,
"avg_line_length": 38.22641509433962,
"alnum_prop": 0.5783563672260612,
"repo_name": "tema-mbt/tema-tg",
"id": "e49af044041c3888f1fefea0ad2714a3c472aba7",
"size": "9271",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "TemaLib/tema/tematool.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "924079"
},
{
"name": "Shell",
"bytes": "2195"
},
{
"name": "TeX",
"bytes": "20563"
}
],
"symlink_target": ""
}
|
"""Printing subsystem"""
from pretty import *
from latex import latex, print_latex
from mathml import mathml, print_mathml
from python import python, print_python
from ccode import ccode, print_ccode
from gtk import *
from preview import preview
from str import StrPrinter, sstr, sstrrepr
_StrPrinter = StrPrinter()
from repr import srepr
# /cyclic/
from sympy.core import basic
from sympy.matrices import matrices
basic.StrPrinter = _StrPrinter
matrices.StrPrinter = _StrPrinter
del basic, matrices
from tree import print_tree
|
{
"content_hash": "116390467af5c8e6b9422abb8681ed2a",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 42,
"avg_line_length": 21.4,
"alnum_prop": 0.7906542056074767,
"repo_name": "fperez/sympy",
"id": "1ade76dfa1a26ecdce212eb47460fe75ae7a5803",
"size": "535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sympy/printing/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7547665"
},
{
"name": "Scheme",
"bytes": "125"
}
],
"symlink_target": ""
}
|
"""
Signals relating to questions and answers.
"""
from django.dispatch import Signal
# Sent just before a answer will be posted (after it's been approved and
# moderated; this can be used to modify the answer (in place) with posting
# details or other such actions. If any receiver returns False the answer will be
# discarded and a 403 (not allowed) response. This signal is sent at more or less
# the same time (just before, actually) as the Answer object's pre-save signal,
# except that the HTTP request is sent along with this signal.
answer_will_be_posted = Signal(providing_args=["answer", "request"])
# Sent just after a answer was posted. See above for how this differs
# from the Answer object's post-save signal.
answer_was_posted = Signal(providing_args=["answer", "request"])
question_will_be_posted = Signal(providing_args=["question", "request"])
# TODO, see django.contrib.comments
# Sent after a comment was "flagged" in some way. Check the flag to see if this
# was a user requesting removal of a comment, a moderator approving/removing a
# comment, or some other custom user flag.
#comment_was_flagged = Signal(providing_args=["comment", "flag", "created", "request"])
|
{
"content_hash": "d049c6ea4a7d525dd118b4a8892b167a",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 87,
"avg_line_length": 49.708333333333336,
"alnum_prop": 0.7518860016764459,
"repo_name": "alexisbellido/django-vodkamartini-qa",
"id": "cde029ca79a0787a9f76703040c7b568f250e898",
"size": "1193",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vodkamartiniqa/signals.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "4320"
},
{
"name": "Python",
"bytes": "146082"
}
],
"symlink_target": ""
}
|
import unittest
import os
from runtime.python.parser import Parser, TreeCatg
from runtime.python.position import Position
from runtime.python.token import Keyword
from grammar import TestGrammar
class ParserTest(unittest.TestCase):
    """Checks position tracking and token bookkeeping of the parser."""
    def setUp(self):
        self._parser = Parser(TestGrammar())
        self._parser.enableBlockComments()
        self._parser.enableFullBacktracking()
        Position.setTabSize(4)
    def tearDown(self):
        self._parser = None
    def testPositionInfo(self):
        """Parse the fixture file and verify node start/end positions."""
        source = os.path.join(os.path.abspath(os.path.dirname(__file__)), "testcode")
        root = self._parser.parseFile(source, TreeCatg.PARSE_TREE)
        print(root.toXml())
        top_level = root.getChildren()
        first_for = top_level[0]
        second_for = top_level[1]
        self._checkNode(first_for.getChildren()[0], 1, 1, 1, 8)
        self._checkNode(first_for.getChildren()[1], 1, 9, 1, 14)
        self._checkNode(second_for.getChildren()[1], 6, 13, 6, 18)
    def testTokenInfo(self):
        """Dump every token with its position; keywords are marked."""
        source = os.path.join(os.path.abspath(os.path.dirname(__file__)), "testcode")
        for token_type, token in self._parser.getTokenInfoFromFile(source):
            start_line, start_col = token.getStartPosition()
            end_line, end_col = token.getEndPosition()
            suffix = " (keyword)" if isinstance(token_type, Keyword) else ""
            print("(%d,%d) - (%d,%d): '%s'%s" % (start_line, start_col, end_line, end_col, token.getText(), suffix))
    def _checkNode(self, node, expStartLine, expStartCol, expEndLine, expEndCol):
        # Compare start and end positions as tuples for clearer failures.
        token = node.getToken()
        start_line, start_col = token.getStartPosition()
        self.assertEqual((start_line, start_col), (expStartLine, expStartCol))
        end_line, end_col = token.getEndPosition()
        self.assertEqual((end_line, end_col), (expEndLine, expEndCol))
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "c3a0e7191b0798b688c966b6b38ad473",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 105,
"avg_line_length": 33.15384615384615,
"alnum_prop": 0.5860788863109049,
"repo_name": "ThomasBollmeier/bovinus",
"id": "c8f3b77de240ed69792adc26c6aea04c37b1eb44",
"size": "2768",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unittests/python/parser_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "75463"
},
{
"name": "M4",
"bytes": "1643"
},
{
"name": "Makefile",
"bytes": "2791"
},
{
"name": "PHP",
"bytes": "83682"
},
{
"name": "Python",
"bytes": "197313"
},
{
"name": "Shell",
"bytes": "1441"
},
{
"name": "Vim script",
"bytes": "1125"
}
],
"symlink_target": ""
}
|
import os
import sys
import unittest
from unittest.mock import patch
import ray
from ray.rllib import _register_all
from ray.tune import TuneError
from ray.tune.schedulers import FIFOScheduler
from ray.tune.result import DONE
from ray.tune.registry import _global_registry, TRAINABLE_CLASS
from ray.tune.trial import Trial
from ray.tune.trial_runner import TrialRunner
from ray.tune.resources import Resources
from ray.tune.suggest import BasicVariantGenerator
def create_mock_components():
    """Return a (search algorithm, scheduler) pair whose error hooks
    record the trials/trial ids that errored, for later assertions."""
    class _MockScheduler(FIFOScheduler):
        errored_trials = []
        def on_trial_error(self, trial_runner, trial):
            # Keep the augmented assignment: it mutates the shared
            # class-level list exactly like the original did.
            self.errored_trials += [trial]
    class _MockSearchAlg(BasicVariantGenerator):
        errored_trials = []
        def on_trial_complete(self, trial_id, error=False, **kwargs):
            if error:
                self.errored_trials += [trial_id]
    return _MockSearchAlg(), _MockScheduler()
class TrialRunnerTest2(unittest.TestCase):
    """Tests of TrialRunner error handling, failure recovery,
    checkpoint/restore, and pause/resume.

    Each test drives the runner one step() at a time and asserts on
    trial status transitions after each step; the inline comments name
    what each step is expected to process.
    """
    def setUp(self):
        # Refresh cached state quickly so each step observes changes.
        os.environ["TUNE_STATE_REFRESH_PERIOD"] = "0.1"
        # Wait up to five seconds for placement groups when starting a trial
        os.environ["TUNE_PLACEMENT_GROUP_WAIT_S"] = "5"
        # Block for results even when placement groups are pending
        os.environ["TUNE_TRIAL_STARTUP_GRACE_PERIOD"] = "0"
    def tearDown(self):
        ray.shutdown()
        _register_all() # re-register the evicted objects
    def testErrorHandling(self):
        """A trial with an unregistered trainable errors; others run."""
        ray.init(num_cpus=4, num_gpus=2)
        runner = TrialRunner()
        kwargs = {
            "stopping_criterion": {
                "training_iteration": 1
            },
            "resources": Resources(cpu=1, gpu=1),
        }
        _global_registry.register(TRAINABLE_CLASS, "asdf", None)
        trials = [Trial("asdf", **kwargs), Trial("__fake", **kwargs)]
        for t in trials:
            runner.add_trial(t)
        runner.step()
        self.assertEqual(trials[0].status, Trial.ERROR)
        self.assertEqual(trials[1].status, Trial.PENDING)
        runner.step()
        self.assertEqual(trials[0].status, Trial.ERROR)
        self.assertEqual(trials[1].status, Trial.RUNNING)
    def testThrowOnOverstep(self):
        """Stepping a runner with no trials raises TuneError."""
        ray.init(num_cpus=1, num_gpus=1)
        runner = TrialRunner()
        runner.step()
        self.assertRaises(TuneError, runner.step)
    def testFailureRecoveryDisabled(self):
        """With max_failures=0 an error terminates the trial and is
        reported to both the search algorithm and the scheduler."""
        ray.init(num_cpus=1, num_gpus=1)
        searchalg, scheduler = create_mock_components()
        runner = TrialRunner(searchalg, scheduler=scheduler)
        kwargs = {
            "resources": Resources(cpu=1, gpu=1),
            "checkpoint_freq": 1,
            "max_failures": 0,
            "config": {
                "mock_error": True,
            },
        }
        runner.add_trial(Trial("__fake", **kwargs))
        trials = runner.get_trials()
        runner.step() # Start trial
        self.assertEqual(trials[0].status, Trial.RUNNING)
        runner.step() # Process result, dispatch save
        self.assertEqual(trials[0].status, Trial.RUNNING)
        runner.step() # Process save
        runner.step() # Error
        self.assertEqual(trials[0].status, Trial.ERROR)
        self.assertEqual(trials[0].num_failures, 1)
        self.assertEqual(len(searchalg.errored_trials), 1)
        self.assertEqual(len(scheduler.errored_trials), 1)
    def testFailureRecoveryEnabled(self):
        """With max_failures=1 a transient error triggers a restore and
        the trial keeps running; no errors are reported."""
        ray.init(num_cpus=1, num_gpus=1)
        searchalg, scheduler = create_mock_components()
        runner = TrialRunner(searchalg, scheduler=scheduler)
        kwargs = {
            "resources": Resources(cpu=1, gpu=1),
            "checkpoint_freq": 1,
            "max_failures": 1,
            "config": {
                "mock_error": True,
            },
        }
        runner.add_trial(Trial("__fake", **kwargs))
        trials = runner.get_trials()
        runner.step() # Start trial
        self.assertEqual(trials[0].status, Trial.RUNNING)
        runner.step() # Process result, dispatch save
        self.assertEqual(trials[0].status, Trial.RUNNING)
        runner.step() # Process save
        runner.step() # Error (transient), dispatch restore
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(trials[0].num_failures, 1)
        runner.step() # Process restore
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(len(searchalg.errored_trials), 0)
        self.assertEqual(len(scheduler.errored_trials), 0)
    def testFailureRecoveryNodeRemoval(self):
        """A simulated node loss requeues the trial (PENDING) and is
        reported to the scheduler but not the search algorithm."""
        # Node removal simulation only works with resource requests
        os.environ["TUNE_PLACEMENT_GROUP_AUTO_DISABLED"] = "1"
        ray.init(num_cpus=1, num_gpus=1)
        searchalg, scheduler = create_mock_components()
        runner = TrialRunner(searchalg, scheduler=scheduler)
        kwargs = {
            "resources": Resources(cpu=1, gpu=1),
            "checkpoint_freq": 1,
            "max_failures": 1,
            "config": {
                "mock_error": True,
            },
        }
        runner.add_trial(Trial("__fake", **kwargs))
        trials = runner.get_trials()
        with patch("ray.cluster_resources") as resource_mock:
            resource_mock.return_value = {"CPU": 1, "GPU": 1}
            runner.step() # Start trial
            self.assertEqual(trials[0].status, Trial.RUNNING)
            runner.step() # Process result, dispatch save
            runner.step() # Process save
            self.assertEqual(trials[0].status, Trial.RUNNING)
            # Mimic a node failure
            resource_mock.return_value = {"CPU": 0, "GPU": 0}
            runner.step() # Detect node failure
            self.assertEqual(trials[0].status, Trial.PENDING)
            self.assertEqual(trials[0].num_failures, 1)
            self.assertEqual(len(searchalg.errored_trials), 0)
            self.assertEqual(len(scheduler.errored_trials), 1)
    def testFailureRecoveryMaxFailures(self):
        """A persistent error exhausts max_failures and ends in ERROR."""
        ray.init(num_cpus=1, num_gpus=1)
        runner = TrialRunner()
        kwargs = {
            "resources": Resources(cpu=1, gpu=1),
            "checkpoint_freq": 1,
            "max_failures": 2,
            "config": {
                "mock_error": True,
                "persistent_error": True,
            },
        }
        runner.add_trial(Trial("__fake", **kwargs))
        trials = runner.get_trials()
        runner.step() # Start trial
        self.assertEqual(trials[0].status, Trial.RUNNING)
        runner.step() # Process result, dispatch save
        self.assertEqual(trials[0].status, Trial.RUNNING)
        runner.step() # Process save
        runner.step() # Error (transient), dispatch restore
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(trials[0].num_failures, 1)
        runner.step() # Process restore
        runner.step() # Error (transient), dispatch restore
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(trials[0].num_failures, 2)
        runner.step() # Process restore
        runner.step() # Error (terminal)
        self.assertEqual(trials[0].status, Trial.ERROR)
        self.assertEqual(trials[0].num_failures, 3)
    def testFailFast(self):
        """With fail_fast=True the first trial error stops the run."""
        ray.init(num_cpus=1, num_gpus=1)
        runner = TrialRunner(fail_fast=True)
        kwargs = {
            "resources": Resources(cpu=1, gpu=1),
            "checkpoint_freq": 1,
            "max_failures": 0,
            "config": {
                "mock_error": True,
                "persistent_error": True,
            },
        }
        runner.add_trial(Trial("__fake", **kwargs))
        runner.add_trial(Trial("__fake", **kwargs))
        trials = runner.get_trials()
        runner.step() # Start trial
        self.assertEqual(trials[0].status, Trial.RUNNING)
        runner.step() # Process result, dispatch save
        self.assertEqual(trials[0].status, Trial.RUNNING)
        runner.step() # Process save
        runner.step() # Error
        self.assertEqual(trials[0].status, Trial.ERROR)
        self.assertRaises(TuneError, lambda: runner.step())
    def testFailFastRaise(self):
        """With fail_fast=RAISE the trial error propagates directly."""
        ray.init(num_cpus=1, num_gpus=1)
        runner = TrialRunner(fail_fast=TrialRunner.RAISE)
        kwargs = {
            "resources": Resources(cpu=1, gpu=1),
            "checkpoint_freq": 1,
            "max_failures": 0,
            "config": {
                "mock_error": True,
                "persistent_error": True,
            },
        }
        runner.add_trial(Trial("__fake", **kwargs))
        runner.add_trial(Trial("__fake", **kwargs))
        trials = runner.get_trials()
        runner.step() # Start trial
        self.assertEqual(trials[0].status, Trial.RUNNING)
        runner.step() # Process result, dispatch save
        self.assertEqual(trials[0].status, Trial.RUNNING)
        runner.step() # Process save
        with self.assertRaises(Exception):
            runner.step() # Error
    def testCheckpointing(self):
        """A second trial restored from the first one's checkpoint sees
        the state (set_info/get_info) saved by the first."""
        ray.init(num_cpus=1, num_gpus=1)
        runner = TrialRunner()
        kwargs = {
            "stopping_criterion": {
                "training_iteration": 1
            },
            "resources": Resources(cpu=1, gpu=1),
            "checkpoint_freq": 1,
        }
        runner.add_trial(Trial("__fake", **kwargs))
        trials = runner.get_trials()
        runner.step() # Start trial
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(ray.get(trials[0].runner.set_info.remote(1)), 1)
        runner.step() # Process result, dispatch save
        runner.step() # Process save, stop trial
        kwargs["restore_path"] = trials[0].checkpoint.value
        self.assertEqual(trials[0].status, Trial.TERMINATED)
        runner.add_trial(Trial("__fake", **kwargs))
        trials = runner.get_trials()
        self.assertEqual(trials[1].status, Trial.PENDING)
        runner.step() # Start trial, dispatch restore
        self.assertEqual(trials[1].status, Trial.RUNNING)
        runner.step() # Process restore
        self.assertEqual(trials[0].status, Trial.TERMINATED)
        self.assertEqual(trials[1].status, Trial.RUNNING)
        self.assertEqual(ray.get(trials[1].runner.get_info.remote()), 1)
        self.addCleanup(os.remove, trials[0].checkpoint.value)
    def testRestoreMetricsAfterCheckpointing(self):
        """*_since_restore metrics accumulate after a restore."""
        ray.init(num_cpus=1, num_gpus=1)
        runner = TrialRunner()
        kwargs = {
            "resources": Resources(cpu=1, gpu=1),
            "checkpoint_freq": 1,
        }
        runner.add_trial(Trial("__fake", **kwargs))
        trials = runner.get_trials()
        runner.step() # Start trial
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(ray.get(trials[0].runner.set_info.remote(1)), 1)
        runner.step() # Process result, dispatch save
        runner.step() # Process save
        runner.trial_executor.stop_trial(trials[0])
        kwargs["restore_path"] = trials[0].checkpoint.value
        kwargs.pop("checkpoint_freq") # No checkpointing for next trial
        runner.add_trial(Trial("__fake", **kwargs))
        trials = runner.get_trials()
        runner.step() # Start trial, dispatch restore
        self.assertEqual(trials[0].status, Trial.TERMINATED)
        self.assertEqual(trials[1].status, Trial.RUNNING)
        runner.step() # Process restore
        runner.step() # Process result
        self.assertEqual(trials[1].last_result["timesteps_since_restore"], 10)
        self.assertEqual(trials[1].last_result["iterations_since_restore"], 1)
        self.assertGreater(trials[1].last_result["time_since_restore"], 0)
        runner.step() # Process restore
        self.assertEqual(trials[1].last_result["timesteps_since_restore"], 20)
        self.assertEqual(trials[1].last_result["iterations_since_restore"], 2)
        self.assertGreater(trials[1].last_result["time_since_restore"], 0)
        self.addCleanup(os.remove, trials[0].checkpoint.value)
    def testCheckpointingAtEnd(self):
        """checkpoint_at_end produces a checkpoint once DONE is set."""
        ray.init(num_cpus=1, num_gpus=1)
        runner = TrialRunner()
        kwargs = {
            "stopping_criterion": {
                "training_iteration": 2
            },
            "checkpoint_at_end": True,
            "resources": Resources(cpu=1, gpu=1),
        }
        runner.add_trial(Trial("__fake", **kwargs))
        trials = runner.get_trials()
        runner.step() # Start trial
        self.assertEqual(trials[0].status, Trial.RUNNING)
        runner.step() # Process result
        runner.step() # Process result, dispatch save
        self.assertEqual(trials[0].last_result[DONE], True)
        runner.step() # Process save
        self.assertEqual(trials[0].has_checkpoint(), True)
    def testResultDone(self):
        """Tests that last_result is marked `done` after trial is complete."""
        ray.init(num_cpus=1, num_gpus=1)
        runner = TrialRunner()
        kwargs = {
            "stopping_criterion": {
                "training_iteration": 2
            },
            "resources": Resources(cpu=1, gpu=1),
        }
        runner.add_trial(Trial("__fake", **kwargs))
        trials = runner.get_trials()
        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        runner.step()
        self.assertNotEqual(trials[0].last_result[DONE], True)
        runner.step()
        self.assertEqual(trials[0].last_result[DONE], True)
    def testPauseThenResume(self):
        """Pausing and resuming a trial preserves its runner state."""
        ray.init(num_cpus=1, num_gpus=1)
        runner = TrialRunner()
        kwargs = {
            "stopping_criterion": {
                "training_iteration": 2
            },
            "resources": Resources(cpu=1, gpu=1),
        }
        runner.add_trial(Trial("__fake", **kwargs))
        trials = runner.get_trials()
        runner.step() # Start trial
        runner.step() # Process result
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(ray.get(trials[0].runner.get_info.remote()), None)
        self.assertEqual(ray.get(trials[0].runner.set_info.remote(1)), 1)
        runner.trial_executor.pause_trial(trials[0])
        self.assertEqual(trials[0].status, Trial.PAUSED)
        runner.trial_executor.resume_trial(trials[0])
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(ray.get(trials[0].runner.get_info.remote()), 1)
        runner.step() # Process result
        self.assertEqual(trials[0].status, Trial.TERMINATED)
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
|
{
"content_hash": "9fd0d5db6c3b5d0175af6f4607b38510",
"timestamp": "",
"source": "github",
"line_count": 395,
"max_line_length": 78,
"avg_line_length": 37.164556962025316,
"alnum_prop": 0.5944141689373297,
"repo_name": "pcmoritz/ray-1",
"id": "81beeb0a92fedb8a4466e670402a0849f4c2801d",
"size": "14680",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ray/tune/tests/test_trial_runner_2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "70670"
},
{
"name": "C++",
"bytes": "4670851"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Dockerfile",
"bytes": "14159"
},
{
"name": "HTML",
"bytes": "30414"
},
{
"name": "Java",
"bytes": "1338604"
},
{
"name": "JavaScript",
"bytes": "914"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "Python",
"bytes": "10523389"
},
{
"name": "Shell",
"bytes": "117557"
},
{
"name": "Smarty",
"bytes": "239"
},
{
"name": "Starlark",
"bytes": "238506"
},
{
"name": "TypeScript",
"bytes": "259269"
}
],
"symlink_target": ""
}
|
# Packaging metadata for the kestrel distribution.
from setuptools import setup
import os, shutil
version = '0.1'
setup(
    name='kestrel',
    version=version,
    description="kestrel",
    long_description="""kestrel""",
    keywords='',
    author='Ian Dennis Miller',
    author_email='ian@iandennismiller.com',
    url='http://www.iandennismiller.com',
    license='MIT',
    classifiers=[],  # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
    # Command-line entry scripts installed into the user's bin directory.
    scripts=[
        "bin/kestrel_import_mailapp.py",
        "bin/kestrel_init.py",
        "bin/kestrel_newconf.py",
        "bin/kestrel_runcron.py",
        "bin/kestrel_update.py",
    ],
    data_files=[
        ('etc', ['etc/getmail_rc.jinja2']),
    ],
    install_requires=[
        "sievelib==0.8",
        "Jinja2==2.7.1",
    ],
    include_package_data=True,
    zip_safe=False,
)
|
{
"content_hash": "92ade8cec80f05f2465a4aa9e2e8b034",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 95,
"avg_line_length": 27.71875,
"alnum_prop": 0.5501691093573844,
"repo_name": "iandennismiller/kestrel",
"id": "4ea7132f306e9158bc37c3c95b9a83cc9572b511",
"size": "887",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4362"
}
],
"symlink_target": ""
}
|
import importlib
import traceback
from utils.log import logger
from Kunlun_M.const import VENDOR_ECOSYSTEM
from Kunlun_M.settings import ACTIVE_SCA_SYSTEM
def get_vulns_from_source(language, vendor_name, vendor_version):
    """Collect vulnerability records for *vendor_name*/*vendor_version*.

    Iterates the vulnerability sources registered for *language* in
    VENDOR_ECOSYSTEM, skips sources not enabled in ACTIVE_SCA_SYSTEM,
    imports the per-source submodule and delegates to its
    ``get_vulns_from_<source>`` function.  A failing source is logged
    and skipped so one broken ecosystem does not abort the whole scan.

    Returns a list with the vulnerabilities gathered from all sources.
    """
    result = []
    sources = VENDOR_ECOSYSTEM.get(language, {})
    for source, ecosystem in sources.items():
        if source not in ACTIVE_SCA_SYSTEM:
            continue
        module = importlib.import_module(__name__ + "." + source)
        func = getattr(module, "get_vulns_from_" + source, None)
        if func is None:
            # Source module does not expose a fetcher; nothing to do.
            continue
        try:
            result.extend(func(ecosystem, vendor_name, vendor_version))
        except Exception:
            # Fix: catch Exception instead of a bare except so that
            # KeyboardInterrupt/SystemExit propagate instead of being
            # logged and swallowed.
            logger.error("[Vendor Scan] EcoSystem {} get error.\n{}".format(source, traceback.format_exc()))
    return result
|
{
"content_hash": "0623c75fa6f1f081fd085d51011e2787",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 108,
"avg_line_length": 25.685714285714287,
"alnum_prop": 0.6229143492769744,
"repo_name": "LoRexxar/Cobra-W",
"id": "ff393910170e50654c49ab53cd70c5c1b6c4e892",
"size": "899",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/vuln_apis/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "204"
},
{
"name": "Hack",
"bytes": "82"
},
{
"name": "Java",
"bytes": "45"
},
{
"name": "PHP",
"bytes": "6172"
},
{
"name": "Python",
"bytes": "441482"
}
],
"symlink_target": ""
}
|
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.shortcuts import get_object_or_404
from django.urls import reverse
from django.views import generic
from django_filters.views import FilterView
from providers.filtersets import ProviderServiceFilterSet
from providers.forms import ProviderForm, ProviderServiceForm
from providers.models import Provider, ProviderService
### Provider Service View ###
class ProviderServiceFilterView(FilterView):
    """Filterable listing over every provider service."""
    model = ProviderService
    filterset_class = ProviderServiceFilterSet
    def get_queryset(self):
        # Fetch the related provider in the same query (avoids N+1).
        return ProviderService.objects.select_related('provider').all()
class ProviderServiceListView(generic.ListView):
    """Services belonging to one provider (pk taken from the URL)."""
    model = ProviderService
    filterset_class = ProviderServiceFilterSet
    def get_queryset(self):
        # Fix: filter on the FK column directly instead of
        # Provider.objects.get(pk=...) — that issued an extra query and
        # raised an unhandled Provider.DoesNotExist (HTTP 500) for an
        # unknown pk; an unknown pk now simply yields an empty list.
        return ProviderService.objects.filter(provider_id=self.kwargs.get('pk')).order_by('id')
    def get_context_data(self, **kwargs):
        # Expose the provider pk so templates can build related links.
        ctx = super().get_context_data(**kwargs)
        ctx['pk'] = self.kwargs['pk']
        return ctx
class ProviderServiceCreateView(LoginRequiredMixin, generic.CreateView):
    """Add a service to the provider named by the URL's pk."""
    model = ProviderService
    form_class = ProviderServiceForm
    def get_success_url(self):
        return reverse('providerservice-list', kwargs={'pk': self.kwargs['pk']})
    def get_form_kwargs(self):
        # Hand the owning provider to the form; 404 on an unknown pk.
        form_kwargs = super().get_form_kwargs()
        form_kwargs['provider'] = get_object_or_404(Provider, pk=self.kwargs['pk'])
        return form_kwargs
    def post(self, request, *args, **kwargs):
        response = super().post(request, *args, **kwargs)
        messages.success(request, "The service has been added successfully",
                         'alert alert-success')
        return response
class ProviderServiceUpdateView(LoginRequiredMixin, UserPassesTestMixin, generic.UpdateView):
    """Edit a service; restricted to the owning provider's user."""
    model = ProviderService
    form_class = ProviderServiceForm
    def test_func(self):
        # Only the provider's owner may edit its services.
        return self.request.user == self.get_object().provider.owner
    def get_success_url(self):
        return reverse('providerservice-list', kwargs={'pk': self.kwargs['provider_pk']})
    def get_form_kwargs(self):
        form_kwargs = super().get_form_kwargs()
        form_kwargs['provider'] = get_object_or_404(Provider, pk=self.kwargs['provider_pk'])
        return form_kwargs
    def post(self, request, *args, **kwargs):
        response = super().post(request, *args, **kwargs)
        messages.success(request, "The service has been updated successfully",
                         'alert alert-success')
        return response
class ProviderServiceDeleteView(LoginRequiredMixin, UserPassesTestMixin, generic.DeleteView):
    """Delete a service; restricted to the owning provider's user."""
    model = ProviderService
    def test_func(self):
        # Only the provider's owner may delete its services.
        return self.request.user == self.get_object().provider.owner
    def get_success_url(self):
        return reverse('providerservice-list', kwargs={'pk': self.kwargs['provider_pk']})
    def post(self, request, *args, **kwargs):
        response = super().post(request, *args, **kwargs)
        messages.success(request, "The service has been deleted successfully",
                         'alert alert-success')
        return response
### Provider Views ###
class ProviderListView(LoginRequiredMixin, generic.ListView):
    """Providers owned by the signed-in user."""
    model = Provider
    def get_queryset(self):
        # Restrict the listing to the requesting user's own providers.
        current_user = self.request.user
        return Provider.objects.filter(owner=current_user)
class ProviderCreateView(LoginRequiredMixin, generic.CreateView):
    """Register a new provider owned by the signed-in user."""
    model = Provider
    form_class = ProviderForm
    def get_success_url(self):
        return reverse('provider-list-user')
    def get_form_kwargs(self):
        # The form assigns ownership from this kwarg.
        form_kwargs = super().get_form_kwargs()
        form_kwargs['owner'] = self.request.user
        return form_kwargs
    def post(self, request, *args, **kwargs):
        response = super().post(request, *args, **kwargs)
        messages.success(request, "The company has been added successfully",
                         'alert alert-success')
        return response
class ProviderUpdateView(LoginRequiredMixin, UserPassesTestMixin, generic.UpdateView):
    """Edit a provider; restricted to its owner."""
    model = Provider
    form_class = ProviderForm
    def test_func(self):
        # Only the owner may edit the provider record.
        return self.request.user == self.get_object().owner
    def get_success_url(self):
        return reverse('provider-list-user')
    def get_form_kwargs(self):
        form_kwargs = super().get_form_kwargs()
        form_kwargs['owner'] = self.request.user
        return form_kwargs
    def post(self, request, *args, **kwargs):
        response = super().post(request, *args, **kwargs)
        messages.success(request, "The company has been updated successfully",
                         'alert alert-success')
        return response
|
{
"content_hash": "b852247e34dd82293f7d439ac1b1c9bf",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 117,
"avg_line_length": 33.76223776223776,
"alnum_prop": 0.6777133388566694,
"repo_name": "aniruddha-adhikary/bookit",
"id": "d090bbc2928b64ff77385f1bc96356099ca07153",
"size": "4828",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bookit/providers/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2145"
},
{
"name": "HTML",
"bytes": "43525"
},
{
"name": "JavaScript",
"bytes": "3902"
},
{
"name": "Python",
"bytes": "73443"
},
{
"name": "Shell",
"bytes": "4196"
}
],
"symlink_target": ""
}
|
import requests
import json
import sys
from getpass import getpass
from urllib.parse import urljoin
API_BASE = "https://api.waas.barracudanetworks.com/v4/waasapi/"
def waas_api_login(email, password):
    """Authenticate against the WaaS API and return the session key."""
    credentials = dict(email=email, password=password)
    response = requests.post(urljoin(API_BASE, 'api_login'), data=credentials)
    response.raise_for_status()
    return response.json()['key']
def waas_api_get(token, path):
    """GET *path* relative to API_BASE; returns the decoded JSON body."""
    headers = {"Content-Type": "application/json", 'auth-api': token}
    response = requests.get(urljoin(API_BASE, path), headers=headers)
    response.raise_for_status()
    return response.json()
def waas_api_patch(token, path, data):
    """PATCH form-encoded *data* to *path* (relative to API_BASE); return JSON."""
    response = requests.patch(urljoin(API_BASE, path), data,
                              headers={'auth-api': token})
    response.raise_for_status()
    return response.json()
if __name__ == '__main__':
    # Credentials come from argv when provided; otherwise prompt interactively
    # (getpass keeps the password out of terminal echo).
    if len(sys.argv) >= 3:
        email = sys.argv[1]
        password = sys.argv[2]
    else:
        email = input("Enter user email:")
        password = getpass("Enter user password:")
    token = waas_api_login(email, password)
    # Show list of applications, and servers for each application
    apps = waas_api_get(token, 'applications')
    for app in apps:
        print("Application Name: {}".format(app['name']))
        for server in app['servers']:
            print("\tServer: {} ({} {}:{})".format(server['name'], server['protocol'], server['host'], server['port']))
    if apps:
        app_name = apps[0]['name']
        # change request limits value
        data = {"max_request_length": 3141}
        # Pass the path RELATIVE to API_BASE: waas_api_patch already joins it.
        # The previous code applied urljoin twice, which only worked by
        # accident (urljoin returns an absolute second argument unchanged).
        updated = waas_api_patch(token, f'applications/{app_name}/request_limits/', data)
        print('updated request limits:', json.dumps(updated, indent=4))
|
{
"content_hash": "7a847d10a12ac0ef4744850a88ed8507",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 119,
"avg_line_length": 33.13725490196079,
"alnum_prop": 0.6372781065088757,
"repo_name": "barracudanetworks/waf-automation",
"id": "782ff08b36da54fee7da63a845644fe9ae7c3867",
"size": "1691",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "waf-as-a-service-api/waas_rest_api_v4_example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HCL",
"bytes": "7766"
},
{
"name": "HTML",
"bytes": "540"
},
{
"name": "Pascal",
"bytes": "514"
},
{
"name": "Perl",
"bytes": "10764"
},
{
"name": "PowerShell",
"bytes": "116613"
},
{
"name": "Puppet",
"bytes": "23489"
},
{
"name": "Python",
"bytes": "65746"
},
{
"name": "Ruby",
"bytes": "70753"
},
{
"name": "Shell",
"bytes": "6811"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import json
import datetime
import os
import os.path
import sys
import traceback
from distutils import log
from .base import BaseBuildCommand
class BuildAssetsCommand(BaseBuildCommand):
    """Build Sentry's static media assets (webpack bundle) and record a JSON
    version manifest (assets.json) so later builds can skip unchanged work.
    """

    user_options = BaseBuildCommand.user_options + [
        ('asset-json-path=', None,
         'Relative path for JSON manifest. Defaults to {dist_name}/assets.json'),
        ('inplace', 'i',
         "ignore build-lib and put compiled javascript files into the source " +
         "directory alongside your pure Python modules"),
        ('force', 'f',
         "Force rebuilding of static content. Defaults to rebuilding on version "
         "change detection."),
    ]

    description = 'build static media assets'

    def initialize_options(self):
        # Default manifest path is "<dist-name>/assets.json"; the
        # --asset-json-path option can override it.
        self.asset_json_path = '{}/assets.json'.format(
            self.distribution.get_name(),
        )
        BaseBuildCommand.initialize_options(self)

    def get_dist_paths(self):
        # Directory that receives the compiled webpack output.
        return [
            'src/sentry/static/sentry/dist',
        ]

    def get_manifest_additions(self):
        # The version manifest itself is part of the build output.
        return (
            'src/' + self.asset_json_path,
        )

    def _get_package_version(self):
        """
        Attempt to get the most correct current version of Sentry.

        Tries the importable ``sentry`` module first; falls back to a
        previously written assets.json manifest. Returns a dict with
        ``version`` and ``build`` keys (either may be None).
        """
        pkg_path = os.path.join(self.work_path, 'src')
        sys.path.insert(0, pkg_path)
        try:
            import sentry
        except Exception:
            version = None
            build = None
        else:
            # BUG FIX: the original message string had no '{}' placeholder,
            # so the module path passed to .format() was silently dropped.
            log.info('pulled version information from \'{}\''.format(
                sentry.__file__))
            version = self.distribution.get_version()
            build = sentry.__build__
        finally:
            sys.path.pop(0)
        if not (version and build):
            json_path = self.get_asset_json_path()
            try:
                with open(json_path) as fp:
                    data = json.loads(fp.read())
            except Exception:
                # Best-effort fallback: a missing/corrupt manifest simply
                # leaves version/build as None.
                pass
            else:
                log.info('pulled version information from \'{}\''.format(
                    json_path,
                ))
                version, build = data['version'], data['build']
        return {
            'version': version,
            'build': build,
        }

    def _needs_static(self, version_info):
        # Rebuild whenever the manifest is missing or records a different
        # version/build than the current package.
        json_path = self.get_asset_json_path()
        if not os.path.exists(json_path):
            return True
        with open(json_path) as fp:
            data = json.load(fp)
        if data.get('version') != version_info.get('version'):
            return True
        if data.get('build') != version_info.get('build'):
            return True
        return False

    def _needs_built(self):
        # Base-class checks first (e.g. --force), then version drift.
        if BaseBuildCommand._needs_built(self):
            return True
        version_info = self._get_package_version()
        return self._needs_static(version_info)

    def _build(self):
        version_info = self._get_package_version()
        log.info('building assets for {} v{} (build {})'.format(
            self.distribution.get_name(),
            version_info['version'] or 'UNKNOWN',
            version_info['build'] or 'UNKNOWN',
        ))
        if not version_info['version'] or not version_info['build']:
            log.fatal('Could not determine sentry version or build')
            sys.exit(1)
        try:
            self._build_static()
        except Exception:
            traceback.print_exc()
            log.fatal('unable to build Sentry\'s static assets!\n'
                      'Hint: You might be running an invalid version of NPM.')
            sys.exit(1)
        log.info('writing version manifest')
        manifest = self._write_version_file(version_info)
        log.info('recorded manifest\n{}'.format(
            json.dumps(manifest, indent=2),
        ))

    def _build_static(self):
        # By setting NODE_ENV=production, a few things happen
        # * React optimizes out certain code paths
        # * Webpack will add version strings to built/referenced assets
        env = dict(os.environ)
        env['SENTRY_STATIC_DIST_PATH'] = self.sentry_static_dist_path
        env['NODE_ENV'] = 'production'
        self._run_command(['node_modules/.bin/webpack', '--bail'],
                          env=env)

    def _write_version_file(self, version_info):
        # Record when/what was built so _needs_static can short-circuit later.
        manifest = {
            'createdAt': datetime.datetime.utcnow().isoformat() + 'Z',
            'version': version_info['version'],
            'build': version_info['build'],
        }
        with open(self.get_asset_json_path(), 'w') as fp:
            json.dump(manifest, fp)
        return manifest

    @property
    def sentry_static_dist_path(self):
        return os.path.abspath(os.path.join(
            self.build_lib, 'sentry/static/sentry/dist'))

    def get_asset_json_path(self):
        return os.path.abspath(os.path.join(
            self.build_lib, self.asset_json_path))
|
{
"content_hash": "726e299d97871bfdd18fd99e3e3a1f02",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 81,
"avg_line_length": 32.311688311688314,
"alnum_prop": 0.5582797427652733,
"repo_name": "JackDanger/sentry",
"id": "2c79a530083bb1855307d46ee3af05bc24767218",
"size": "4976",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/sentry/utils/distutils/commands/build_assets.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "583430"
},
{
"name": "HTML",
"bytes": "319622"
},
{
"name": "JavaScript",
"bytes": "624672"
},
{
"name": "Makefile",
"bytes": "2660"
},
{
"name": "Python",
"bytes": "6279717"
}
],
"symlink_target": ""
}
|
from uuid import uuid1
import time
from flask import request
from apps.utils.format.obj_format import json_to_pyseq
class RestSession:
    """
    Session store for REST API requests, as opposed to flask-session's
    cookie-based sessions.

    The session id (sid) is taken from the 'OSR-ClientId' request header
    when present, otherwise a fresh uuid1-based id is generated for the
    request. Values are persisted in Redis or MongoDB depending on
    SESSION_TYPE.
    """
    def __init__(self):
        # Session id for the current request; set in before_request and
        # cleared again in teardown_request.
        self.sid = None
    def init_app(self, app, **kwargs):
        # Flask-extension entry point: read config and hook the request
        # lifecycle callbacks onto the app.
        self._get_interface(app)
        @app.before_request
        def init_current_session_id():
            """
            Called before each request to set the sid for the current request.
            :return:
            """
            self.sid = "{}rest-{}".format(
                self.config["SESSION_KEY_PREFIX"], str(uuid1()))
            # A client-supplied id takes precedence over the generated one.
            header = request.headers.get('OSR-ClientId')
            if header:
                self.sid = header
        @app.teardown_request
        def clear_current_session_id(e):
            """
            Clears the sid that init_current_session_id set for this request.
            :return:
            """
            self.sid = None
    def _get_interface(self, app):
        # Copy the app config, fill in defaults, then resolve the backing
        # store (Redis or MongoDB collection) from SESSION_TYPE.
        self.config = app.config.copy()
        self.config.setdefault('SESSION_TYPE', 'mongodb')
        self.config.setdefault('SESSION_PERMANENT', True)
        self.config.setdefault('SESSION_KEY_PREFIX', 'session:')
        self.config.setdefault('SESSION_REDIS', None)
        self.config.setdefault('SESSION_MONGODB', None)
        self.config.setdefault('SESSION_MONGODB_DB', 'osr_sys')
        self.config.setdefault('SESSION_MONGODB_COLLECT', 'osr_session')
        # NOTE(review): 'SESSION_KEY_PREFIX' is setdefault-ed twice; this
        # second call is a no-op, so the effective default is 'session:'.
        self.config.setdefault('SESSION_KEY_PREFIX', 'osr-session:')
        self.config.setdefault('PERMANENT_SESSION_LIFETIME', 86400 * 30)
        if self.config["SESSION_TYPE"] == "redis":
            if self.config["SESSION_REDIS"]:
                self.redis = self.config["SESSION_REDIS"]
            else:
                raise Exception('Missing configuration "SESSION_REDIS"')
        elif self.config["SESSION_TYPE"] == "mongodb":
            if self.config["SESSION_MONGODB"]:
                self.mdb_coll = self.config["SESSION_MONGODB"][self.config["SESSION_MONGODB_DB"]
                ][self.config["SESSION_MONGODB_COLLECT"]]
            else:
                raise Exception('Missing configuration "SESSION_MONGODB"')
    def get(self, key=None, default=None):
        """
        Fetch a value from the current session.
        :param key: key to look up; when None the whole session dict is
            returned
        :param default: returned when nothing is found
        :return: stored value, whole session dict, or ``default``
        """
        if self.config["SESSION_TYPE"] == "redis":
            temp_value = self.redis.get(self.sid)
            if temp_value:
                temp_value = json_to_pyseq(temp_value.decode())
                if key and key in temp_value:
                    return temp_value[key]
                elif key:
                    return None
                else:
                    return temp_value
        else:
            query = {"id": self.sid}
            if key:
                query[key] = {"$exists": True}
            # NOTE(review): when key is None this projection becomes
            # {None: 1} — verify pymongo tolerates that, or guard on key.
            value = self.mdb_coll.find_one(query, {key: 1})
            if value:
                return value[key]
            else:
                value = self.mdb_coll.find_one(query, {"_id": 0})
                if value:
                    return value
        return default
    def set(self, key, value):
        """
        Set a value for the current sid, creating the session if needed.
        :param key:
        :param value:
        :return: the sid on success, otherwise None
        """
        if self.sid:
            if self.config["SESSION_TYPE"] == "redis":
                temp_value = self.redis.get(self.sid)
                if temp_value:
                    temp_value = json_to_pyseq(temp_value.decode())
                    temp_value[key] = value
                else:
                    # New session: stamp an expiration alongside the value.
                    temp_value = {
                        key: value,
                        "expiration": time.time() +
                        self.config["PERMANENT_SESSION_LIFETIME"]}
                # NOTE(review): temp_value is a dict; plain redis-py cannot
                # store a dict directly — confirm the configured client
                # serializes values before SET.
                self.redis.set(self.sid, temp_value)
                return self.sid
            else:
                # mongodb
                r = self.mdb_coll.update_one(
                    {"id": self.sid}, {"$set": {key: value}}, upsert=True)
                if r.modified_count:
                    return self.sid
                elif not r.modified_count and not r.matched_count:
                    # Fresh upsert: also stamp an expiration on the new doc.
                    self.mdb_coll.update_one({"id": self.sid}, {"$set": {
                        "expiration": time.time() + self.config["PERMANENT_SESSION_LIFETIME"]}})
                    return self.sid
                else:
                    # NOTE(review): matched but unchanged (same value already
                    # stored) returns None — callers treating None as failure
                    # should confirm this is intended.
                    return None
        return None
    def delete(self, key=None):
        """
        Delete a key from the current session, or the whole session when
        key is None.
        :return:
        """
        if self.config["SESSION_TYPE"] == "redis":
            if key:
                temp_value = self.redis.get(self.sid)
                if temp_value:
                    temp_value = json_to_pyseq(temp_value.decode())
                    if key and key in temp_value:
                        del temp_value[key]
                        self.redis.set(self.sid, temp_value)
            else:
                self.redis.delete(self.sid)
        else:
            if key:
                se = self.mdb_coll.find_one({"id": self.sid})
                if se and key in se:
                    del se[key]
                    # Replace the whole doc minus the deleted key.
                    self.mdb_coll.update_one({"_id": se["_id"]}, {"$set": se})
            else:
                self.mdb_coll.delete_one({"id": self.sid})
|
{
"content_hash": "791656249dcad8fffb6acfc2df3d5e32",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 117,
"avg_line_length": 33.477987421383645,
"alnum_prop": 0.4803682134134886,
"repo_name": "osroom/osroom",
"id": "daabdc22f7d78a1df2d20b18555dc00d940e737b",
"size": "5547",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "apps/core/flask/rest_session.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "5204531"
},
{
"name": "Shell",
"bytes": "18"
}
],
"symlink_target": ""
}
|
from sys import stderr
from tempfile import mkdtemp, mkstemp
from os import close, rmdir
from moi.job import system_call
from qiita_core.qiita_settings import qiita_config
from qiita_ware.wrapper import ParallelWrapper
from qiita_db.logger import LogEntry
from qiita_db.data import RawData
from qiita_db.metadata_template import TARGET_GENE_DATA_TYPES
from qiita_db.reference import Reference
def _get_qiime_minimal_mapping(prep_template, out_dir):
"""Generates a minimal QIIME-compliant mapping file for split libraries
The columns of the generated file are, in order: SampleID, BarcodeSequence,
LinkerPrimerSequence, Description. All values are taken from the prep
template except for Description, which always receive the value "Qiita MMF"
Parameters
----------
prep_template : PrepTemplate
The prep template from which we need to generate the minimal mapping
out_dir : str
Path to the output directory
Returns
-------
list of str
The paths to the qiime minimal mapping files
"""
from functools import partial
from os.path import join
import pandas as pd
# Get the data in a pandas DataFrame, so it is easier to manage
pt = prep_template.to_dataframe()
# We now need to rename some columns to be QIIME compliant.
# Hopefully, this conversion won't be needed if QIIME relaxes its
# constraints
pt.rename(columns={'barcodesequence': 'BarcodeSequence',
'linkerprimersequence': 'LinkerPrimerSequence'},
inplace=True)
pt['Description'] = pd.Series(['Qiita MMF'] * len(pt.index),
index=pt.index)
# We ensure the order of the columns as QIIME is expecting
cols = ['BarcodeSequence', 'LinkerPrimerSequence', 'Description']
# If the study has more than 1 lane, we should generate a qiita MMF for
# each of the lanes. We know how to split the prep template based on
# the run_prefix column
output_fps = []
path_builder = partial(join, out_dir)
for prefix, df in pt.groupby('run_prefix'):
df = df[cols]
out_fp = path_builder("%s_MMF.txt" % prefix)
output_fps.append(out_fp)
df.to_csv(out_fp, index_label="#SampleID", sep='\t')
return output_fps
def _get_preprocess_fastq_cmd(raw_data, prep_template, params):
    """Generate the split_libraries_fastq.py command for the raw data.

    Parameters
    ----------
    raw_data : RawData
        The raw data object to pre-process
    prep_template : PrepTemplate
        The prep template to pre-process
    params : PreprocessedIlluminaParams
        The parameters to use for the preprocessing

    Returns
    -------
    tuple (str, str)
        A 2-tuple of strings. The first string is the command to be executed.
        The second string is the path to the command's output directory

    Raises
    ------
    NotImplementedError
        If any of the raw data input filepath types is not supported
    ValueError
        If the number of raw sequence and raw barcode files do not match
        If the raw data object does not have any sequence file associated
    """
    from tempfile import mkdtemp
    from qiita_core.qiita_settings import qiita_config

    # Bucket the raw data filepaths by their type.
    forward_fps = []
    reverse_fps = []
    barcode_fps = []
    buckets = {"raw_forward_seqs": forward_fps,
               "raw_reverse_seqs": reverse_fps,
               "raw_barcodes": barcode_fps}
    for _, fp, fp_type in raw_data.get_filepaths():
        if fp_type not in buckets:
            raise NotImplementedError("Raw data file type not supported %s"
                                      % fp_type)
        buckets[fp_type].append(fp)

    if not forward_fps:
        raise ValueError("Forward reads file not found on raw data %s"
                         % raw_data.id)
    if len(barcode_fps) != len(forward_fps):
        raise ValueError("The number of barcode files and the number of "
                         "sequence files should match: %d != %d"
                         % (len(barcode_fps), len(forward_fps)))

    # The minimal QIIME mapping files must live in their own directory so
    # QIIME can consume them.
    mmf_dir = mkdtemp(dir=qiita_config.working_dir,
                      prefix='MMF_%s' % prep_template.id)
    mapping_fps = _get_qiime_minimal_mapping(prep_template, mmf_dir)
    # Temporary directory for the split libraries output.
    output_dir = mkdtemp(dir=qiita_config.working_dir, prefix='slq_out')
    # Sort every file list so each lane's files line up positionally when
    # joined into the command; all files are prefixed with run_prefix, so
    # sorting guarantees a consistent pairing.
    cmd = str("split_libraries_fastq.py --store_demultiplexed_fastq -i %s -b "
              "%s -m %s -o %s %s"
              % (','.join(sorted(forward_fps)), ','.join(sorted(barcode_fps)),
                 ','.join(sorted(mapping_fps)), output_dir, params.to_str()))
    return (cmd, output_dir)
def _get_preprocess_fasta_cmd(raw_data, prep_template, params):
    """Generates the split_libraries.py command for the raw-data

    Parameters
    ----------
    raw_data : RawData
        The raw data object to pre-process
    prep_template : PrepTemplate
        The prep template to pre-process
    params : Preprocessed454Params
        The parameters to use for the preprocessing

    Returns
    -------
    tuple (str, str)
        A 2-tuple of strings. The first string is the command to be executed.
        The second string is the path to the command's output directory

    Raises
    ------
    NotImplementedError
        If any of the raw data input filepath type is not supported
    ValueError
        If the raw data object does not have any sequence file associated
    """
    from tempfile import mkdtemp
    from os.path import basename, splitext, join
    from qiita_core.qiita_settings import qiita_config
    # Get the filepaths from the raw data object
    sffs = []
    seqs = []
    quals = []
    for fpid, fp, fp_type in raw_data.get_filepaths():
        if fp_type == "raw_sff":
            sffs.append(fp)
        elif fp_type == "raw_fasta":
            seqs.append(fp)
        elif fp_type == "raw_qual":
            quals.append(fp)
        else:
            raise NotImplementedError("Raw data file type not supported %s"
                                      % fp_type)
    # Create a temporary directory to store the split libraries output
    output_dir = mkdtemp(dir=qiita_config.working_dir, prefix='sl_out')
    prepreprocess_cmd = ''
    # Validate the input combination: either SFFs alone, or fasta+qual pairs.
    if seqs and sffs:
        raise ValueError("Cannot have SFF and raw fasta, on %s"
                         % raw_data.id)
    elif quals and not seqs:
        raise ValueError("Cannot have just qual, on %s"
                         % raw_data.id)
    elif seqs and not quals:
        raise ValueError("It is not currently possible to process "
                         "fasta file(s) without qual file(s). This will "
                         "be supported in the future. You can track progress "
                         "on this by following: "
                         "https://github.com/biocore/qiita/issues/953")
    elif seqs:
        # fasta + qual supplied directly; sort so the two lists pair up.
        seqs = sorted(seqs)
        quals = sorted(quals)
    else:
        # SFF input: run process_sff.py first to extract fna/qual files into
        # output_dir, and point seqs/quals at those future outputs.
        prepreprocess_cmds = []
        for sff in sffs:
            base = splitext(basename(sff))[0]
            sff_cmd = "process_sff.py -i %s -o %s" % (sff, output_dir)
            prepreprocess_cmds.append(sff_cmd)
            seqs.append(join(output_dir, '%s.fna' % base))
            quals.append(join(output_dir, '%s.qual' % base))
        prepreprocess_cmd = '; '.join(prepreprocess_cmds)
    # The minimal QIIME mapping files should be written to a directory,
    # so QIIME can consume them
    prep_dir = mkdtemp(dir=qiita_config.working_dir,
                       prefix='MMF_%s' % prep_template.id)
    # Get the Minimal Mapping Files
    mapping_fps = sorted(_get_qiime_minimal_mapping(prep_template, prep_dir))
    # Add any other parameter needed to split libraries
    params_str = params.to_str()
    # Create the split_libraries.py command
    # len(mapping_fps) will be == 1 when there is no run_prefix or it has
    # the same value in all the rows
    if len(mapping_fps) == 1:
        qual_str = "-q %s -d" % ','.join(quals) if quals else ""
        cmd = ' '.join(["split_libraries.py",
                        "-f %s" % ','.join(seqs),
                        "-m %s" % mapping_fps[0],
                        qual_str,
                        "-o %s" % output_dir,
                        params_str])
    else:
        # Multiple run prefixes: run split_libraries.py once per mapping
        # file and concatenate the outputs afterwards.
        len_seqs = len(seqs)
        len_mapping_fps = len(mapping_fps)
        if len_mapping_fps > len_seqs:
            mapping_fps = [basename(m) for m in mapping_fps]
            sffs = [basename(s) for s in sffs]
            raise ValueError(
                'The prep template defines: "%s" but you only have "%s" as '
                'sequence files' % (', '.join(mapping_fps), ', '.join(sffs)))
        if len_seqs != len_mapping_fps:
            # Group the sequence (and qual) files under the mapping file
            # whose run_prefix they carry.
            # -8 is to remove the _MMF.txt
            prefixes = {m: {'prefix': basename(m)[:-8], 'seqs': [],
                            'quals': []} for m in mapping_fps}
            # counter verifies every sequence file was claimed by a prefix.
            counter = 0
            for p in prefixes.values():
                for i, s in enumerate(seqs):
                    # the files are prefixed with raw_data_id
                    if basename(s).split('_', 1)[1].startswith(p['prefix']):
                        p['seqs'].append(s)
                        if quals:
                            p['quals'].append(quals[i])
                        counter = counter + 1
            if counter != len_seqs:
                raise ValueError(
                    'The run prefixes in your prep template '
                    '"%s" do not match your file names "%s"' %
                    (', '.join(mapping_fps), ', '.join(sffs)))
            mapping_fps = prefixes.keys()
            seqs = [','.join(p['seqs']) for p in prefixes.values()]
            if quals:
                quals = [','.join(p['quals']) for p in prefixes.values()]
        cmd, output_folders, n = [], [], 1
        for i, (seq, mapping) in enumerate(zip(seqs, mapping_fps)):
            qual_str = "-q %s -d" % quals[i] if quals else ""
            split_dir = join(output_dir, basename(mapping))
            output_folders.append(split_dir)
            cmd.append(' '.join(["split_libraries.py",
                                 "-f %s" % seq,
                                 "-m %s" % mapping,
                                 qual_str,
                                 "-o %s" % split_dir,
                                 "-n %d" % n,
                                 params_str]))
            # Number comes from (100K larger than amplicon):
            # http://454.com/products/gs-FLX-system/index.asp
            n = (i + 1) * 800000
        # files to cat from multiple split libraries
        to_cat = ['split_library_log.txt', 'seqs.fna']
        if quals:
            to_cat.append('seqs_filtered.qual')
        for tc in to_cat:
            files = [join(x, tc) for x in output_folders]
            cmd.append("cat %s > %s" % (' '.join(files),
                                        join(output_dir, tc)))
        cmd = '; '.join(cmd)
    if quals:
        # Rebuild a demultiplexed fastq from the (possibly concatenated)
        # fasta + qual outputs.
        fq_cmd = ' '.join(["convert_fastaqual_fastq.py",
                           "-f %s/seqs.fna" % output_dir,
                           "-q %s/seqs_filtered.qual" % output_dir,
                           "-o %s" % output_dir,
                           "-F"])
        if prepreprocess_cmd:
            cmd = '; '.join([prepreprocess_cmd, cmd, fq_cmd])
        else:
            cmd = '; '.join([cmd, fq_cmd])
    return (cmd, output_dir)
def generate_demux_file(sl_out, **kwargs):
    """Create the HDF5 demultiplexed file from split libraries output.

    Parameters
    ----------
    sl_out : str
        Path to the output directory of split libraries
    kwargs : ignored
        Necessary to include to support execution via moi.

    Returns
    -------
    str
        Path to the written seqs.demux file

    Raises
    ------
    ValueError
        If the split libraries output does not contain the demultiplexed
        fastq file
    """
    from os.path import join, exists
    from h5py import File
    from qiita_ware.demux import to_hdf5

    fastq_fp = join(sl_out, 'seqs.fastq')
    demux_fp = join(sl_out, 'seqs.demux')
    if not exists(fastq_fp):
        raise ValueError("The split libraries output directory does not "
                         "contain the demultiplexed fastq file.")
    with File(demux_fp, "w") as hdf5_file:
        to_hdf5(fastq_fp, hdf5_file)
    return demux_fp
def _insert_preprocessed_data(study, params, prep_template, slq_out,
                              **kwargs):
    """Insert the preprocessed data into the database.

    Parameters
    ----------
    study : Study
        The study to preprocess
    params : BaseParameters
        The parameters to use for preprocessing
    prep_template : PrepTemplate
        The prep template to use for the preprocessing
    slq_out : str
        Path to the split_libraries_fastq.py output directory
    kwargs : ignored
        Necessary to include to support execution via moi.

    Raises
    ------
    ValueError
        If the preprocessed output directory does not contain all the
        expected files
    """
    from os.path import exists, join
    from qiita_db.data import PreprocessedData

    # Files of interest in the split libraries output:
    #   seqs.fna   -> demultiplexed fasta
    #   seqs.fastq -> demultiplexed fastq (optional)
    #   seqs.demux -> demultiplexed HDF5
    fasta_fp = join(slq_out, 'seqs.fna')
    fastq_fp = join(slq_out, 'seqs.fastq')
    demux_fp = join(slq_out, 'seqs.demux')
    log_fp = join(slq_out, 'split_library_log.txt')
    # fasta, demux and log are mandatory; fastq is optional.
    if not (exists(fasta_fp) and exists(demux_fp) and exists(log_fp)):
        raise ValueError("The output directory %s does not contain all the "
                         "expected files." % slq_out)
    filepaths = [(fasta_fp, "preprocessed_fasta"),
                 (demux_fp, "preprocessed_demux"),
                 (log_fp, "log")]
    if exists(fastq_fp):
        filepaths.append((fastq_fp, "preprocessed_fastq"))
    PreprocessedData.create(study, params._table, params.id, filepaths,
                            prep_template)
    # Change the prep_template status to success
    prep_template.preprocessing_status = 'success'
class StudyPreprocessor(ParallelWrapper):
    """Parallel workflow that preprocesses a study's raw data by running
    split libraries and storing the demultiplexed result in the DB."""
    def _construct_job_graph(self, study, prep_template, params):
        """Constructs the workflow graph to preprocess a study

        The steps performed to preprocess a study are:
        1) Execute split libraries
        2) Add the new preprocessed data to the DB

        Parameters
        ----------
        study : Study
            The study to preprocess
        prep_template : PrepTemplate
            The prep template to use for the preprocessing
        params : BaseParameters
            The parameters to use for preprocessing
        """
        self.prep_template = prep_template
        self._logger = stderr
        raw_data = RawData(prep_template.raw_data)
        # Change the prep_template preprocessing_status to 'preprocessing'
        self.prep_template.preprocessing_status = 'preprocessing'
        # STEP 1: Preprocess the study
        preprocess_node = "PREPROCESS"
        # Check the raw data filetype to know which command generator we
        # should use
        filetype = raw_data.filetype
        if filetype == "FASTQ":
            cmd_generator = _get_preprocess_fastq_cmd
            insert_preprocessed_data = _insert_preprocessed_data
        elif filetype in ('FASTA', 'SFF'):
            cmd_generator = _get_preprocess_fasta_cmd
            insert_preprocessed_data = _insert_preprocessed_data
        else:
            raise NotImplementedError(
                "Raw data %s cannot be preprocessed, filetype %s not supported"
                % (raw_data.id, filetype))
        # Generate the command
        cmd, output_dir = cmd_generator(raw_data, self.prep_template, params)
        self._job_graph.add_node(preprocess_node, func=system_call,
                                 args=(cmd,),
                                 job_name="Construct preprocess command",
                                 requires_deps=False)
        # This step is currently only for data types in which we need to store,
        # demultiplexed sequences. Since it is the only supported data type at
        # this point, it is ok the leave it here. However, as new data types
        # become available, we will need to think a better way of doing this.
        demux_node = "GEN_DEMUX_FILE"
        self._job_graph.add_node(demux_node,
                                 func=generate_demux_file,
                                 args=(output_dir,),
                                 job_name="Generated demux file",
                                 requires_deps=False)
        self._job_graph.add_edge(preprocess_node, demux_node)
        # STEP 2: Add preprocessed data to DB
        insert_preprocessed_node = "INSERT_PREPROCESSED"
        self._job_graph.add_node(insert_preprocessed_node,
                                 func=insert_preprocessed_data,
                                 args=(study, params, self.prep_template,
                                       output_dir),
                                 job_name="Store preprocessed data",
                                 requires_deps=False)
        self._job_graph.add_edge(demux_node, insert_preprocessed_node)
        # The split libraries output dir is cleaned up once results are stored.
        self._dirpaths_to_remove.append(output_dir)

    def _failure_callback(self, msg=None):
        """Callback to execute in case that any of the job nodes failed

        Changes the prep_template preprocessing status to 'failed' and logs
        the error message.
        """
        self.prep_template.preprocessing_status = 'failed:\n %s' % msg
        LogEntry.create('Fatal', msg,
                        info={'prep_template': self.prep_template.id})
# <======== StudyProcessor helper functions ===========>
def _get_process_target_gene_cmd(preprocessed_data, params):
    """Generate the pick_closed_reference_otus.py command.

    Parameters
    ----------
    preprocessed_data : PreprocessedData
        The preprocessed_data to process
    params : ProcessedSortmernaParams
        The parameters to use for the processing

    Returns
    -------
    tuple (str, str)
        A 2-tuple of strings. The first string is the command to be executed.
        The second string is the path to the command's output directory

    Raises
    ------
    ValueError
        If no sequence file is found on the preprocessed data
    """
    # Locate the demultiplexed fasta file on the preprocessed data.
    seqs_fp = None
    for _, fp, fp_type in preprocessed_data.get_filepaths():
        if fp_type == "preprocessed_fasta":
            seqs_fp = fp
            break
    if not seqs_fp:
        raise ValueError("No sequence file found on the preprocessed data %s"
                         % preprocessed_data.id)
    # Reserve a unique output path for the pick otus results; the script
    # insists on creating the directory itself, so remove the one mkdtemp
    # just made.
    output_dir = mkdtemp(dir=qiita_config.working_dir,
                         prefix='pick_otus_otu_%s_' % preprocessed_data.id)
    rmdir(output_dir)
    # Write the pick_otus.py parameters to a temp file.
    fd, param_fp = mkstemp(dir=qiita_config.working_dir,
                           prefix='params_%s_' % preprocessed_data.id,
                           suffix='.txt')
    close(fd)
    with open(param_fp, 'w') as param_file:
        params.to_file(param_file)
    reference = Reference(params.reference)
    taxonomy_fp = reference.taxonomy_fp
    # Only pass -t when the reference actually ships a taxonomy file.
    params_str = "-t %s" % taxonomy_fp if taxonomy_fp else ""
    # Build the pick_closed_reference_otus.py command.
    cmd = str("pick_closed_reference_otus.py -i %s -r %s -o %s -p %s %s"
              % (seqs_fp, reference.sequence_fp, output_dir, param_fp,
                 params_str))
    return (cmd, output_dir)
def _insert_processed_data_target_gene(preprocessed_data, params,
                                       pick_otus_out, **kwargs):
    """Insert the processed (OTU-picked) data into the database.

    Parameters
    ----------
    preprocessed_data : PreprocessedData
        The preprocessed_data to process
    params : ProcessedSortmernaParams
        The parameters to use for the processing
    pick_otus_out : str
        Path to the pick_closed_reference_otus.py output directory
    kwargs : ignored
        Necessary to include to support execution via moi.

    Raises
    ------
    ValueError
        If the processed output directory does not contain all the expected
        files
    """
    from os.path import exists, join, isdir
    from glob import glob
    from qiita_db.data import ProcessedData

    # Expected outputs of pick_closed_reference_otus.py:
    #   otu_table.biom        -> the output OTU table
    #   sortmerna_picked_otus -> intermediate pick_otus.py output
    #   log_*.txt             -> log file (timestamped name)
    biom_fp = join(pick_otus_out, 'otu_table.biom')
    otus_dp = join(pick_otus_out, 'sortmerna_picked_otus')
    log_fp = glob(join(pick_otus_out, 'log_*.txt'))[0]
    if not (exists(biom_fp) and isdir(otus_dp) and exists(log_fp)):
        raise ValueError("The output directory %s does not contain all the "
                         "expected files." % pick_otus_out)
    filepaths = [(biom_fp, "biom"),
                 (otus_dp, "directory"),
                 (log_fp, "log")]
    ProcessedData.create(params._table, params.id, filepaths,
                         preprocessed_data=preprocessed_data)
    # Change the preprocessed_data status to processed
    preprocessed_data.processing_status = 'processed'
class StudyProcessor(ParallelWrapper):
    """Parallel workflow that processes preprocessed data by running
    closed-reference OTU picking and storing the result in the DB."""
    def _construct_job_graph(self, preprocessed_data, params):
        """Constructs the workflow graph to process a study

        The steps performed to process a study are:
        1) Execute pick_closed_reference_otus.py
        2) Add the new processed data to the DB

        Parameters
        ----------
        preprocessed_data : PreprocessedData
            The preprocessed data to process
        params : BaseParameters
            The parameters to use for processing
        """
        self._logger = stderr
        self.preprocessed_data = preprocessed_data
        self.preprocessed_data.processing_status = "processing"
        # Only target gene data types are currently supported.
        if preprocessed_data.data_type() in TARGET_GENE_DATA_TYPES:
            cmd_generator = _get_process_target_gene_cmd
            insert_processed_data = _insert_processed_data_target_gene
        else:
            raise NotImplementedError(
                "Preprocessed data %s cannot be processed, data type %s "
                "not supported"
                % (preprocessed_data.id, preprocessed_data.data_type()))
        # Step 1: Process the study
        process_node = "PROCESS"
        cmd, output_dir = cmd_generator(preprocessed_data, params)
        self._job_graph.add_node(process_node,
                                 func=system_call,
                                 args=(cmd,),
                                 job_name="Process command",
                                 requires_deps=False)
        # Step 2: Add processed data to DB
        insert_processed_node = "INSERT_PROCESSED"
        self._job_graph.add_node(insert_processed_node,
                                 func=insert_processed_data,
                                 args=(self.preprocessed_data, params,
                                       output_dir),
                                 job_name="Store processed data",
                                 requires_deps=False)
        self._job_graph.add_edge(process_node, insert_processed_node)
        # The OTU picking output dir is cleaned up once results are stored.
        self._dirpaths_to_remove.append(output_dir)

    def _failure_callback(self, msg=None):
        """Callback to execute in case that any of the job nodes failed

        Changes the preprocessed data processing status to 'failed' and logs
        the error message.
        """
        self.preprocessed_data.processing_status = 'failed: %s' % msg
        LogEntry.create('Fatal', msg,
                        info={'preprocessed_data': self.preprocessed_data.id})
|
{
"content_hash": "ffc4c24601af212c9bbec5895a24302a",
"timestamp": "",
"source": "github",
"line_count": 673,
"max_line_length": 79,
"avg_line_length": 37.335809806835066,
"alnum_prop": 0.5839535161380188,
"repo_name": "RNAer/qiita",
"id": "2273a0c21f830f1d528dd72c99fda7a041f0b8fa",
"size": "25478",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qiita_ware/processing_pipeline.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "282"
},
{
"name": "HTML",
"bytes": "423495"
},
{
"name": "JavaScript",
"bytes": "5136"
},
{
"name": "Python",
"bytes": "1277701"
},
{
"name": "SQLPL",
"bytes": "419"
},
{
"name": "Shell",
"bytes": "3195"
}
],
"symlink_target": ""
}
|
import os
import sys
import time
from collections import Counter, OrderedDict
from typing import List, Union
# this is needed because we want to add 'torchtext/examples/data_pipeline' directory to the
# `sys.path` variable in order to import the pytext_vocab (since its not a module)
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "examples", "vocab"))
import torch
from pytext.data.utils import Vocabulary as PytextVocabulary
from pytext.torchscript.vocab import ScriptVocabulary as PytextScriptVocabulary
from pytext_vocab import ScriptVocab as ExperimentalScriptVocabulary
from torchtext.prototype.datasets import AG_NEWS
def _run_benchmark_lookup(tokens, vocab, num_iters=1):
    """Time token->index lookups, dispatching on the vocab implementation.

    ``tokens`` may be a flat list of tokens or a list of token lists; the
    elapsed wall time over ``num_iters`` passes is printed.
    """

    def _bench_pytext(toks, v: PytextVocabulary):
        # lookup_all accepts either a token or a list of tokens.
        for item in toks:
            v.lookup_all(item)

    def _bench_pytext_script(toks, v: PytextScriptVocabulary):
        if isinstance(toks, list) and isinstance(toks[0], list):
            # batch (list-of-lists) lookup
            for token_list in toks:
                v.lookup_indices_1d(token_list)
        elif isinstance(toks, list):
            # single token lookup
            for tok in toks:
                v.lookup_indices_1d([tok])
        else:
            raise RuntimeError("Received tokens of incorrect type {}.".format(type(toks)))

    def _bench_experimental_script(toks, v: ExperimentalScriptVocabulary):
        if isinstance(toks, list) and isinstance(toks[0], list):
            # batch (list-of-lists) lookup
            for token_list in toks:
                v.lookup_indices_1d(token_list)
        elif isinstance(toks, list):
            # single token lookup via __getitem__
            for tok in toks:
                v[tok]
        else:
            raise RuntimeError("Received tokens of incorrect type {}.".format(type(toks)))

    start = time.monotonic()
    if isinstance(vocab, PytextVocabulary):
        for _ in range(num_iters):
            _bench_pytext(tokens, vocab)
    elif isinstance(vocab, PytextScriptVocabulary):
        for _ in range(num_iters):
            _bench_pytext_script(tokens, vocab)
    elif isinstance(vocab, (ExperimentalScriptVocabulary, torch.jit._script.RecursiveScriptModule)):
        for _ in range(num_iters):
            _bench_experimental_script(tokens, vocab)
    else:
        raise RuntimeError("Received vocab of incorrect type {}.".format(type(vocab)))
    print("Lookup time:", time.monotonic() - start)
def _run_benchmark_lookup_jit_for_loop(tokens: Union[List[str], List[List[str]]], vocab, num_iters=1):
    """Time ``num_iters`` lookups of ``tokens`` against ``vocab``, with the
    per-token loop itself compiled to TorchScript (so the loop overhead runs
    inside the jit rather than in eager Python).

    ``tokens`` is a flat list of token strings or a list of token lists;
    ``vocab`` is a pytext ScriptVocabulary or an experimental ScriptVocab
    (possibly jit-scripted).  The elapsed time is printed to stdout.
    """
    # One scripted helper per (vocab type, input shape) combination; the
    # signatures must stay TorchScript-compatible.
    @torch.jit.script
    def _run_benchmark_pytext_script_vocab(toks: List[str], v: PytextScriptVocabulary):
        for token in toks:
            v.lookup_indices_1d([token])

    @torch.jit.script
    def _run_benchmark_experimental_script_vocab(toks: List[str], v: ExperimentalScriptVocabulary):
        for token in toks:
            v[token]

    @torch.jit.script
    def _run_benchmark_lists_pytext_script_vocab(tok_lists: List[List[str]], v: PytextScriptVocabulary):
        for tokens_list in tok_lists:
            v.lookup_indices_1d(tokens_list)

    @torch.jit.script
    def _run_benchmark_lists_experimental_script_vocab(tok_lists: List[List[str]], v: ExperimentalScriptVocabulary):
        for tokens_list in tok_lists:
            v.lookup_indices_1d(tokens_list)

    t0 = time.monotonic()
    # list lookup
    if isinstance(tokens, list) and isinstance(tokens[0], list):
        if isinstance(vocab, PytextScriptVocabulary):
            for _ in range(num_iters):
                _run_benchmark_lists_pytext_script_vocab(tokens, vocab)
        elif isinstance(vocab, (ExperimentalScriptVocabulary, torch.jit._script.RecursiveScriptModule)):
            for _ in range(num_iters):
                _run_benchmark_lists_experimental_script_vocab(tokens, vocab)
        else:
            raise RuntimeError("Received vocab of incorrect type {}.".format(type(vocab)))
    # single token lookup
    elif isinstance(tokens, list):
        if isinstance(vocab, PytextScriptVocabulary):
            for _ in range(num_iters):
                _run_benchmark_pytext_script_vocab(tokens, vocab)
        elif isinstance(vocab, (ExperimentalScriptVocabulary, torch.jit._script.RecursiveScriptModule)):
            for _ in range(num_iters):
                _run_benchmark_experimental_script_vocab(tokens, vocab)
        else:
            raise RuntimeError("Received vocab of incorrect type {}.".format(type(vocab)))
    else:
        raise RuntimeError("Received tokens of incorrect type {}.".format(type(tokens)))
    print("Lookup time:", time.monotonic() - t0)
def benchmark_experimental_vocab():
    """Benchmark vocab construction and token lookup over the AG_NEWS
    training split for three implementations: pytext Vocabulary, pytext
    ScriptVocabulary, and the experimental torchtext ScriptVocab, in
    eager, jit, and jit-for-loop modes.  Results are printed to stdout.
    """
    (train,) = AG_NEWS(data_select="train")
    vocab = train.get_vocab()
    # Re-materialize the corpus as token strings: a flat list for
    # single-token lookups and a list of per-example token lists.
    tokens: List[str] = []
    tokens_lists: List[List[str]] = []
    for (_, text) in train:
        cur_tokens = []
        for id in text.tolist():
            cur_tokens.append(vocab.itos[id])
        tokens_lists.append(cur_tokens)
        tokens += cur_tokens
    print("Tokens size:", len(tokens))
    print("Tokens list size:", len(tokens_lists))
    counter = Counter(tokens)
    sorted_by_freq_tuples = sorted(counter.items(), key=lambda x: x[1], reverse=True)
    vocab_list = [pair[0] for pair in sorted_by_freq_tuples]
    # "<unk>" goes at index 0 for the list-based constructors; the
    # ordered-dict variant passes unk_token explicitly below.
    vocab_list.insert(0, "<unk>")
    ordered_dict = OrderedDict(sorted_by_freq_tuples)
    # pytext vocab construction
    print("Pytext Vocabulary")
    t0 = time.monotonic()
    pytext_vocab = PytextVocabulary(vocab_list)
    print("Construction time:", time.monotonic() - t0)
    # pytext ScriptVocab construction
    print("Pytext Script Vocabulary")
    t0 = time.monotonic()
    pytext_script_vocab = PytextScriptVocabulary(vocab_list)
    print("Construction time:", time.monotonic() - t0)
    jit_pytext_script_vocab = torch.jit.script(pytext_script_vocab)
    # experimental ScriptVocab construction
    print("Experimental Script Vocabulary")
    t0 = time.monotonic()
    experimental_script_vocab = ExperimentalScriptVocabulary(ordered_dict, unk_token="<unk>")
    print("Construction time:", time.monotonic() - t0)
    jit_experimental_script_vocab = torch.jit.script(experimental_script_vocab)
    # Each mode below is timed on three input shapes: the flat token list,
    # the whole corpus wrapped as one list, and per-example token lists.
    # pytext Vocab eager lookup
    print("Pytext Vocabulary - Eager Mode")
    _run_benchmark_lookup(tokens, pytext_vocab)
    _run_benchmark_lookup([tokens], pytext_vocab)
    _run_benchmark_lookup(tokens_lists, pytext_vocab)
    # pytext ScriptVocab eager lookup
    print("Pytext ScriptVocab - Eager Mode")
    _run_benchmark_lookup(tokens, pytext_script_vocab)
    _run_benchmark_lookup([tokens], pytext_script_vocab)
    _run_benchmark_lookup(tokens_lists, pytext_script_vocab)
    # experimental ScriptVocab eager lookup
    print("Experimental ScriptVocab - Eager Mode")
    _run_benchmark_lookup(tokens, experimental_script_vocab)
    _run_benchmark_lookup([tokens], experimental_script_vocab)
    _run_benchmark_lookup(tokens_lists, experimental_script_vocab)
    # pytext ScriptVocab jit lookup
    print("Pytext ScriptVocab - Jit Mode")
    _run_benchmark_lookup(tokens, jit_pytext_script_vocab)
    _run_benchmark_lookup([tokens], jit_pytext_script_vocab)
    _run_benchmark_lookup(tokens_lists, jit_pytext_script_vocab)
    # experimental ScriptVocab jit lookup
    print("Experimental ScriptVocab - Jit Mode")
    _run_benchmark_lookup(tokens, jit_experimental_script_vocab)
    _run_benchmark_lookup([tokens], jit_experimental_script_vocab)
    _run_benchmark_lookup(tokens_lists, jit_experimental_script_vocab)
    # pytext ScriptVocab JITed for loop
    print("Pytext ScriptVocab - Jit For Loop")
    _run_benchmark_lookup_jit_for_loop(tokens, jit_pytext_script_vocab)
    _run_benchmark_lookup_jit_for_loop([tokens], jit_pytext_script_vocab)
    _run_benchmark_lookup_jit_for_loop(tokens_lists, jit_pytext_script_vocab)
    # experimental ScriptVocab JITed for loop
    print("Experimental ScriptVocab - Jit For Loop")
    _run_benchmark_lookup_jit_for_loop(tokens, jit_experimental_script_vocab)
    _run_benchmark_lookup_jit_for_loop([tokens], jit_experimental_script_vocab)
    _run_benchmark_lookup_jit_for_loop(tokens_lists, jit_experimental_script_vocab)
# Entry point: run the construction/lookup benchmarks as a script.
if __name__ == "__main__":
    benchmark_experimental_vocab()
|
{
"content_hash": "b930143ac6cfdb2e42409c3dbb928d99",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 116,
"avg_line_length": 41.78391959798995,
"alnum_prop": 0.6821407095610342,
"repo_name": "pytorch/text",
"id": "21053a31f0fb7e4f0b6f8e4ea8949314f8b82093",
"size": "8315",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "benchmark/benchmark_pytext_vocab.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5989"
},
{
"name": "C",
"bytes": "1165"
},
{
"name": "C++",
"bytes": "103773"
},
{
"name": "CMake",
"bytes": "6607"
},
{
"name": "Dockerfile",
"bytes": "1632"
},
{
"name": "Python",
"bytes": "761434"
},
{
"name": "Shell",
"bytes": "19559"
}
],
"symlink_target": ""
}
|
"""Definition and configuration of the Flask application."""
import flask.ext.restless as rest
from flask import Flask, render_template
from flask.ext.assets import Environment, Bundle
from flask.ext.triangle import Triangle
from readme.model import Recommendation
from readme.db import session
from readme import conf as c
# The WSGI application object; everything below configures it.
# NOTE(review): the `flask.ext.*` import style used above is long
# deprecated — confirm the pinned Flask version still supports it.
app = Flask(__name__)
# Make the application compatible with AngularJS
Triangle(app)
# Definition of the REST API over the database models
manager = rest.APIManager(app, session=session)
# Expose CRUD endpoints (GET/POST/PUT/DELETE) for Recommendation.
recommendation_blueprint = manager.create_api(
    Recommendation,
    methods=['GET', 'POST', 'PUT', 'DELETE']
)
# Definition of the static assets handler
assets = Environment(app)
# Definition of the JS assets pipeline: minify every configured JS file
# into a single packed bundle.
js_files = [c.JS_DIR + '/' + js for js in c.JS_FILES]
packed_js = c.JS_DIR + '/' + c.PACKED_JS
js = Bundle(
    *js_files,
    filters='jsmin',
    output=packed_js)
assets.register('js_all', js)
# Definition of the CSS assets pipeline: same idea, with cssmin.
css_files = [c.CSS_DIR + '/' + css for css in c.CSS_FILES]
packed_css = c.CSS_DIR + '/' + c.PACKED_CSS
css = Bundle(
    *css_files,
    filters='cssmin',
    output=packed_css)
assets.register('css_all', css)
@app.route('/')
def index():
    """Render the application's index page."""
    return render_template('index.html.jinja2')
|
{
"content_hash": "4c4d7179c8e254522239781add6f7df3",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 60,
"avg_line_length": 25.34,
"alnum_prop": 0.7103393843725335,
"repo_name": "brouberol/readme",
"id": "d64a0c823dc24c7a0f4408b573636339523a6826",
"size": "1292",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "readme/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "0"
},
{
"name": "JavaScript",
"bytes": "573"
},
{
"name": "Python",
"bytes": "2860"
}
],
"symlink_target": ""
}
|
import sys
import socket
import select
def run_client():
    """
    Run the client. This should listen for input text from the user
    and send messages to the server. Responses from the server should
    be printed to the console.

    Returns when the server closes the connection.
    """
    # Specify where the server is to connect to
    server_address = '127.0.0.1'
    port = 5000
    # Create a socket and connect to the server
    client_socket = socket.socket()
    client_socket.connect((server_address, port))
    socket_list = [sys.stdin, client_socket]
    # Log that it has connected to the server
    print('Connected to chat server...')
    print('Type here to send messages:')
    # NOTE: select() on sys.stdin works on POSIX only; Windows would
    # need a different input mechanism.
    while True:
        # Block until the server sends data or the user types a line.
        # (A zero timeout here would busy-wait and pin a CPU core.)
        ready_to_read, _, _ = select.select(socket_list, [], [])
        # When one of the inputs is ready, process the message
        for sock in ready_to_read:
            if sock is client_socket:
                # The server has sent a message; decode and display it
                msg = sock.recv(1024).decode()
                if len(msg) > 0:
                    print("%s" % (str(msg)))
                else:
                    # recv() returning an empty string means the server
                    # closed the connection: stop instead of spinning.
                    print('Disconnected from chat server')
                    client_socket.close()
                    return
            else:
                # The user entered a message
                msg = sys.stdin.readline()
                # strip the message of extra white space
                msg = msg.strip()
                # Send the message to the server
                client_socket.send(msg.encode())
                # print a log of the sent message
                print("**YOU**: %s" % (str(msg)))
# Entry point: start the interactive chat client.
if __name__ == '__main__':
    run_client()
|
{
"content_hash": "cd62f24836d6b67dc3e91d0f2fe3098b",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 88,
"avg_line_length": 37.568627450980394,
"alnum_prop": 0.5715031315240083,
"repo_name": "patevs/python_chat_app",
"id": "6e9c84ab66c628f0c4b1a5d947e29174d86ee133",
"size": "1916",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10244"
}
],
"symlink_target": ""
}
|
from django.conf.urls.defaults import *
from piston.resource import Resource
from snapboard.api.handlers import ThreadHandler
from snapboard.api.auth import StaffHttpBasicAuthentication
# Require HTTP Basic credentials belonging to a staff user for API access.
auth = StaffHttpBasicAuthentication(realm="Snapboard")
# Piston resource exposing ThreadHandler behind the auth check above.
thread = Resource(ThreadHandler, authentication=auth)
# URL table: /thread/ is the only endpoint of this API.
urlpatterns = patterns('',
    (r'^thread/$', thread),
)
|
{
"content_hash": "19b78da9091eec00d020c02c6e8d592a",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 59,
"avg_line_length": 25.357142857142858,
"alnum_prop": 0.8,
"repo_name": "johnboxall/snapboard",
"id": "e9118779800c827e8b5449ff4ba267e545229390",
"size": "355",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "snapboard/api/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "14517"
},
{
"name": "Python",
"bytes": "193925"
}
],
"symlink_target": ""
}
|
import sys
import os
from robot.parsing import VALID_EXTENSIONS as RESOURCE_EXTENSIONS
from robot.errors import DataError
from .robotbuilder import LibraryDocBuilder, ResourceDocBuilder
from .specbuilder import SpecDocBuilder
# Documenting Java libraries requires Jython's Java integration; on other
# interpreters keep the name importable but fail with a clear error.
if sys.platform.startswith('java'):
    from .javabuilder import JavaDocBuilder
else:
    def JavaDocBuilder():
        # Stub: raised only if Java documentation is requested off-Jython.
        raise DataError('Documenting Java test libraries requires Jython.')
def DocumentationBuilder(library_or_resource):
    """Return a documentation builder chosen by file extension.

    Resource-file extensions get a ResourceDocBuilder, '.xml' a
    SpecDocBuilder, '.java' a JavaDocBuilder; anything else is treated
    as a library and gets a LibraryDocBuilder.
    """
    ext = os.path.splitext(library_or_resource)[1][1:].lower()
    if ext in RESOURCE_EXTENSIONS:
        return ResourceDocBuilder()
    special = {'xml': SpecDocBuilder, 'java': JavaDocBuilder}
    return special.get(ext, LibraryDocBuilder)()
|
{
"content_hash": "f5424541c5b5d7d538a497eeaace9deb",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 75,
"avg_line_length": 31.708333333333332,
"alnum_prop": 0.7477003942181341,
"repo_name": "ktan2020/legacy-automation",
"id": "d5e661e7da5f1a54a5b3bb339a7b2d4307f8fbb5",
"size": "1367",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "win/Lib/site-packages/robot/libdocpkg/builder.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "913"
},
{
"name": "Ada",
"bytes": "289"
},
{
"name": "Assembly",
"bytes": "687"
},
{
"name": "Boo",
"bytes": "540"
},
{
"name": "C",
"bytes": "40116"
},
{
"name": "C#",
"bytes": "474"
},
{
"name": "C++",
"bytes": "393"
},
{
"name": "CSS",
"bytes": "70883"
},
{
"name": "ColdFusion",
"bytes": "1012"
},
{
"name": "Common Lisp",
"bytes": "1034"
},
{
"name": "D",
"bytes": "1858"
},
{
"name": "Eiffel",
"bytes": "426"
},
{
"name": "Erlang",
"bytes": "9243"
},
{
"name": "FORTRAN",
"bytes": "1810"
},
{
"name": "Forth",
"bytes": "182"
},
{
"name": "Groovy",
"bytes": "2366"
},
{
"name": "Haskell",
"bytes": "816"
},
{
"name": "Haxe",
"bytes": "455"
},
{
"name": "Java",
"bytes": "1155"
},
{
"name": "JavaScript",
"bytes": "69444"
},
{
"name": "Lua",
"bytes": "795"
},
{
"name": "Matlab",
"bytes": "1278"
},
{
"name": "OCaml",
"bytes": "350"
},
{
"name": "Objective-C++",
"bytes": "885"
},
{
"name": "PHP",
"bytes": "1411"
},
{
"name": "Pascal",
"bytes": "388"
},
{
"name": "Perl",
"bytes": "252651"
},
{
"name": "Pike",
"bytes": "589"
},
{
"name": "Python",
"bytes": "42085780"
},
{
"name": "R",
"bytes": "1156"
},
{
"name": "Ruby",
"bytes": "480"
},
{
"name": "Scheme",
"bytes": "282"
},
{
"name": "Shell",
"bytes": "30518"
},
{
"name": "Smalltalk",
"bytes": "926"
},
{
"name": "Squirrel",
"bytes": "697"
},
{
"name": "Stata",
"bytes": "302"
},
{
"name": "SystemVerilog",
"bytes": "3145"
},
{
"name": "Tcl",
"bytes": "1039"
},
{
"name": "TeX",
"bytes": "1746"
},
{
"name": "VHDL",
"bytes": "985"
},
{
"name": "Vala",
"bytes": "664"
},
{
"name": "Verilog",
"bytes": "439"
},
{
"name": "Visual Basic",
"bytes": "2142"
},
{
"name": "XSLT",
"bytes": "152770"
},
{
"name": "ooc",
"bytes": "890"
},
{
"name": "xBase",
"bytes": "769"
}
],
"symlink_target": ""
}
|
import asyncio
import random
# Praise phrases (Russian) appended to the announcement when a client
# guesses the secret number correctly.
sentences = [
    "справился с упражнением",
    "мастер сетевых протоколов!",
    "просто мастер Python!",
    "будет вести следующую лекцию"
]
class EchoServerProtocol(asyncio.Protocol):
    """Guessing-game protocol.

    The first message from a client is taken as its name and answered
    with a randomly drawn secret number; the second message is checked
    against that number, the verdict is printed to the server console,
    and the connection is closed.
    """

    def __init__(self):
        super().__init__()
        self.transport = None
        self.name = None
        self.secret_number = None

    def connection_made(self, transport):
        # Keep the transport so data_received can reply and close.
        self.transport = transport

    def data_received(self, data):
        message = data.decode()
        if self.name is None:
            # First message: register the client and send the secret.
            self.name = message
            self.secret_number = random.randrange(0, 10000000)
            self.transport.write(str(self.secret_number).encode())
            print('%s подключился' % (self.name,))
            return
        # Second message: verify the guess, announce, and hang up.
        if message == str(self.secret_number):
            response = 'Yeah! %s %s' % (self.name, random.choice(sentences))
        else:
            response = 'Fail! %s не угадал число: было %s, а пришло %s' % (
                self.name, self.secret_number, message
            )
        print(response)
        self.transport.close()
# Module-level bootstrap: build the event loop, bind the listening
# server, and serve until interrupted from the keyboard.
loop = asyncio.get_event_loop()
# Each client connection will create a new protocol instance
coro = loop.create_server(EchoServerProtocol, '0.0.0.0', 9999)
server = loop.run_until_complete(coro)
# Serve requests until Ctrl+C is pressed
print('Serving on {}'.format(server.sockets[0].getsockname()))
try:
    loop.run_forever()
except KeyboardInterrupt:
    pass
# Close the server and wait for the close to complete before exiting.
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()
|
{
"content_hash": "16ef5364e8a39630b1788e3819271726",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 80,
"avg_line_length": 27.18032786885246,
"alnum_prop": 0.6115802171290712,
"repo_name": "park-python/course",
"id": "799a4610f59350f6cf3706e9774d1bd6a987d617",
"size": "1774",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lectures/06_Internet/exercise_3_tcp_send_recv_send.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2545"
},
{
"name": "Go",
"bytes": "1190"
},
{
"name": "HTML",
"bytes": "1885"
},
{
"name": "JavaScript",
"bytes": "1920"
},
{
"name": "Jupyter Notebook",
"bytes": "1961265"
},
{
"name": "Python",
"bytes": "53267"
},
{
"name": "Shell",
"bytes": "72"
}
],
"symlink_target": ""
}
|
import re, sys
from difflib import unified_diff, ndiff
import pprint
import logging
import svntest
logger = logging.getLogger()
######################################################################
# Exception types
# Exception types raised by the verification helpers below; all derive
# from svntest.Failure, most via SVNUnexpectedOutput.
class SVNUnexpectedOutput(svntest.Failure):
  """Exception raised if an invocation of svn results in unexpected
  output of any kind."""
  pass

class SVNUnexpectedStdout(SVNUnexpectedOutput):
  """Exception raised if an invocation of svn results in unexpected
  output on STDOUT."""
  pass

class SVNUnexpectedStderr(SVNUnexpectedOutput):
  """Exception raised if an invocation of svn results in unexpected
  output on STDERR."""
  pass

class SVNExpectedStdout(SVNUnexpectedOutput):
  """Exception raised if an invocation of svn results in no output on
  STDOUT when output was expected."""
  pass

class SVNExpectedStderr(SVNUnexpectedOutput):
  """Exception raised if an invocation of svn results in no output on
  STDERR when output was expected."""
  pass

class SVNUnexpectedExitCode(SVNUnexpectedOutput):
  """Exception raised if an invocation of svn exits with a value other
  than what was expected."""
  pass

class SVNIncorrectDatatype(SVNUnexpectedOutput):
  """Exception raised if invalid input is passed to the
  run_and_verify_* API"""
  pass

class SVNDumpParseError(svntest.Failure):
  """Exception raised if parsing a dump file fails"""
  pass
######################################################################
# Comparison of expected vs. actual output
def createExpectedOutput(expected, output_type, match_all=True):
  """Return EXPECTED promoted to an ExpectedOutput instance.

  A list becomes a literal line matcher, a string a regex matcher, an
  integer a matcher for svn error lines mentioning that error code, and
  the AnyOutput class is instantiated.  None and existing ExpectedOutput
  instances pass through unchanged; anything else raises
  SVNIncorrectDatatype (OUTPUT_TYPE names the stream in the message).
  """
  if isinstance(expected, list):
    return ExpectedOutput(expected)
  if isinstance(expected, str):
    return RegexOutput(expected, match_all)
  if isinstance(expected, int):
    # Shorthand for "an error line containing E<code>".
    return RegexOutput(".*: E%d:.*" % expected, False)
  if expected is AnyOutput:
    return AnyOutput()
  if expected is not None and not isinstance(expected, ExpectedOutput):
    raise SVNIncorrectDatatype("Unexpected type for '%s' data" % output_type)
  return expected
class ExpectedOutput(object):
  """Matches an ordered list of lines.

  With MATCH_ALL true the expected lines must equal the actual lines
  one-to-one and in order; with MATCH_ALL false the expected lines must
  appear as an in-order subsequence of the actual lines, other actual
  lines being ignored.
  """

  def __init__(self, expected, match_all=True):
    """EXPECTED is a string or a list of strings.

    See also: svntest.verify.createExpectedOutput().
    """
    assert expected is not None
    self.expected = expected
    self.match_all = match_all

  def __str__(self):
    return str(self.expected)

  def __cmp__(self, other):
    # Direct comparison is deliberately unsupported; use matches().
    raise TypeError("ExpectedOutput does not implement direct comparison; "
                    "see the 'matches()' method")

  def matches(self, actual):
    """Return whether SELF matches ACTUAL (a single string or a list of
    newline-terminated lines)."""
    assert actual is not None
    want = self.expected if isinstance(self.expected, list) else [self.expected]
    got = actual if isinstance(actual, list) else [actual]
    if self.match_all:
      return want == got
    # Subsequence match: advance through WANT as its lines appear in GOT.
    next_i = 0
    for line in got:
      if want[next_i] == line:
        next_i += 1
        if next_i == len(want):
          return True
    return False

  def display_differences(self, message, label, actual):
    """Print MESSAGE (unless None), both line sets, and a diff, all
    labeled with LABEL."""
    display_lines(message, self.expected, actual, label, label)
    display_lines_diff(self.expected, actual, label, label)
class AnyOutput(ExpectedOutput):
  """Matches any output containing at least one non-empty line."""

  def __init__(self):
    ExpectedOutput.__init__(self, [], False)

  def matches(self, actual):
    assert actual is not None
    # True iff some line is non-empty; empty ACTUAL never matches.
    return any(actual)

  def display_differences(self, message, label, actual):
    # Nothing useful to show beyond the caller-supplied message.
    if message:
      logger.warn(message)
class RegexOutput(ExpectedOutput):
  """Matches a single regular expression against the actual output.

  With MATCH_ALL true every actual line must match the expression; with
  MATCH_ALL false one matching line suffices.  Empty actual output never
  matches.
  """

  def __init__(self, expected, match_all=True):
    """EXPECTED is a regular expression string."""
    assert isinstance(expected, (str, bytes))
    ExpectedOutput.__init__(self, expected, match_all)
    self.expected_re = re.compile(expected)

  def matches(self, actual):
    assert actual is not None
    lines = actual if isinstance(actual, list) else [actual]
    # Providing a regex implies some output is required; none is a miss.
    if not lines:
      return False
    combine = all if self.match_all else any
    return combine(self.expected_re.match(line) for line in lines)

  def display_differences(self, message, label, actual):
    display_lines(message, self.expected, actual, label + ' (regexp)', label)

  def insert(self, index, line):
    # Mutating the pattern requires recompiling the cached regex.
    self.expected.insert(index, line)
    self.expected_re = re.compile(self.expected)
class RegexListOutput(ExpectedOutput):
  """Matches an ordered list of regular expressions.

  With MATCH_ALL true the expressions must match the actual lines
  one-to-one and in order; with MATCH_ALL false they must match an
  in-order subsequence of the actual lines.
  """

  def __init__(self, expected, match_all=True):
    """EXPECTED is a list of regular expression strings."""
    assert isinstance(expected, list)
    ExpectedOutput.__init__(self, expected, match_all)
    self.expected_res = [re.compile(e) for e in expected]

  def matches(self, actual):
    assert actual is not None
    lines = actual if isinstance(actual, list) else [actual]
    if self.match_all:
      if len(self.expected_res) != len(lines):
        return False
      return all(r.match(line) for r, line in zip(self.expected_res, lines))
    # Subsequence match: advance through the expressions as lines match.
    pos = 0
    for line in lines:
      if self.expected_res[pos].match(line):
        pos += 1
        if pos == len(self.expected_res):
          return True
    return False

  def display_differences(self, message, label, actual):
    display_lines(message, self.expected, actual, label + ' (regexp)', label)

  def insert(self, index, line):
    # Keep the compiled expressions in sync with the pattern list.
    self.expected.insert(index, line)
    self.expected_res = [re.compile(e) for e in self.expected]
class UnorderedOutput(ExpectedOutput):
  """Matches an unordered list of lines: expected and actual lines must
  pair up one-to-one, in any order."""

  def __init__(self, expected):
    assert isinstance(expected, list)
    ExpectedOutput.__init__(self, expected)

  def matches(self, actual):
    lines = actual if isinstance(actual, list) else [actual]
    # Equal multisets of lines <=> equal sorted sequences.
    return sorted(self.expected) == sorted(lines)

  def display_differences(self, message, label, actual):
    display_lines(message, self.expected, actual, label + ' (unordered)', label)
    display_lines_diff(self.expected, actual, label + ' (unordered)', label)
class UnorderedRegexListOutput(ExpectedOutput):
  """Matches an unordered list of regular expressions.

  The expressions must match all the actual lines, one-to-one, in any
  order.

  Note: This can give a false negative result (no match) when there is
  an actual line that matches multiple expressions and a different
  actual line that matches some but not all of those same
  expressions.  The implementation matches each expression in turn to
  the first unmatched actual line that it can match, and does not try
  all the permutations when there are multiple possible matches.
  """

  def __init__(self, expected):
    assert isinstance(expected, list)
    ExpectedOutput.__init__(self, expected)

  def matches(self, actual):
    assert actual is not None
    if not isinstance(actual, list):
      actual = [actual]
    else:
      # Work on a copy: the greedy matching below consumes lines, and
      # the caller's list must not be mutated as a side effect.
      actual = list(actual)
    if len(self.expected) != len(actual):
      return False
    for e in self.expected:
      expect_re = re.compile(e)
      for actual_line in actual:
        if expect_re.match(actual_line):
          actual.remove(actual_line)
          break
      else:
        # One of the regexes was not found
        return False
    return True

  def display_differences(self, message, label, actual):
    display_lines(message, self.expected, actual,
                  label + ' (regexp) (unordered)', label)
class AlternateOutput(ExpectedOutput):
  """Matches if any one of a list of ExpectedOutput instances matches."""

  def __init__(self, expected, match_all=True):
    """EXPECTED is a non-empty list of ExpectedOutput instances."""
    assert isinstance(expected, list) and expected != []
    assert all(isinstance(e, ExpectedOutput) for e in expected)
    ExpectedOutput.__init__(self, expected)

  def matches(self, actual):
    assert actual is not None
    return any(e.matches(actual) for e in self.expected)

  def display_differences(self, message, label, actual):
    # For now, just display differences against the first alternative.
    self.expected[0].display_differences(message, label, actual)
######################################################################
# Displaying expected and actual output
def display_trees(message, label, expected, actual):
  """Print two trees, expected and actual, via svntest.tree.

  MESSAGE is printed first when not None; either tree may be None, in
  which case that side is skipped.  LABEL tags both sections.
  """
  if message is not None:
    logger.warn(message)
  if expected is not None:
    logger.warn('EXPECTED %s:', label)
    svntest.tree.dump_tree(expected)
  if actual is not None:
    logger.warn('ACTUAL %s:', label)
    svntest.tree.dump_tree(actual)
def display_lines_diff(expected, actual, expected_label, actual_label):
  """Print a unified diff between EXPECTED (labeled with EXPECTED_LABEL)
  and ACTUAL (labeled with ACTUAL_LABEL).

  Each of EXPECTED and ACTUAL is a string or a list of strings.
  """
  expected_lines = expected if isinstance(expected, list) else [expected]
  actual_lines = actual if isinstance(actual, list) else [actual]
  logger.warn('DIFF ' + expected_label + ':')
  diff = unified_diff(expected_lines, actual_lines,
                      fromfile='EXPECTED ' + expected_label,
                      tofile='ACTUAL ' + actual_label)
  for line in diff:
    logger.warn('| ' + line.rstrip())
def display_lines(message, expected, actual,
                  expected_label, actual_label=None):
  """Print MESSAGE (unless it is None), then EXPECTED labeled with
  EXPECTED_LABEL, then ACTUAL labeled with ACTUAL_LABEL (defaulting to
  EXPECTED_LABEL).  Each of EXPECTED and ACTUAL is a string or a list
  of strings; a None value suppresses that section.
  """
  if message is not None:
    logger.warn(message)
  if actual_label is None:
    actual_label = expected_label
  for tag, lines in (('EXPECTED %s:' % expected_label, expected),
                     ('ACTUAL %s:' % actual_label, actual)):
    if lines is None:
      continue
    if type(lines) is str:
      lines = [lines]
    logger.warn(tag)
    for line in lines:
      logger.warn('| ' + line.rstrip())
def compare_and_display_lines(message, label, expected, actual,
                              raisable=None):
  """Compare two sets of output lines, and print them if they differ,
  preceded by MESSAGE iff not None.  EXPECTED may be an instance of
  ExpectedOutput (and if not, it is wrapped as such).  ACTUAL may be a
  list of newline-terminated lines, or a single string.  RAISABLE is an
  exception class, an instance of which is thrown if ACTUAL doesn't
  match EXPECTED."""
  if raisable is None:
    raisable = svntest.main.SVNLineUnequal
  ### It'd be nicer to use createExpectedOutput() here, but its
  ### semantics don't match all current consumers of this function.
  assert expected is not None
  assert actual is not None
  if not isinstance(expected, ExpectedOutput):
    expected = ExpectedOutput(expected)
  actual = svntest.main.ensure_list(actual)
  if len(actual) > 0:
    # Bytes (rather than str) output triggers a binary comparison.
    # NOTE(review): filter_dbg appears to strip debug-only lines before
    # the comparison — confirm against svntest.main.
    is_binary = not isinstance(actual[0], str)
    actual = svntest.main.filter_dbg(actual, is_binary)
  if not expected.matches(actual):
    expected.display_differences(message, label, actual)
    raise raisable
def verify_outputs(message, actual_stdout, actual_stderr,
                   expected_stdout, expected_stderr, all_stdout=True):
  """Compare and display expected vs. actual stderr and stdout lines:
  if they don't match, print the difference (preceded by MESSAGE iff
  not None) and raise an exception.

  If EXPECTED_STDERR or EXPECTED_STDOUT is a string the string is
  interpreted as a regular expression.  For EXPECTED_STDOUT and
  ACTUAL_STDOUT to match, every line in ACTUAL_STDOUT must match the
  EXPECTED_STDOUT regex, unless ALL_STDOUT is false.  For
  EXPECTED_STDERR regexes only one line in ACTUAL_STDERR need match."""
  expected_stderr = createExpectedOutput(expected_stderr, 'stderr', False)
  expected_stdout = createExpectedOutput(expected_stdout, 'stdout', all_stdout)
  for (actual, expected, label, raisable) in (
      (actual_stderr, expected_stderr, 'STDERR', SVNExpectedStderr),
      (actual_stdout, expected_stdout, 'STDOUT', SVNExpectedStdout)):
    if expected is None:
      continue
    # Pick the exception that best describes the mismatch: regex
    # expectations raise SVNUnmatchedError, literal-line expectations
    # raise SVNLineUnequal, and AnyOutput keeps the per-stream default.
    if isinstance(expected, RegexOutput):
      raisable = svntest.main.SVNUnmatchedError
    elif not isinstance(expected, AnyOutput):
      raisable = svntest.main.SVNLineUnequal
    compare_and_display_lines(message, label, expected, actual, raisable)
def verify_exit_code(message, actual, expected,
                     raisable=SVNUnexpectedExitCode):
  """Compare expected vs. actual exit codes; on mismatch print the
  difference (preceded by MESSAGE iff not None) and raise RAISABLE."""
  if actual != expected:
    display_lines(message, str(expected), str(actual), "Exit Code")
    raise raisable
# A simple dump file parser. While sufficient for the current
# testsuite it doesn't cope with all valid dump files.
class DumpParser:
    """Parse an 'svnadmin dump' stream into a nested dict structure.

    LINES is a list of newline-terminated byte strings, as returned by
    run_and_verify_dump().  parse() returns a dict with the keys 'format'
    and 'uuid' plus one entry per revision, keyed by the revision number
    (as bytes).  While sufficient for the current testsuite it doesn't
    cope with all valid dump files.
    """

    def __init__(self, lines):
        self.current = 0    # index of the next unconsumed line in LINES
        self.lines = lines  # the dump stream: one bytes object per line
        self.parsed = {}    # accumulated result of parse()

    def parse_line(self, regex, required=True):
        """Match REGEX against the current line and advance past it.

        Return the regex's first capture group.  If the line does not
        match, raise SVNDumpParseError when REQUIRED is true, otherwise
        leave the line unconsumed and return None.
        """
        m = re.match(regex, self.lines[self.current])
        if not m:
            if not required:
                return None
            # Include up to ten preceding lines of context to make failures
            # easier to debug.  (Fixed: joining bytes lines needs a bytes
            # separator; ''.join raised TypeError while formatting the
            # error message itself.)
            raise SVNDumpParseError("expected '%s' at line %d\n%s"
                                    "\nPrevious lines:\n%s"
                                    % (regex, self.current,
                                       self.lines[self.current],
                                       b''.join(self.lines[max(0, self.current - 10):self.current])))
        self.current += 1
        return m.group(1)

    def parse_blank(self, required=True):
        """Consume one blank line; raise or return False if it isn't blank."""
        if self.lines[self.current] != b'\n':  # Works on Windows
            if required:
                raise SVNDumpParseError("expected blank at line %d\n%s"
                                        % (self.current, self.lines[self.current]))
            else:
                return False
        self.current += 1
        return True

    def parse_header(self, header=None):
        """Consume one 'Key: value' line and return the (key, value) pair.

        HEADER is unused; the parameter is kept (now optional) for
        backward compatibility with existing callers.
        """
        regex = b'([^:]*): (.*)$'
        m = re.match(regex, self.lines[self.current])
        if not m:
            raise SVNDumpParseError("expected a header at line %d, but found:\n%s"
                                    % (self.current, self.lines[self.current]))
        self.current += 1
        return m.groups()

    def parse_headers(self):
        """Consume headers up to the next blank line; return (key, value) list."""
        headers = []
        while self.lines[self.current] != b'\n':
            key, val = self.parse_header()
            headers.append((key, val))
        return headers

    def parse_boolean(self, header, required):
        """Parse a 'HEADER: false|true' line; return the value or None."""
        return self.parse_line(header + b': (false|true)$', required)

    def parse_format(self):
        """Parse the required dump format version header."""
        return self.parse_line(b'SVN-fs-dump-format-version: ([0-9]+)$')

    def parse_uuid(self):
        """Parse the required repository UUID header."""
        return self.parse_line(b'UUID: ([0-9a-z-]+)$')

    def parse_revision(self):
        """Parse the required 'Revision-number' header."""
        return self.parse_line(b'Revision-number: ([0-9]+)$')

    def parse_prop_delta(self):
        return self.parse_line(b'Prop-delta: (false|true)$', required=False)

    def parse_prop_length(self, required=True):
        return self.parse_line(b'Prop-content-length: ([0-9]+)$', required)

    def parse_content_length(self, required=True):
        return self.parse_line(b'Content-length: ([0-9]+)$', required)

    def parse_path(self):
        """Parse an optional 'Node-path' header; None if absent."""
        path = self.parse_line(b'Node-path: (.*)$', required=False)
        return path

    def parse_kind(self):
        return self.parse_line(b'Node-kind: (.+)$', required=False)

    def parse_action(self):
        """Parse the required 'Node-action' header."""
        return self.parse_line(b'Node-action: ([0-9a-z-]+)$')

    def parse_copyfrom_rev(self):
        return self.parse_line(b'Node-copyfrom-rev: ([0-9]+)$', required=False)

    def parse_copyfrom_path(self):
        """Return the copy-from path, b'' for an empty-valued header, or
        None if the header is absent."""
        path = self.parse_line(b'Node-copyfrom-path: (.+)$', required=False)
        if path is None and self.lines[self.current] == b'Node-copyfrom-path: \n':
            # An empty copy-from path is not matched by the (.+) regex above.
            # (Fixed: the comparison used a str literal, which can never
            # equal a bytes line, so empty paths were reported as missing.)
            self.current += 1
            path = b''
        return path

    def parse_copy_md5(self):
        return self.parse_line(b'Text-copy-source-md5: ([0-9a-z]+)$', required=False)

    def parse_copy_sha1(self):
        return self.parse_line(b'Text-copy-source-sha1: ([0-9a-z]+)$', required=False)

    def parse_text_md5(self):
        return self.parse_line(b'Text-content-md5: ([0-9a-z]+)$', required=False)

    def parse_text_sha1(self):
        return self.parse_line(b'Text-content-sha1: ([0-9a-z]+)$', required=False)

    def parse_text_delta(self):
        return self.parse_line(b'Text-delta: (false|true)$', required=False)

    def parse_text_delta_base_md5(self):
        return self.parse_line(b'Text-delta-base-md5: ([0-9a-f]+)$', required=False)

    def parse_text_delta_base_sha1(self):
        return self.parse_line(b'Text-delta-base-sha1: ([0-9a-f]+)$', required=False)

    def parse_text_length(self):
        return self.parse_line(b'Text-content-length: ([0-9]+)$', required=False)

    def get_props(self):
        """Consume a property block up to PROPS-END; return a key->value
        dict.  Deleted properties ('D' entries) get the value None."""
        props = []
        while not re.match(b'PROPS-END$', self.lines[self.current]):
            props.append(self.lines[self.current])
            self.current += 1
        self.current += 1

        # Split into key/value pairs to do an unordered comparison.
        # This parses the serialized hash under the assumption that it is valid.
        prophash = {}
        curprop = [0]
        while curprop[0] < len(props):
            def read_key_or_value(curprop):
                # klen / vlen
                klen = int(props[curprop[0]].split()[1])
                curprop[0] += 1
                # key / value: accumulate lines until we have KLEN chars plus
                # the terminating newline, which is then stripped off
                key = b''
                while len(key) != klen + 1:
                    key += props[curprop[0]]
                    curprop[0] += 1
                key = key[:-1]
                return key
            if props[curprop[0]].startswith(b'K'):
                key = read_key_or_value(curprop)
                value = read_key_or_value(curprop)
            elif props[curprop[0]].startswith(b'D'):
                key = read_key_or_value(curprop)
                value = None
            else:
                # (Fixed: this was a bare 'raise' with no active exception,
                # which produced an unhelpful RuntimeError.)
                raise SVNDumpParseError("malformed property block entry:\n%s"
                                        % props[curprop[0]])
            prophash[key] = value
        return prophash

    def get_content(self, length):
        """Consume LENGTH bytes of content (plus the trailing newline the
        dump format adds) and return it."""
        content = b''
        while len(content) < length:
            content += self.lines[self.current]
            self.current += 1
        if len(content) == length + 1:
            content = content[:-1]
        elif len(content) != length:
            raise SVNDumpParseError("content length expected %d actual %d at line %d"
                                    % (length, len(content), self.current))
        return content

    def parse_one_node(self):
        """Parse one node record; return (ACTION, NODE-DICT).

        The caller must already have consumed the 'Node-path' header.
        """
        node = {}
        # optional 'kind' and required 'action' must be next
        node['kind'] = self.parse_kind()
        action = self.parse_action()
        # read any remaining headers
        headers_list = self.parse_headers()
        headers = dict(headers_list)
        # Content-length must be last, if present
        if b'Content-length' in headers and headers_list[-1][0] != b'Content-length':
            raise SVNDumpParseError("'Content-length' header is not last, "
                                    "in header block ending at line %d"
                                    % (self.current,))
        # parse the remaining optional headers and store in specific keys in NODE
        for key, header, regex in [
                ('copyfrom_rev', b'Node-copyfrom-rev', b'([0-9]+)$'),
                ('copyfrom_path', b'Node-copyfrom-path', b'(.*)$'),
                ('copy_md5', b'Text-copy-source-md5', b'([0-9a-z]+)$'),
                ('copy_sha1', b'Text-copy-source-sha1', b'([0-9a-z]+)$'),
                ('prop_length', b'Prop-content-length', b'([0-9]+)$'),
                ('text_length', b'Text-content-length', b'([0-9]+)$'),
                ('text_md5', b'Text-content-md5', b'([0-9a-z]+)$'),
                ('text_sha1', b'Text-content-sha1', b'([0-9a-z]+)$'),
                ('content_length', b'Content-length', b'([0-9]+)$'),
                ]:
            if header not in headers:
                node[key] = None
                continue
            m = re.match(regex, headers[header])
            if not m:
                raise SVNDumpParseError("expected '%s' at line %d\n%s"
                                        % (regex, self.current,
                                           self.lines[self.current]))
            node[key] = m.group(1)
        self.parse_blank()
        if node['prop_length']:
            node['props'] = self.get_props()
        if node['text_length']:
            node['content'] = self.get_content(int(node['text_length']))
        # Hard to determine how many blanks is 'correct' (a delete that is
        # followed by an add that is a replace and a copy has one fewer
        # than expected but that can't be predicted until seeing the add)
        # so allow arbitrary number
        blanks = 0
        while self.current < len(self.lines) and self.parse_blank(required=False):
            blanks += 1
        node['blanks'] = blanks
        return action, node

    def parse_all_nodes(self):
        """Parse node records up to the next revision record or EOF.

        Return a dict mapping node path -> {action: node-dict}.
        """
        nodes = {}
        while True:
            if self.current >= len(self.lines):
                break
            path = self.parse_path()
            if path is None:
                break
            if not nodes.get(path):
                nodes[path] = {}
            action, node = self.parse_one_node()
            if nodes[path].get(action):
                raise SVNDumpParseError("duplicate action '%s' for node '%s' at line %d"
                                        % (action, path, self.current))
            nodes[path][action] = node
        return nodes

    def parse_one_revision(self):
        """Parse one revision record; return (REV-NUMBER, REVISION-DICT)."""
        revision = {}
        number = self.parse_revision()
        revision['prop_length'] = self.parse_prop_length()
        revision['content_length'] = self.parse_content_length()
        self.parse_blank()
        revision['props'] = self.get_props()
        self.parse_blank()
        revision['nodes'] = self.parse_all_nodes()
        return number, revision

    def parse_all_revisions(self):
        """Parse revision records until EOF, storing them in self.parsed."""
        while self.current < len(self.lines):
            number, revision = self.parse_one_revision()
            if self.parsed.get(number):
                raise SVNDumpParseError("duplicate revision %d at line %d"
                                        % (number, self.current))
            self.parsed[number] = revision

    def parse(self):
        """Parse the whole dump stream and return the result dict."""
        self.parsed['format'] = self.parse_format()
        self.parse_blank()
        self.parsed['uuid'] = self.parse_uuid()
        self.parse_blank()
        self.parse_all_revisions()
        return self.parsed
def compare_dump_files(message, label, expected, actual,
                       ignore_uuid=False,
                       expect_content_length_always=False,
                       ignore_empty_prop_sections=False,
                       ignore_number_of_blank_lines=False):
    """Parse two dump files EXPECTED and ACTUAL, both of which are lists
    of lines as returned by run_and_verify_dump, and check that the same
    revisions, nodes, properties, etc. are present in both dumps.

    MESSAGE and LABEL are accepted for interface compatibility but are
    not used here.  Raise svntest.Failure when the parsed dumps differ
    beyond the differences the keyword flags ask us to ignore.
    """
    parsed_expected = DumpParser(expected).parse()
    parsed_actual = DumpParser(actual).parse()

    if ignore_uuid:
        parsed_expected['uuid'] = '<ignored>'
        parsed_actual['uuid'] = '<ignored>'

    # Normalize both parsed trees in place so the expected differences do
    # not show up in the comparison below.
    for parsed in [parsed_expected, parsed_actual]:
        for rev_name, rev_record in parsed.items():
            #print "Found %s" % (rev_name,)
            # Only revision records are dicts with a 'nodes' entry; the
            # 'format' and 'uuid' entries are plain values and are skipped.
            # (Fixed: the old test was "b'nodes' in rev_record", which never
            # matched the str-keyed revision dicts, and raised TypeError on
            # the str '<ignored>' uuid when ignore_uuid was set.)
            if isinstance(rev_record, dict) and 'nodes' in rev_record:
                #print "Found %s.%s" % (rev_name, 'nodes')
                for path_name, path_record in rev_record['nodes'].items():
                    #print "Found %s.%s.%s" % (rev_name, 'nodes', path_name)
                    for action_name, action_record in path_record.items():
                        #print "Found %s.%s.%s.%s" % (rev_name, 'nodes', path_name, action_name)
                        if expect_content_length_always:
                            if action_record.get('content_length') == None:
                                #print 'Adding: %s.%s.%s.%s.%s' % (rev_name, 'nodes', path_name, action_name, 'content_length=0')
                                action_record['content_length'] = '0'
                        if ignore_empty_prop_sections:
                            # A prop length of 10 is an empty section: just
                            # the 'PROPS-END\n' terminator.
                            if action_record.get('prop_length') == '10':
                                #print 'Removing: %s.%s.%s.%s.%s' % (rev_name, 'nodes', path_name, action_name, 'prop_length')
                                action_record['prop_length'] = None
                                del action_record['props']
                                old_content_length = int(action_record['content_length'])
                                action_record['content_length'] = str(old_content_length - 10)
                        if ignore_number_of_blank_lines:
                            action_record['blanks'] = 0

    if parsed_expected != parsed_actual:
        print('DIFF of raw dumpfiles (including expected differences)')
        print(''.join(ndiff(expected, actual)))
        raise svntest.Failure('DIFF of parsed dumpfiles (ignoring expected differences)\n'
                              + '\n'.join(ndiff(
                                  pprint.pformat(parsed_expected).splitlines(),
                                  pprint.pformat(parsed_actual).splitlines())))
##########################################################################################
## diff verifications
def is_absolute_url(target):
    """Return True if TARGET starts with one of the URL schemes that
    Subversion treats as absolute (file, http, https, svn, svn+ssh)."""
    # str.startswith accepts a tuple of alternatives.
    schemes = ('file://', 'http://', 'https://', 'svn://', 'svn+ssh://')
    return target.startswith(schemes)
def make_diff_header(path, old_tag, new_tag, src_label=None, dst_label=None):
    """Build the expected unidiff header for PATH.

    OLD_TAG and NEW_TAG describe the two compared versions and appear in
    parentheses on the '---' and '+++' lines.  SRC_LABEL and DST_LABEL,
    when given, are paths or URLs appended to the respective labels (used
    when diffing against the repository or two arbitrary targets).
    Return the header as a list of newline-terminated strings.
    """
    def decorate(label):
        # Normalize backslashes; working-copy style labels are abbreviated
        # with a '.../' prefix, then wrapped in a tab and parentheses.
        if not label:
            return ''
        label = label.replace('\\', '/')
        if not is_absolute_url(label):
            label = '.../' + label
        return '\t(' + label + ')'

    src = decorate(src_label)
    dst = decorate(dst_label)
    shown = path.replace('\\', '/')
    return [
        "Index: %s\n" % shown,
        "===================================================================\n",
        "--- %s%s\t(%s)\n" % (shown, src, old_tag),
        "+++ %s%s\t(%s)\n" % (shown, dst, new_tag),
    ]
def make_no_diff_deleted_header(path, old_tag, new_tag):
    """Build the expected diff header for a deleted file PATH in
    'no-diff-deleted' mode (no body follows the header in that mode).

    OLD_TAG and NEW_TAG are accepted for symmetry with make_diff_header
    but do not appear in the output.  Return a list of newline-terminated
    strings.
    """
    shown = path.replace('\\', '/')
    return [
        "Index: %s (deleted)\n" % shown,
        "===================================================================\n",
    ]
def make_git_diff_header(target_path, repos_relpath,
                         old_tag, new_tag, add=False, src_label=None,
                         dst_label=None, delete=False, text_changes=True,
                         cp=False, mv=False, copyfrom_path=None,
                         copyfrom_rev=None):
    """Build the expected 'git diff' header lines for TARGET_PATH.

    REPOS_RELPATH locates the target relative to the repository root.
    OLD_TAG/NEW_TAG describe the compared versions ("revision X",
    "working copy").  SRC_LABEL/DST_LABEL are optional repository labels.
    Exactly one of ADD, DELETE, CP, MV may be set to select the operation;
    none of them selects a plain modification.  COPYFROM_PATH (and
    optionally COPYFROM_REV) give the source of a copy or move.
    TEXT_CHANGES controls whether the '---'/'+++' lines are included.
    Return a list of newline-terminated strings.
    """
    shown = target_path.replace('\\', '/')
    src = '\t(.../' + src_label.replace('\\', '/') + ')' if src_label else ''
    dst = '\t(.../' + dst_label.replace('\\', '/') + ')' if dst_label else ''

    header = [
        "Index: %s\n" % shown,
        "===================================================================\n",
    ]
    if add:
        header.append("diff --git a/%s b/%s\n" % (repos_relpath, repos_relpath))
        header.append("new file mode 100644\n")
        if text_changes:
            header.append("--- /dev/null\t(%s)\n" % old_tag)
            header.append("+++ b/%s%s\t(%s)\n" % (repos_relpath, dst, new_tag))
    elif delete:
        header.append("diff --git a/%s b/%s\n" % (repos_relpath, repos_relpath))
        header.append("deleted file mode 100644\n")
        if text_changes:
            header.append("--- a/%s%s\t(%s)\n" % (repos_relpath, src, old_tag))
            header.append("+++ /dev/null\t(%s)\n" % new_tag)
    elif cp:
        # The copy source may carry a peg revision ("@REV").
        rev_suffix = '@' + copyfrom_rev if copyfrom_rev else ''
        header.append("diff --git a/%s b/%s\n" % (copyfrom_path, repos_relpath))
        header.append("copy from %s%s\n" % (copyfrom_path, rev_suffix))
        header.append("copy to %s\n" % repos_relpath)
        if text_changes:
            header.append("--- a/%s%s\t(%s)\n" % (copyfrom_path, src, old_tag))
            header.append("+++ b/%s\t(%s)\n" % (repos_relpath, new_tag))
    elif mv:
        header.append("diff --git a/%s b/%s\n" % (copyfrom_path, shown))
        header.append("rename from %s\n" % copyfrom_path)
        header.append("rename to %s\n" % repos_relpath)
        if text_changes:
            header.append("--- a/%s%s\t(%s)\n" % (copyfrom_path, src, old_tag))
            header.append("+++ b/%s\t(%s)\n" % (repos_relpath, new_tag))
    else:
        # Plain in-place modification.
        header.append("diff --git a/%s b/%s\n" % (repos_relpath, repos_relpath))
        header.append("--- a/%s%s\t(%s)\n" % (repos_relpath, src, old_tag))
        header.append("+++ b/%s%s\t(%s)\n" % (repos_relpath, dst, new_tag))
    return header
def make_diff_prop_header(path):
    """Build the sub-header that introduces property changes on PATH.

    Return a list of newline-terminated strings.
    """
    shown = path.replace('\\', '/')
    return [
        "\n",
        "Property changes on: %s\n" % shown,
        "___________________________________________________________________\n",
    ]
def make_diff_prop_val(plus_minus, pval):
    """Return the diff lines for property value PVAL, prefixed with
    PLUS_MINUS ('+' or '-').

    A value that is non-empty but lacks a trailing newline gets one added,
    followed by the conventional '\\ No newline ...' marker line.
    """
    if pval and not pval.endswith('\n'):
        return [plus_minus + pval + "\n",
                "\\ No newline at end of property\n"]
    return [plus_minus + pval]
def make_diff_prop_deleted(pname, pval):
    """Return the diff lines for deleting property PNAME whose old value
    was PVAL (a single string without embedded newlines)."""
    intro = [
        "Deleted: %s\n" % pname,
        "## -1 +0,0 ##\n",
    ]
    return intro + make_diff_prop_val("-", pval)
def make_diff_prop_added(pname, pval):
    """Return the diff lines for adding property PNAME with new value
    PVAL (a single string without embedded newlines)."""
    intro = [
        "Added: %s\n" % pname,
        "## -0,0 +1 ##\n",
    ]
    return intro + make_diff_prop_val("+", pval)
def make_diff_prop_modified(pname, pval1, pval2):
    """Return the diff lines for changing property PNAME from PVAL1 to
    PVAL2.

    Each value is a single string without embedded newlines.  A trailing
    newline is significant: without one, make_diff_prop_val appends the
    '\\ No newline at end of property' marker line.
    """
    intro = [
        "Modified: %s\n" % pname,
        "## -1 +1 ##\n",
    ]
    return intro + make_diff_prop_val("-", pval1) + make_diff_prop_val("+", pval2)
|
{
"content_hash": "b723bdc635917134cedb5769c6bd7471",
"timestamp": "",
"source": "github",
"line_count": 931,
"max_line_length": 113,
"avg_line_length": 35.39634801288937,
"alnum_prop": 0.6195909449535717,
"repo_name": "YueLinHo/Subversion",
"id": "98682a2461bd5b2c26fa5fa3c7acec8fd5691705",
"size": "34138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "subversion/tests/cmdline/svntest/verify.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2407"
},
{
"name": "C",
"bytes": "23344368"
},
{
"name": "C++",
"bytes": "1110396"
},
{
"name": "CSS",
"bytes": "1216"
},
{
"name": "Emacs Lisp",
"bytes": "401177"
},
{
"name": "HTML",
"bytes": "404487"
},
{
"name": "Java",
"bytes": "1698548"
},
{
"name": "M4",
"bytes": "204671"
},
{
"name": "Makefile",
"bytes": "50827"
},
{
"name": "Objective-C",
"bytes": "534640"
},
{
"name": "PLSQL",
"bytes": "1622"
},
{
"name": "PLpgSQL",
"bytes": "4534"
},
{
"name": "Perl",
"bytes": "395610"
},
{
"name": "Python",
"bytes": "6205629"
},
{
"name": "Roff",
"bytes": "33424"
},
{
"name": "Ruby",
"bytes": "437540"
},
{
"name": "Shell",
"bytes": "196621"
},
{
"name": "Vim script",
"bytes": "1123"
},
{
"name": "XSLT",
"bytes": "24950"
}
],
"symlink_target": ""
}
|
"""Research
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1Tbc2lRq0hTZRSlh74SbIAStMMLDBU210
"""
# --- Colab environment setup ---------------------------------------------
from google.colab import drive
# Mount Google Drive so the CSV inputs used below are reachable.
drive.mount('/content/gdrive')
import sys
# Make the project folder on Drive importable.
customized_path_to_homework = "/content/gdrive/My Drive/Research"
sys.path.append(customized_path_to_homework)
# Commented out IPython magic to ensure Python compatibility.
import os, sys
import pandas as pd
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from datetime import datetime as dt, timedelta
import warnings
import seaborn as sns
sys.path.insert(0,'../')
# %load_ext autoreload
# %autoreload 2
# Notebook-style analysis: silence pandas chained-assignment and similar
# warnings for the rest of the script.
warnings.filterwarnings('ignore')
"""**Load data**"""
file_name = "cs4114_spring_2021"
data = pd.read_csv("/content/gdrive/My Drive/Research/" + file_name + "_merged_result_unannotated.csv")
data.loc[(data['Event name'] == 'Other event') & (data['Event Description'].str.contains("PE") == True) & (data['Number of events'] > 1), "Event name"] = "PE event"
sorted_data = pd.read_csv("/content/gdrive/My Drive/Research/" + file_name + "_sorted.csv")
pi_attempts = pd.read_csv("/content/gdrive/My Drive/Research/pi_attempts.csv")
users = data['user ID'].unique()
df_dict = {elem: pd.DataFrame for elem in users}
for user in df_dict.keys():
df_dict[user] = data[:][data['user ID'] == user]
# drop students
data = data[(data['user ID'] != 6616) & (data['user ID'] != 58)]
sorted_data = sorted_data[(sorted_data['user_id'] != 6616) & (sorted_data['user_id'] != 58)]
data['user ID'].unique()
"""Number of rows for raw data & abstracted data"""
data.groupby('user ID')['session'].max()
print("Number of rows for raw data: {}".format(len(data.index)))
print("Number of rows for abstracted data: {}".format(len(sorted_data.index)))
print("Number of sessions: {}".format(data.groupby('user ID')['session'].max().sum()))
"""Top 10 most used frames (Based on number of each frame event counts)"""
frames_df = sorted_data[((sorted_data['name'] == 'jsav-forward') | (sorted_data['name'] == 'jsav-backward') | (sorted_data['name'] == 'jsav-begin') | (sorted_data['name'] == 'jsav-end')) & (sorted_data['ex_type'] != 'pe')]
# frames_df = frames_df[frames_df['action_time'] != frames_df['action_time']]
frames_df['description'] = frames_df['description'].str.replace('{|}|"','')
frames_df[['ev_num','current','total']] = frames_df['description'].str.split(',',expand=True)
frames_df['current'] = frames_df['current'].str.replace('currentStep:','')
frames_df['total'] = frames_df['total'].str.replace('totalSteps:','')
frames_df['current'] = frames_df['current'].astype(int)
frames_df['total'] = frames_df['total'].astype(int)
frames_df = frames_df.sort_values(['action_time', 'current'])
frames_df = frames_df.drop_duplicates(subset=['action_time', 'short_name'], keep=False)
#frames_df = frames_df[~frames_df[['action_time', 'short_name']].duplicated() | ~frames_df['name'].eq('jsav-forward')]
# top_frames = frames_df[['short_name','user_id']].groupby(['short_name'])['user_id'].count().reset_index(name='count').sort_values(['count'], ascending=False)
top_frames = frames_df['short_name'].value_counts()
top_10_frames = top_frames.head(10).index.tolist()
top_10_frames
# Only select frames data
frames = data[(data['Event name'] == 'FF event')]
# 'Action Time' is stored as text like "12.0 seconds"; make it an int.
frames['Action Time'] = frames["Action Time"].str.replace('.0 seconds', '').astype(int)
# Calculate total time and total event for each student
# (0 < t < 600 filters out idle outliers and zero-length events).
totalTime = frames[(frames['Action Time'] > 0) & (frames['Action Time'] < 600)].groupby('user ID')['Action Time'].sum()
totalEvent = frames[(frames['Action Time'] > 0) & (frames['Action Time'] < 600)].groupby('user ID')['Number of events'].sum()
avg_time_spent_per_user = totalTime / totalEvent
user_count = totalTime.count()
"""Bar chart - sorted by avgerage time spent on one slide (in top 10 frameset)"""
ax = pd.DataFrame(avg_time_spent_per_user.items(), columns=['user_id', 'avg_time']).sort_values('avg_time')
ax.plot.bar(x='user_id', y='avg_time', rot=90)
print("Average time (in sec) spend per slide: {} seconds".format(avg_time_spent_per_user.sum() / user_count))
# Count jsav-forward / jsav-backward events per user.  Rows are assumed to
# arrive grouped by user_id; counts are flushed whenever the id changes and
# once more after the loop for the final user.
sorted_dict = sorted_data.to_dict('records')
d = {}
forward_count = 0
backward_count = 0
user = sorted_dict[0]['user_id']
for row in sorted_dict:
    if user not in d:
        d[user] = {}
    if user != row['user_id']:
        # Flush the finished user's totals, then start the new user.
        d[user]['forward'] = forward_count
        d[user]['backward'] = backward_count
        forward_count = 0
        backward_count = 0
        user = row['user_id']
    if row['name'] == 'jsav-forward':
        forward_count += 1
    elif row['name'] == 'jsav-backward':
        backward_count += 1
# Flush the last user's totals.
# NOTE(review): if the very last row is the only row of a new user, d[user]
# was never initialized and this raises KeyError -- confirm input ordering.
d[user]['forward'] = forward_count
d[user]['backward'] = backward_count
names = list(d.keys())
f = [ x['forward'] for x in d.values() ]
b = [ x['backward'] for x in d.values() ]
# Side-by-side bars: width 1 with -/+0.5 offsets puts the two series next
# to each other for every user.
plt.bar(np.arange(len(d)) - 0.5, f, 1, color='red', label='forward')
plt.bar(np.arange(len(d)) + 0.5, b, 1, color='green', label='backward')
plt.plot()
plt.xlabel("Users")
plt.ylabel("Event count")
plt.title("Number of forward and backward events")
plt.legend()
plt.show()
print("Average forward event per student: {}".format(np.sum(f) / len(d)))
print("Average backward event per student: {}".format(np.sum(b) / len(d)))
# Reading-time events store text like "Reading time: N.N s" in 'Action Time';
# slice off the prefix/suffix and convert the number to float.
temp = data[data['Action Time'].str.contains('Reading time') == True]
temp['Action Time'] = temp['Action Time'].str[13:-3].astype(float)
# Keep plausible reading times only (0 < t < 4000 seconds).
temp2 = temp[ (temp['Action Time'] > 0) & (temp['Action Time'] < 4000) ]
temp2
#df['col'] = df['col'].str[:9]
print(temp2['Action Time'].describe())
p = temp2['Action Time'].value_counts().plot(x="Action Time", y="Count")
p.set_xlabel("Reading time in seconds")
p.set_ylabel("Number of Instances")
#data.head(10)
"""Histogram of idle time (Session threshold reason)
1. Delta times between consecutive events
---
"""
sorted_data['action_time'] = pd.to_datetime(sorted_data['action_time'])
sorted_data['delta_time'] = sorted_data.groupby('user_id')['action_time'].diff()
sorted_data['delta_time'] = sorted_data['delta_time'].dt.total_seconds()
temp_sorted_data = sorted_data
temp_sorted_data = temp_sorted_data[ (temp_sorted_data['delta_time'] > 0)]
p = temp_sorted_data['delta_time'].plot.hist(bins= 200, range=(0, 100))
p.set_xlabel("Delta times (seconds)")
p.set_ylabel("Number of Instances")
p.set_title("Delta times histogram")
temp_sorted_data['delta_time'].describe()
"""Behavior 4 PI Credit Seeking"""
behavior4 = frames_df
behavior4['action_time'] = pd.to_datetime(behavior4['action_time'])
behavior4['delta_time'] = behavior4.groupby('user_id')['action_time'].diff()
behavior4['delta_time'] = behavior4['delta_time'].dt.total_seconds()
total_frames_event = behavior4.groupby('user_id')['id'].count()
credit_seeking_event = behavior4.groupby('user_id')['delta_time'].apply(lambda x: (x <= 8).sum()).reset_index(name='count')
behavior4.groupby('user_id')['id'].agg(['count'])
print("======== Total PIFrames Credit Seeking related event number=======")
behavior4.groupby('user_id')['delta_time'].apply(lambda x: (x <= 8).sum()).reset_index(name='count')
|
{
"content_hash": "b7d7f2fd9794d7a46a40301476214c51",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 223,
"avg_line_length": 40.0625,
"alnum_prop": 0.678059849666714,
"repo_name": "OpenDSA/Analysis",
"id": "a97ed52ff564b74d63d8f740426f6450c145debf",
"size": "7075",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Samnyeong/cs4114_analysis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Hack",
"bytes": "18045"
},
{
"name": "Jupyter Notebook",
"bytes": "3417031"
},
{
"name": "PHP",
"bytes": "121693"
},
{
"name": "Python",
"bytes": "36111"
}
],
"symlink_target": ""
}
|
import pickle
from moviepy.editor import *
from moviepy.video.tools.tracking import manual_tracking, to_fxfy

# LOAD THE CLIP (subclip 6'51 - 7'01 of a chaplin movie)
clip = VideoFileClip("../../videos/chaplin.mp4").subclip((6,51.7),(7,01.3))

# MANUAL TRACKING OF THE HEAD

# the three next lines are for the manual tracking and its saving
# to a file, it must be commented once the tracking has been done
# (after the first run of the script for instance).
# Note that we save the list (ti,xi,yi), not the functions fx and fy
# (that we will need) because they have dependencies.

#txy, (fx,fy) = manual_tracking(clip, fps=6)
#with open("../../chaplin_txy.dat",'wb') as f:
#    pickle.dump(txy, f)

# IF THE MANUAL TRACKING HAS BEEN PREVIOUSLY DONE,
# LOAD THE TRACKING DATA AND CONVERT IT TO FUNCTIONS x(t),fy(t)

# BUG FIX: pickle streams are binary -- the file must be opened in 'rb'
# (and written in 'wb' above); text mode fails under Python 3.
with open("../../chaplin_txy.dat",'rb') as f:
    fx,fy = to_fxfy( pickle.load(f) )

# BLUR CHAPLIN'S HEAD IN THE CLIP
clip_blurred = clip.fx( vfx.headblur, fx, fy, 25)

# Generate the text, put in on a grey background
txt = TextClip("Hey you ! \n You're blurry!", color='grey70',
               size = clip.size, bg_color='grey20',
               font = "Century-Schoolbook-Italic", fontsize=40)

# Concatenate the Chaplin clip with the text clip, add audio
final = concatenate_videoclips([clip_blurred,txt.set_duration(3)]).\
    set_audio(clip.audio)

# We write the result to a file. Here we raise the bitrate so that
# the final video is not too ugly.
final.write_videofile('../../blurredChaplin.avi', bitrate="3000k")
|
{
"content_hash": "4bb73207514241cb95216c2652e37f42",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 75,
"avg_line_length": 31.058823529411764,
"alnum_prop": 0.6742424242424242,
"repo_name": "misterbisson/moviepy",
"id": "5d55f04bce3663c5b14a639a5ec4020c8280014d",
"size": "1584",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "examples/headblur.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "272242"
}
],
"symlink_target": ""
}
|
"""
Management of Zabbix host groups.
:codeauthor: Jiri Kotlin <jiri.kotlin@ultimum.io>
"""
def __virtual__():
    """
    Only make these states available if Zabbix module is available.
    """
    # The loader injects __salt__; the hostgroup_create function is a good
    # proxy for "the zabbix execution module loaded successfully".
    if "zabbix.hostgroup_create" not in __salt__:
        return (False, "zabbix module could not be loaded")
    return True
def present(name, **kwargs):
    """
    Ensures that the host group exists, eventually creates new host group.

    .. versionadded:: 2016.3.0

    :param name: name of the host group
    :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
    :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
    :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)

    .. code-block:: yaml

        create_testing_host_group:
            zabbix_hostgroup.present:
                - name: 'My hostgroup name'
    """
    # Forward only the recognized connection overrides to the module calls.
    connection_args = {
        key: kwargs[key]
        for key in ("_connection_user", "_connection_password", "_connection_url")
        if key in kwargs
    }

    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    # Comment and change messages
    comment_exists = "Host group {} already exists.".format(name)
    comment_created = "Host group {} created.".format(name)
    comment_notcreated = "Unable to create host group: {}. ".format(name)
    changes_created = {
        name: {
            "old": "Host group {} does not exist.".format(name),
            "new": "Host group {} created.".format(name),
        }
    }

    exists = __salt__["zabbix.hostgroup_exists"](name, **connection_args)

    # Dry run, test=true mode: report what would happen without changing anything.
    if __opts__["test"]:
        if exists:
            ret["result"] = True
            ret["comment"] = comment_exists
        else:
            ret["result"] = None
            ret["comment"] = comment_created
            ret["changes"] = changes_created
        return ret

    if exists:
        ret["result"] = True
        ret["comment"] = comment_exists
        return ret

    creation = __salt__["zabbix.hostgroup_create"](name, **connection_args)
    if "error" in creation:
        ret["result"] = False
        ret["comment"] = comment_notcreated + str(creation["error"])
    else:
        ret["result"] = True
        ret["comment"] = comment_created
        ret["changes"] = changes_created
    return ret
def absent(name, **kwargs):
    """
    Ensures that the host group does not exist, eventually delete host group.

    .. versionadded:: 2016.3.0

    :param name: name of the host group
    :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
    :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
    :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)

    .. code-block:: yaml

        delete_testing_host_group:
            zabbix_hostgroup.absent:
                - name: 'My hostgroup name'
    """
    ret = {"name": name, "changes": {}, "result": False, "comment": ""}

    # Comment and change messages
    comment_hostgroup_deleted = "Host group {} deleted.".format(name)
    comment_hostgroup_notdeleted = "Unable to delete host group: {}. ".format(name)
    comment_hostgroup_notexists = "Host group {} does not exist.".format(name)
    changes_hostgroup_deleted = {
        name: {
            "old": "Host group {} exists.".format(name),
            "new": "Host group {} deleted.".format(name),
        }
    }
    connection_args = {}
    if "_connection_user" in kwargs:
        connection_args["_connection_user"] = kwargs["_connection_user"]
    if "_connection_password" in kwargs:
        connection_args["_connection_password"] = kwargs["_connection_password"]
    if "_connection_url" in kwargs:
        connection_args["_connection_url"] = kwargs["_connection_url"]

    hostgroup_exists = __salt__["zabbix.hostgroup_exists"](name, **connection_args)

    # Dry run, test=true mode: report what would happen without deleting.
    if __opts__["test"]:
        if not hostgroup_exists:
            ret["result"] = True
            ret["comment"] = comment_hostgroup_notexists
        else:
            ret["result"] = None
            ret["comment"] = comment_hostgroup_deleted
            ret["changes"] = changes_hostgroup_deleted
        return ret

    hostgroup_get = __salt__["zabbix.hostgroup_get"](name, **connection_args)
    if not hostgroup_get:
        ret["result"] = True
        ret["comment"] = comment_hostgroup_notexists
    else:
        try:
            groupid = hostgroup_get[0]["groupid"]
            hostgroup_delete = __salt__["zabbix.hostgroup_delete"](
                groupid, **connection_args
            )
        except KeyError:
            hostgroup_delete = False
        if hostgroup_delete and "error" not in hostgroup_delete:
            ret["result"] = True
            ret["comment"] = comment_hostgroup_deleted
            ret["changes"] = changes_hostgroup_deleted
        else:
            # BUG FIX: hostgroup_delete is False when the hostgroup record
            # lacked a 'groupid' key; the old code unconditionally indexed
            # hostgroup_delete["error"], raising TypeError on that path.
            if hostgroup_delete:
                error = hostgroup_delete["error"]
            else:
                error = "unexpected hostgroup data: {}".format(hostgroup_get)
            ret["result"] = False
            ret["comment"] = comment_hostgroup_notdeleted + str(error)
    return ret
|
{
"content_hash": "687ef163d94bd1bc93693638b0101aa0",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 119,
"avg_line_length": 34.58682634730539,
"alnum_prop": 0.6028393351800554,
"repo_name": "saltstack/salt",
"id": "9822f160eec6057f65e0c96359e28ab676892a10",
"size": "5776",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "salt/states/zabbix_hostgroup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14911"
},
{
"name": "C",
"bytes": "1571"
},
{
"name": "Cython",
"bytes": "1458"
},
{
"name": "Dockerfile",
"bytes": "184"
},
{
"name": "Groovy",
"bytes": "12318"
},
{
"name": "HCL",
"bytes": "257"
},
{
"name": "HTML",
"bytes": "8031"
},
{
"name": "Jinja",
"bytes": "45598"
},
{
"name": "Makefile",
"bytes": "713"
},
{
"name": "NSIS",
"bytes": "76572"
},
{
"name": "PowerShell",
"bytes": "75891"
},
{
"name": "Python",
"bytes": "41444811"
},
{
"name": "Rich Text Format",
"bytes": "6242"
},
{
"name": "Roff",
"bytes": "191"
},
{
"name": "Ruby",
"bytes": "961"
},
{
"name": "SaltStack",
"bytes": "35856"
},
{
"name": "Scheme",
"bytes": "895"
},
{
"name": "Scilab",
"bytes": "1147"
},
{
"name": "Shell",
"bytes": "524917"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.db.models import permalink
from django.utils.translation import ugettext_lazy as _
from model_utils.models import TimeStampedModel
from model_utils.fields import MonitorField, StatusField
from model_utils import Choices
from audit_log.models import AuthStampedModel
from ananta.models import NextPrevMixin
from contacts.models import Person
class Gift(AuthStampedModel, NextPrevMixin, TimeStampedModel):
    """A kind of gift that can be given to a person.

    Mixins supply shared behaviour: AuthStampedModel records who
    created/modified the row, TimeStampedModel records when, and
    NextPrevMixin adds next/previous record navigation.
    """
    name = models.CharField(max_length=100,
                            help_text=_('Enter gift name.'))
    description = models.TextField(blank=True)

    @permalink
    def get_absolute_url(self):
        # Named-URL tuple resolved to a URL by the @permalink decorator.
        return ('gifts:gift:detail', None, {'pk': self.pk})

    def __str__(self):
        return self.name

    class Meta:
        # Alphabetical default ordering for querysets and admin listings.
        ordering = ['name']
class GiftGiven(AuthStampedModel, NextPrevMixin, TimeStampedModel):
    """A record of a specific Gift given to a Person, with delivery status.

    Audit/timestamp behaviour comes from the same mixins as Gift.
    """
    person = models.ForeignKey(Person, verbose_name="Person",
                               related_name='gifts')
    gift = models.ForeignKey(Gift, verbose_name="Gift", related_name='gifts')
    # Allowed delivery states, consumed by the StatusField below.
    STATUS = Choices(
        ('sent', _('Sent')),
        ('returned', _('Returned')),
        ('delivered', _('Delivered')),
    )
    status = StatusField()
    # model_utils MonitorField: tracks when `status` last changed.
    status_changed = MonitorField(monitor='status')
    note = models.TextField(_("Note"), blank=True)

    @permalink
    def get_absolute_url(self):
        # Named-URL tuple resolved to a URL by the @permalink decorator.
        return ('gifts:gift_given:detail', None, {'pk': self.pk})

    def __str__(self):
        return '%s for %s' % (self.gift.name, self.person.mixed_name)
|
{
"content_hash": "420144d7f41c5a8f10313a547b224343",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 77,
"avg_line_length": 30.78,
"alnum_prop": 0.6575698505523067,
"repo_name": "mayapurmedia/tovp",
"id": "a039217a8d10cd45d71a982adf297f4efbd5410c",
"size": "1539",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tovp/gifts/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "190169"
},
{
"name": "HTML",
"bytes": "281143"
},
{
"name": "JavaScript",
"bytes": "2888"
},
{
"name": "Python",
"bytes": "504316"
}
],
"symlink_target": ""
}
|
import datetime
from oslo_utils import timeutils
import testtools
from ec2api.api import common
from ec2api.api import ec2utils
from ec2api import exception
from ec2api.tests.unit import tools
class EC2ValidationTestCase(testtools.TestCase):
    """Test case for various validations."""

    def test_validate_net(self):
        validator = common.Validator()
        # Well-formed addresses and CIDRs must pass without raising.
        validator.ip('10.10.0.0')
        validator.cidr('10.10.0.0/24')
        validator.subnet_cidr('10.10.0.0/24')
        validator.vpc_cidr('10.10.0.0/24')
        # Malformed CIDRs must all be rejected.
        bad_cidrs = ('fake', '10.10/24', '10.10.0.0.0/24', '10.10.0.0',
                     ' 10.10.0.0/24', '10.10.0.0/24 ', '.10.10.0.0/24 ',
                     '-1.10.0.0/24', '10.256.0.0/24', '10.10.0.0/33',
                     '10.10.0.0/-1')
        for bad in bad_cidrs:
            self.assertRaises(exception.InvalidParameterValue,
                              validator.cidr, bad)
        self.assertRaises(exception.InvalidParameterValue,
                          validator.ip, '10.256.0.0')
        self.assertRaises(exception.InvalidSubnetRange,
                          validator.subnet_cidr, '10.10.0.0/15')
        self.assertRaises(exception.InvalidVpcRange,
                          validator.vpc_cidr, '10.10.0.0/29')

    def test_validate_id(self):
        validator = common.Validator()
        # Every well-formed resource id must validate cleanly.
        validator.ec2_id('i-00000001')
        validator.i_id('i-00000001')
        validator.ami_id('ami-00000001')
        validator.eni_id('eni-00000001')
        validator.sg_id('sg-00000001')
        validator.subnet_id('subnet-00000001')
        validator.igw_id('igw-00000001')
        validator.rtb_id('rtb-00000001')
        validator.vpc_id('vpc-00000001')
        validator.vol_id('vol-00000001')
        validator.snap_id('snap-00000001')
        validator.dopt_id('dopt-00000001')
        validator.eni_attach_id('eni-attach-00000001')
        validator.eipalloc_id('eipalloc-00000001')
        validator.eipassoc_id('eipassoc-00000001')
        validator.rtbassoc_id('rtbassoc-00000001')

        def assert_all_invalid(func, ids):
            for bad in ids:
                self.assertRaises(exception.InvalidParameterValue, func, bad)

        prefixed_validators = (
            validator.ami_id, validator.eni_id, validator.sg_id,
            validator.subnet_id, validator.igw_id, validator.rtb_id,
            validator.vpc_id, validator.vol_id, validator.snap_id,
            validator.dopt_id, validator.eni_attach_id,
            validator.eipalloc_id, validator.eipassoc_id,
            validator.rtbassoc_id)
        for func in prefixed_validators:
            assert_all_invalid(
                func, ['1234', 'a-1111', '', 'i-1111', 'i-rrr', 'foobar'])
        assert_all_invalid(
            validator.i_id,
            ['1234', 'a-1111', '', 'vpc-1111', 'vpc-rrr', 'foobar'])
        assert_all_invalid(validator.ec2_id, ['1234', '', 'foobar'])

    def test_validate_multi(self):
        validator = common.Validator()
        total = {'value': 0}

        def accumulate(item):
            # NOTE(Alex) Because nonlocal is only in python 3.0
            total['value'] += item

        validator.multi([1, 2, 3, 4], accumulate)
        self.assertEqual(total['value'], 10)
        self.assertRaises(exception.InvalidParameterValue,
                          validator.multi, 'not a list', accumulate)

    def test_validate_primitive(self):
        validator = common.Validator()
        # Values of the expected type pass silently.
        validator.int(5)
        validator.bool(True)
        validator.str('str')
        validator.str64('str')
        validator.str255('str')
        # Wrong types / over-length strings must raise.
        for bad, func in (('str', validator.int),
                          ('str', validator.bool),
                          (5, validator.str),
                          ('x' * 65, validator.str64),
                          ('x' * 256, validator.str255)):
            self.assertRaises(exception.ValidationError, func, bad)

    def test_validate_security_group(self):
        # Outside a VPC, punctuation-heavy names are allowed.
        validator = common.Validator(params={})
        validator.security_group_str('name')
        validator.security_group_str('aa #^% -=99')
        validator = common.Validator(params={'vpc_id': 'vpc_id'})
        validator.security_group_str('name')
        validator = common.Validator(params={})
        for bad in ('aa \t\x01\x02\x7f', 'x' * 256):
            self.assertRaises(exception.ValidationError,
                              validator.security_group_str, bad)
        # Inside a VPC the character set is stricter.
        validator = common.Validator(params={'vpc_id': 'vpc_id'})
        for bad in ('aa #^% -=99', 'x' * 256):
            self.assertRaises(exception.ValidationError,
                              validator.security_group_str, bad)
class EC2TimestampValidationTestCase(testtools.TestCase):
    """Test case for EC2 request timestamp validation."""

    def test_validate_ec2_timestamp_valid(self):
        request = {'Timestamp': '2011-04-22T11:29:49Z'}
        self.assertFalse(ec2utils.is_ec2_timestamp_expired(request))

    @tools.screen_all_logs
    def test_validate_ec2_timestamp_old_format(self):
        # A timestamp missing the trailing 'Z' is treated as expired.
        request = {'Timestamp': '2011-04-22T11:29:49'}
        self.assertTrue(ec2utils.is_ec2_timestamp_expired(request))

    def test_validate_ec2_timestamp_not_set(self):
        self.assertFalse(ec2utils.is_ec2_timestamp_expired({}))

    def test_validate_ec2_timestamp_ms_time_regex(self):
        matcher = ec2utils._ms_time_regex.match
        # 3- and 6-digit fractional seconds are accepted...
        self.assertIsNotNone(matcher('2011-04-22T11:29:49.123Z'))
        self.assertIsNotNone(matcher('2011-04-22T11:29:49.123456Z'))
        # ...but 7 digits, a missing 'Z', or no fraction at all are not.
        self.assertIsNone(matcher('2011-04-22T11:29:49.1234567Z'))
        self.assertIsNone(matcher('2011-04-22T11:29:49.123'))
        self.assertIsNone(matcher('2011-04-22T11:29:49Z'))

    @tools.screen_all_logs
    def test_validate_ec2_timestamp_aws_sdk_format(self):
        request = {'Timestamp': '2011-04-22T11:29:49.123Z'}
        self.assertFalse(ec2utils.is_ec2_timestamp_expired(request))
        self.assertTrue(
            ec2utils.is_ec2_timestamp_expired(request, expires=300))

    @tools.screen_all_logs
    def test_validate_ec2_timestamp_invalid_format(self):
        request = {'Timestamp': '2011-04-22T11:29:49.000P'}
        self.assertTrue(ec2utils.is_ec2_timestamp_expired(request))

    def test_validate_ec2_timestamp_advanced_time(self):
        # EC2 request with Timestamp in advanced time
        future = timeutils.utcnow() + datetime.timedelta(seconds=250)
        request = {'Timestamp': timeutils.strtime(future,
                                                  "%Y-%m-%dT%H:%M:%SZ")}
        self.assertFalse(
            ec2utils.is_ec2_timestamp_expired(request, expires=300))

    @tools.screen_all_logs
    def test_validate_ec2_timestamp_advanced_time_expired(self):
        future = timeutils.utcnow() + datetime.timedelta(seconds=350)
        request = {'Timestamp': timeutils.strtime(future,
                                                  "%Y-%m-%dT%H:%M:%SZ")}
        self.assertTrue(
            ec2utils.is_ec2_timestamp_expired(request, expires=300))

    def test_validate_ec2_req_timestamp_not_expired(self):
        request = {'Timestamp': timeutils.isotime()}
        self.assertFalse(
            ec2utils.is_ec2_timestamp_expired(request, expires=15))

    @tools.screen_all_logs
    def test_validate_ec2_req_timestamp_expired(self):
        request = {'Timestamp': '2011-04-22T12:00:00Z'}
        self.assertTrue(
            ec2utils.is_ec2_timestamp_expired(request, expires=300))

    @tools.screen_all_logs
    def test_validate_ec2_req_expired(self):
        request = {'Expires': timeutils.isotime()}
        self.assertTrue(ec2utils.is_ec2_timestamp_expired(request))

    def test_validate_ec2_req_not_expired(self):
        future = timeutils.utcnow() + datetime.timedelta(seconds=350)
        request = {'Expires': timeutils.strtime(future,
                                                "%Y-%m-%dT%H:%M:%SZ")}
        self.assertFalse(ec2utils.is_ec2_timestamp_expired(request))

    @tools.screen_all_logs
    def test_validate_Expires_timestamp_invalid_format(self):
        # EC2 request with invalid Expires
        request = {'Expires': '2011-04-22T11:29:49'}
        self.assertTrue(ec2utils.is_ec2_timestamp_expired(request))

    @tools.screen_all_logs
    def test_validate_ec2_req_timestamp_Expires(self):
        # Supplying both Timestamp and Expires is an invalid request.
        request = {'Timestamp': '2011-04-22T11:29:49Z',
                   'Expires': timeutils.isotime()}
        self.assertRaises(exception.InvalidRequest,
                          ec2utils.is_ec2_timestamp_expired,
                          request)
|
{
"content_hash": "b593b03be6f1e4c1310cafe8710b2dfd",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 78,
"avg_line_length": 40.688259109311744,
"alnum_prop": 0.6367164179104478,
"repo_name": "MayankGo/ec2-api",
"id": "fd977bf642e5ee18a0958f426e952a6b25dfc4f4",
"size": "10715",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable/kilo",
"path": "ec2api/tests/unit/test_ec2_validate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1460572"
},
{
"name": "Shell",
"bytes": "28903"
}
],
"symlink_target": ""
}
|
from io import StringIO
import re
# Pre-compiled patterns for the three Apache directives we care about.
vhost_start = re.compile(r'<VirtualHost\s+(.*?)>')
vhost_end = re.compile(r'</VirtualHost')
docroot_re = re.compile(r'(DocumentRoot\s+)(\S+)')


def replace_docroot(conf_string, vhost, new_docroot):
    '''Yield the lines of an httpd.conf string, with every DocumentRoot
    directive inside the <VirtualHost> block matching *vhost* rewritten
    to point at *new_docroot*. All other lines pass through unchanged.
    '''
    inside = False
    current = None
    for line in StringIO(conf_string):
        opened = vhost_start.search(line)
        if opened:
            # Remember which vhost block we just entered.
            current = opened.group(1)
            inside = True
        if inside and current == vhost and docroot_re.search(line):
            # \1 keeps the "DocumentRoot " prefix; only the path changes.
            line = docroot_re.sub(r'\1%s' % new_docroot, line)
        if vhost_end.search(line):
            inside = False
        yield line
if __name__ == '__main__':
    import sys
    # Usage: apache_conf_docroot_replace.py <conf-file> <vhost> <new-docroot>
    conf_file = sys.argv[1]
    vhost = sys.argv[2]
    docroot = sys.argv[3]
    # Context manager closes the handle (the original leaked it).
    with open(conf_file) as fh:
        conf_string = fh.read()
    for line in replace_docroot(conf_string, vhost, docroot):
        # Lines already carry their own newline.  The original
        # ``print (line),`` is a Python 2 idiom: under Python 3 it no
        # longer suppresses print's newline and double-spaced the output.
        print(line, end='')
# Usage: /usr/local/Python3/bin/python3 apache_conf_docroot_replace.py /etc/apache2/sites-available/psa local2:80 /tmp
# Python 3 has no cStringIO, so the io module is used instead.
# Finds the target VirtualHost, replaces its DocumentRoot and prints the
# result rather than rewriting the file in place.
# Walks the config file line by line with three pre-compiled regexes: on a
# VirtualHost opening tag, curr_vhost is captured and in_vhost becomes True.
# Three cases are handled per line: block start (True), replacement, block
# end (False); inside the matching vhost block, a DocumentRoot line is
# substituted (docroot_match / sub_line) before checking for the end tag.
# One regex matches the VirtualHost opening tag, one the closing tag, and
# one the DocumentRoot directive.
# docroot_re.sub(r'\1%s' % new_docroot, line)
# \1 refers to the first capture group; the substituted value replaces the
# old path on that line.
# yield makes the function a generator, producing lines lazily.
# http://www.ibm.com/developerworks/cn/opensource/os-cn-python-yield/
|
{
"content_hash": "71dac95c53287e063321cbbade38aa06",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 113,
"avg_line_length": 33.09090909090909,
"alnum_prop": 0.6532967032967033,
"repo_name": "lluxury/P_U_S_A",
"id": "d61c7cbf1a3ee74e4564d0f1326db588a0e9dad3",
"size": "2137",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "3_text/apache_conf_docroot_replace.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "AppleScript",
"bytes": "78"
},
{
"name": "HTML",
"bytes": "6274"
},
{
"name": "PLpgSQL",
"bytes": "1421"
},
{
"name": "Perl",
"bytes": "999"
},
{
"name": "Python",
"bytes": "1322191"
},
{
"name": "Roff",
"bytes": "6"
},
{
"name": "Shell",
"bytes": "1055"
}
],
"symlink_target": ""
}
|
import httplib, urllib
import time
from datetime import datetime
#Import modules for CGI handling
import cgi, cgitb
#Import line
from line import LineClient, LineGroup, LineContact
import Cookie, os, time
cookie = Cookie.SimpleCookie()
# Raw Cookie header sent by the browser (None when absent).
cookie_string = os.environ.get('HTTP_COOKIE')


def getCookies():
    """Return True when the browser presented a 'login=success' cookie.

    Returns False when no cookie header was sent, when the 'login'
    morsel is missing, or when its value is anything but 'success'.
    """
    if not cookie_string:
        return False
    # load() parses the cookie string
    cookie.load(cookie_string)
    # The original indexed cookie['login'] directly, which raised
    # KeyError (and broke the CGI response) whenever the 'login'
    # morsel was absent.  Use .get() and treat "missing" as not
    # logged in.
    login = cookie.get('login')
    if login is None:
        return False
    return str(login.value) == 'success'
#===================== Upload to clound ======================#
# This time to sleep between posts to the channel(at least 20second)
sleep = 15
# API Key Thingspeak channel to update
key = '9227DXSLR71NKKMO'
def uploadThingSpeak(data,numField):
while True:
params = urllib.urlencode({('field%d'%(numField)): data, 'key':key })
headers = {"Content-typZZe": "application/x-www-form-urlencoded","Accept": "text/plain"}
conn = httplib.HTTPConnection("api.thingspeak.com:80")
try:
conn.request("POST", "/update", params, headers)
response = conn.getresponse()
#print ('Field%s : %s' %(numField, data))
#print response.status #if show 200 is success
print 'Status :',response.reason
data = response.read()
conn.close()
except:
print "connection failed"
return
#===================== Upload to clound ======================#
#===================== Start LINE ALERT ======================#
loginLine = False
def lineAlert(MSG,nField):
USERNAME = 'sic@outlook.co.th'
PASSWORD = '1234567896'
GROUPNAME = 'Line-bot'
#MSG = 'hello world!'
#optional
COMPUTERNEME = 'Siczones.Bot'
TOKEN = ''
global loginLine
while loginLine is False:
try:
client = LineClient(id=USERNAME, password=PASSWORD, authToken=TOKEN, com_name=COMPUTERNEME)
TOKEN = client.authToken
#print "TOKEN : %s\r\n" % TOKEN
client_group = client.getGroupByName(GROUPNAME)
#print client_group
loginLine = True
print "Login success"
except:
print "Login Failed"
if loginLine is True:
try:
by = "[Webpage bot]"
client_group.sendMessage("Fielld :%s \n %s" % ( nField,MSG, by))
return
except:
#print 'sender failed'
lineAlert(MSG,nField)
#===================== END LINE ALERT ======================#
#========================== HTML ===========================#
# Create instance of FieldStorage
form = cgi.FieldStorage()
# Not logged in: emit a tiny page that redirects the browser back to the
# login screen (meta refresh, 0.1 s delay).
if getCookies() == False:
    print 'Content-Type: text/html\n'
    print '<html><head>'
    # NOTE(review): hard-coded LAN address of the login host -- presumably
    # the home gateway; verify against the deployment.
    homeIP = '192.168.0.102'
    print ('''<meta http-equiv="refresh" content="0.1;http://%s">'''%(homeIP))
    print '</head></html>'
else:
    # Logged in: render the full results page and push each submitted
    # field value up to ThingSpeak.
    print ("Content-type:text/html\r\n\r\n")
    print ('''<!DOCTYPE html>
<html lang="en">
<head>
	<title>Upload ThingSpeak</title>
	<meta charset="utf-8">
	<link href="../favicon.ico" rel="icon" type="image/x-icon"/>
	<link href="../favicon.ico" rel="shortcut icon" type="image/x-icon"/>
	<!-- This file has been downloaded from Bootsnipp.com. Enjoy! -->
	<meta name="viewport" content="width=device-width, initial-scale=1">
	<link href="http://maxcdn.bootstrapcdn.com/bootstrap/3.3.0/css/bootstrap.min.css" rel="stylesheet">
	<!-- Theme CSS -->
	<link href="/css/agency.min.css" rel="stylesheet">
	<link href="/css/siczones.css" rel="stylesheet">
	<script src="http://code.jquery.com/jquery-1.11.1.min.js"></script>
	<script src="http://maxcdn.bootstrapcdn.com/bootstrap/3.3.0/js/bootstrap.min.js"></script>
	<script>
	$(document).ready(function(){
		$(window).scroll(function () {
			if ($(this).scrollTop() > 50) {
				$('#back-to-top').fadeIn();
			} else {
				$('#back-to-top').fadeOut();
			}
		});
		// scroll body to 0px on click
		$('#back-to-top').click(function () {
			$('#back-to-top').tooltip('hide');
			$('body,html').animate({
				scrollTop: 0
			}, 800);
			return false;
		});
		$('#back-to-top').tooltip('show');
	});
	</script>
</head>''')
    print ('''
<body>
<!-- ==================== Nav Tabs ======================= -->
	<nav class="nav nav-tabs navbar-inverse navbar-fixed-top">
		<div class = "container">
		 <ul class="nav nav-tabs">
			<li role="presentation"><a href="index.py">Home</a></li>
			<li role="presentation" ><a href="mode.py">Mode</a></li>
			<li role="presentation" class="dropdown active">
				<a class="dropdown-toggle" data-toggle="dropdown" href="#" role="button" aria-haspopup="true" aria-expanded="false">
				Other<span class="caret"></span>
				</a>
				<ul class="dropdown-menu">
					<li><a href="status.py">Status</a></li>
					<li><a href="device.py">Device</a></li>
					<li><a href="alert.py">Alert</a></li>
					<li role="separator" class="divider"></li>
					<li><a href="logout.py" onmouseover="style.color='red'" onmouseout="style.color='black'">Log out</a></li>
				</ul>
			</li>
		 </ul>
		</div>
	</nav>
	<br/><br/>
	<div class="container-fluid" id="grad1">
		<div class="container">
			<div class="navbar-header">
				<h2 class="class="section-subheading text-muted"">
				<a class="navbar-brand">Safety in residential system </a>
				</h2>
			</div>
		</div>
	</div>
<!-- ========================== Nav Tabs ======================= -->
	<div class = "container">
	<div class="wrapper">
	<center><fieldset><legend>Result !!</legend>
		<h3 class="form-signin-heading">Welcome to Upload data to ThingSpeak</h3>
		<hr class="colorgraph"><div class="form-signin">''')
    # First pass: probe Field1..Field7 just to learn whether the request
    # carried any value at all, so the right alert banner can be shown.
    i=1
    count = False
    while i<=7 :
        if (form.getvalue('Field%s'%(i))):
            count = True
            break
        i = i+1
    if count is True:
        print('''<div class="alert alert-success" role="alert">Receive data value success !</div>''')
    else:
        print('''<div class="alert alert-warning" role="alert">No data value !</div>''')
    # Second pass: collect every field value, echo it into the page and
    # upload the non-empty ones to ThingSpeak one by one.
    i=1
    Field=[]
    while i<=7 :
        # Get data from fields
        Field.append(form.getvalue('Field%s'%(i)))
        print ('''<p class="form-control">Field%s : %s''' % (i,Field[i-1]))
        if Field[i-1] is not None:
            # NOTE(review): float() will raise on non-numeric input and
            # abort the page mid-render -- confirm the form guarantees
            # numeric values.
            uploadThingSpeak(float(Field[i-1]),i)
            #if had more than one field then use sleep time
            if (form.getvalue('Field%s'%(i+1)) is not None):
                #sleep for desired amount of time
                time.sleep(sleep)
        i=i+1
    print ('''</div><br>
		<FORM action="status.py" class="btn-form"><button class="btn btn-lg btn-success btn-block" Type="submit" VALUE="Status" onmouseover="style.color='yellow'" onmouseout="style.color='white'">Status</button></FORM>
		<FORM class="btn-from"><button class="btn btn-lg btn-primary btn-block" Type="button" VALUE="Back" onClick="history.go(-1);return true;">Back</button></FORM>''')
    print ('''</fieldset></center></div></div>
	<br/><br/>
<!-- ============== Footer ============ -->
	<br/><br/><div class="navbar navbar-default navbar-fixed-bottom">
		<div class="container">
		<p class="navbar-text pull-left">Copyright <span class="glyphicon glyphicon-copyright-mark"> </span> 2016 - Siczones.</p>
		<!-- a id="back-to-top" href="#" class="navbar-btn btn-danger btn pull-right" role="button" data-toggle="tooltip" data-placement="left"><span class="glyphicon glyphicon-chevron-up"></span></a -->
		<!-- Split button -->
		<div class="navbar-btn btn-group dropup pull-right">
			<button id="back-to-top" href="#" type="button" class="btn btn-warning"><span class="glyphicon glyphicon-chevron-up"></span> Top</button>
		</div>
		</div>
	</div>
<!-- ============== End Footer ============ -->
</body>''')
    print ("</html>")
# Disabled LINE-notification pass kept as a bare string literal (no-op);
# it would alert the group for every submitted field.
'''
i=1
while i<=7 :
    if Field[i-1] is not None:
        lineAlert( str(Field[i-1]), str(i))
    i = i+1
'''
|
{
"content_hash": "885581d3df45e326d1e3b5ffc1b39230",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 220,
"avg_line_length": 40.342465753424655,
"alnum_prop": 0.5200905489530278,
"repo_name": "5610110083/Safety-in-residential-project",
"id": "1c96706fe4a16f5476fce2e25498770c719d3896",
"size": "9235",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cgi-bin.bak/UploadThingSpeak.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "130630"
},
{
"name": "Arduino",
"bytes": "45331"
},
{
"name": "Assembly",
"bytes": "337688"
},
{
"name": "Batchfile",
"bytes": "67280"
},
{
"name": "C",
"bytes": "31386611"
},
{
"name": "C#",
"bytes": "10401"
},
{
"name": "C++",
"bytes": "780076"
},
{
"name": "CMake",
"bytes": "107889"
},
{
"name": "CSS",
"bytes": "461723"
},
{
"name": "DIGITAL Command Language",
"bytes": "322017"
},
{
"name": "DTrace",
"bytes": "12419"
},
{
"name": "Emacs Lisp",
"bytes": "5297"
},
{
"name": "Forth",
"bytes": "212900"
},
{
"name": "Frege",
"bytes": "2320153"
},
{
"name": "GDB",
"bytes": "10566"
},
{
"name": "HTML",
"bytes": "1493042"
},
{
"name": "Inno Setup",
"bytes": "3960"
},
{
"name": "JavaScript",
"bytes": "1301174"
},
{
"name": "Lex",
"bytes": "9180"
},
{
"name": "Lua",
"bytes": "12941"
},
{
"name": "M4",
"bytes": "196634"
},
{
"name": "Makefile",
"bytes": "3105186"
},
{
"name": "PHP",
"bytes": "18667"
},
{
"name": "Pascal",
"bytes": "25226"
},
{
"name": "Perl",
"bytes": "3425327"
},
{
"name": "Prolog",
"bytes": "29177"
},
{
"name": "Protocol Buffer",
"bytes": "2764"
},
{
"name": "Python",
"bytes": "327971"
},
{
"name": "Roff",
"bytes": "779793"
},
{
"name": "Scheme",
"bytes": "4249"
},
{
"name": "Shell",
"bytes": "1957563"
},
{
"name": "TeX",
"bytes": "2582"
},
{
"name": "Visual Basic",
"bytes": "1074"
},
{
"name": "XS",
"bytes": "4319"
},
{
"name": "XSLT",
"bytes": "12579"
},
{
"name": "Yacc",
"bytes": "7701"
},
{
"name": "eC",
"bytes": "5158"
}
],
"symlink_target": ""
}
|
"""
Exceptions that can occur while using maxcdn-ssl-client
"""
import traceback
class SslApiClientException(Exception):
    """
    A superclass for all exceptions raised by maxcdn-ssl-client

    Attributes:
        message: human-readable description of the failure
        cause: optional underlying exception that triggered this one
    """
    __slots__ = ('message', 'cause')

    def __init__(self, message, cause=None):
        self.message = message
        self.cause = cause
        # Forward the message to Exception so that args / repr / pickle
        # round-trips carry it too.  The original called
        # ``super().__init__()`` with no arguments, leaving e.args empty.
        super().__init__(message)

    def __str__(self):
        string = "SslApiClientException: {}".format(self.message)
        if self.cause:
            # Render the full traceback of the causing exception, the
            # same way the interpreter would print it.
            exception = "".join(
                traceback.format_exception(type(self.cause),
                                           self.cause,
                                           self.cause.__traceback__))
            string += "\nCaused by:\n{}".format(exception)
        return string
class CommunicationException(SslApiClientException):
    """Raised when communicating with the API server fails."""
class ApiFailureException(SslApiClientException):
    """Raised when the API is used in an invalid way."""
class CryptographyException(SslApiClientException):
    """Raised when a cryptographic operation is invalid or fails."""
class LogicException(SslApiClientException):
    """Raised for business-logic errors."""
|
{
"content_hash": "4ed6b4085b929c31957edaa66b35f4af",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 69,
"avg_line_length": 25.54,
"alnum_prop": 0.5904463586530931,
"repo_name": "neverpanic/maxcdn-ssl-client",
"id": "331ff3a080f17ea1e15f3fa749cb4e1666d3971c",
"size": "1277",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "maxcdn_ssl_client/exception.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "23218"
}
],
"symlink_target": ""
}
|
from debile.slave.wrappers.lintian import parse_lintian
from debile.utils.commands import run_command
def lintian(targets, analysis, lintian_binary='lintian'):
    """Run lintian over each target and collect its issues into *analysis*.

    Returns the tuple ``(analysis, log, failed, None, None)`` where *log*
    is the concatenated lintian output and *failed* is True as soon as any
    reported issue has severity 'error'.
    """
    # Accept a single target as a convenience.
    if not isinstance(targets, list):
        targets = [targets]
    log = ""
    failed = False
    for target in targets:
        out, _, _ = run_command([
            lintian_binary,
            "-IE",
            "--pedantic",
            "--show-overrides",
            target,
        ])
        for issue in parse_lintian(out.splitlines(), target):
            analysis.results.append(issue)
            failed = failed or issue.severity == 'error'
        log += out
    return (analysis, log, failed, None, None)
def version(lintian_binary='lintian'):
    """Return ``(name, version)`` as reported by ``lintian --version``.

    Raises Exception when the binary is missing or exits non-zero.
    """
    out, _, ret = run_command([lintian_binary, '--version'])
    if ret != 0:
        raise Exception(lintian_binary + " is not installed")
    # Split on the first space only: the bare two-way unpacking of the
    # original raised ValueError whenever the version line contained more
    # than one space (e.g. extra tokens after the version number).
    name, ver = out.split(" ", 1)
    return (name, ver.strip())
|
{
"content_hash": "02c0bd68c643c01d3de3e7859ed62d1b",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 61,
"avg_line_length": 28.029411764705884,
"alnum_prop": 0.5739769150052466,
"repo_name": "tcc-unb-fga/debile",
"id": "d2c556a142e6a46654936a9f96230ece0e2250e5",
"size": "2129",
"binary": false,
"copies": "4",
"ref": "refs/heads/tcc",
"path": "debile/slave/runners/lintian.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1710"
},
{
"name": "Nginx",
"bytes": "1767"
},
{
"name": "Python",
"bytes": "366466"
},
{
"name": "Shell",
"bytes": "14590"
}
],
"symlink_target": ""
}
|
import os
import cv2
import math
import time
import shutil
import imghdr
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import gfile
from Util.ProgressBar import ProgressBar
class Config:
    """Dataset paths and on-disk helpers shared by extractor/predictor."""
    n_class = 19
    extractors_path = "Models/Extractors/"
    predictors_path = "Models/Predictors/"
    # One source folder per class: sources/0000 .. sources/0018.
    image_paths = ["sources/{:04d}".format(i) for i in range(n_class)]

    @staticmethod
    def get_image_paths():
        """Expand every class folder into a flat list of file paths."""
        paths = []
        for folder in Config.image_paths:
            paths.extend(folder + "/" + entry for entry in os.listdir(folder))
        return paths

    @staticmethod
    def get_labels():
        """Build one-hot labels per class folder and save them to disk."""
        labels = []
        for cls_idx in range(Config.n_class):
            count = len(os.listdir("sources/{:04d}".format(cls_idx)))
            labels += [[1 if col == cls_idx else 0
                        for col in range(Config.n_class)]
                       for _ in range(count)]
        np.save("sources/labels", labels)

    @staticmethod
    def shuffle(folder):
        """Shuffle features/labels in unison and overwrite them on disk."""
        feats = np.load(folder + "/features.npy")
        labs = np.load(folder + "/labels.npy")
        perm = np.random.permutation(len(feats))
        np.save(folder + "/features", feats[perm])
        np.save(folder + "/labels", labs[perm])

    @staticmethod
    def split(folder):
        """Shuffle, then write a 90/10 train/test split back to disk."""
        feats = np.load(folder + "/features.npy")
        labs = np.load(folder + "/labels.npy")
        perm = np.random.permutation(len(feats))
        feats, labs = feats[perm], labs[perm]
        cut = int(0.9 * len(feats))
        np.save(folder + "/x_train", feats[:cut])
        np.save(folder + "/x_test", feats[cut:])
        np.save(folder + "/y_train", labs[:cut])
        np.save(folder + "/y_test", labs[cut:])
class Extractor:
    """Extracts feature vectors from images using a frozen TF graph.

    ``extractor`` selects a bundled model under Config.extractors_path;
    any name containing "v3" is normalized to the Inception-v3 model.
    ``mat_dir`` may point at a pre-computed .npy feature matrix to skip
    re-reading the images for the cnn/ResNet paths.
    """
    def __init__(self, extractor="v3", image_paths=None, labels=None, mat_dir=None):
        self._extractor = extractor if "v3" not in extractor else "v3"
        self._image_paths = Config.get_image_paths() if image_paths is None else image_paths
        self._labels, self._mat_dir = labels, mat_dir
    def _create_graph(self):
        """Load the frozen extractor .pb into a fresh default graph."""
        tf.reset_default_graph()
        with gfile.FastGFile(Config.extractors_path+self._extractor+"/Model.pb", "rb") as file:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(file.read())
            if "cnn" in self._extractor:
                # Fix nodes: frozen graphs exported with batch-norm update
                # ops contain RefSwitch/Assign nodes that cannot be imported
                # for inference; rewrite them in place before importing.
                for node in graph_def.node:
                    if node.op == 'RefSwitch':
                        node.op = 'Switch'
                        for index in range(len(node.input)):
                            if 'moving_' in node.input[index]:
                                node.input[index] += '/read'
                    elif node.op == 'Assign':
                        node.op = 'Sub'
                        if 'use_locking' in node.attr:
                            del node.attr['use_locking']
            tf.import_graph_def(graph_def, name="")
    def _extract(self, verbose):
        """Run every image through the graph and return (features, labels).

        The v3 path feeds raw JPEG bytes one image at a time and may
        rename/convert or quarantine unreadable files on disk; the other
        paths read images with OpenCV and run them through the graph in
        batches (returning the stacked features directly, without labels).
        """
        features = []
        with tf.Session() as sess:
            # Output tensor name depends on which frozen model is loaded.
            if self._extractor == "v3":
                _output = "pool_3:0"
            elif self._extractor == "ResNet-v2":
                _output = "InceptionResnetV2/Logits/Flatten/Reshape:0"
            elif self._extractor == "cnn":
                _output = "final_result/Reshape:0"
            else:
                _output = "OutputFlow/Reshape:0"
            flattened_tensor = sess.graph.get_tensor_by_name(_output)
            # Entry tensor: v3 takes encoded JPEG bytes, the others take
            # decoded image arrays via a placeholder.
            if self._extractor == "v3":
                _entry = "DecodeJpeg/contents:0"
            elif self._extractor == "ResNet-v2":
                _entry = "Placeholder:0"
            else:
                _entry = "Entry/Placeholder:0"
            # Indices of images that had to be discarded; used below to
            # drop the matching labels.
            pop_lst = []
            if "cnn" in self._extractor or "ResNet" in self._extractor and self._mat_dir is not None:
                # NOTE(review): due to operator precedence this reads as
                # cnn OR (ResNet AND mat_dir) -- confirm that is intended.
                features = np.load(self._mat_dir)
            else:
                def process(img_path):
                    # Feed the raw JPEG bytes through the graph and keep
                    # the squeezed feature vector.
                    img_data = gfile.FastGFile(img_path, "rb").read()
                    feature = sess.run(flattened_tensor, {
                        _entry: img_data
                    })
                    features.append(np.squeeze(feature))
                for i, image_path in enumerate(self._image_paths):
                    if not os.path.isfile(image_path):
                        continue
                    if "v3" in self._extractor:
                        if verbose:
                            print("Processing {}...".format(image_path))
                        try:
                            process(image_path)
                        except Exception as err:
                            # Decoding failed: try to repair the file by
                            # renaming (jpg/jpeg) or converting (anything
                            # else) before retrying.
                            if verbose:
                                print(err)
                            name, extension = os.path.splitext(image_path)
                            base = os.path.basename(image_path)
                            if extension.lower() in (".jpg", ".jpeg"):
                                new_name = name[:image_path.rfind(base)] + "{:06d}{}".format(i, extension)
                                print("Renaming {} to {}...".format(image_path, new_name))
                                os.rename(image_path, new_name)
                                process(new_name)
                            else:
                                new_name_base = name[:image_path.rfind(base)] + "{:06d}".format(i)
                                new_name = new_name_base + ".jpg"
                                print("Transforming {} to {}...".format(image_path, new_name))
                                try:
                                    if imghdr.what(image_path) is None:
                                        raise ValueError("{} is not an image".format(image_path))
                                    os.rename(image_path, new_name_base + extension)
                                    cv2.imwrite(new_name, cv2.imread(new_name_base + extension))
                                    os.remove(new_name_base + extension)
                                    process(new_name)
                                except Exception as err:
                                    # Unrecoverable: quarantine the file and
                                    # remember its index so the label is
                                    # dropped too.
                                    print(err)
                                    print("Moving {} to '_err' folder...".format(image_path))
                                    if not os.path.isdir("_err"):
                                        os.makedirs("_err")
                                    shutil.move(image_path, os.path.join("_err", os.path.basename(image_path)))
                                    pop_lst.append(i)
                    else:
                        # Non-v3 path: read and resize to the input size the
                        # model expects.
                        if verbose:
                            print("Reading {}...".format(image_path))
                        image_data = cv2.imread(image_path)
                        if self._extractor == "ResNet-v2":
                            features.append(cv2.resize(image_data, (299, 299)))
                        else:
                            features.append(cv2.resize(image_data, (64, 64)))
                if "v3" not in self._extractor:
                    # Batch the images through the graph; batch size chosen
                    # so one batch stays around 1e6 scalars.
                    features = np.array(features)
                    print("Extracting features...")
                    rs = []
                    batch_size = math.floor(1e6 / np.prod(features.shape[1:]))
                    epoch = int(math.ceil(len(features) / batch_size))
                    bar = ProgressBar(max_value=epoch, name="Extract")
                    for i in range(epoch):
                        if i == epoch - 1:
                            rs.append(sess.run(flattened_tensor, {
                                _entry: features[i*batch_size:]
                            }))
                        else:
                            rs.append(sess.run(flattened_tensor, {
                                _entry: features[i*batch_size:(i+1)*batch_size]
                            }))
                        bar.update()
                    return np.vstack(rs).astype(np.float32)
        # Drop the labels whose images were quarantined above (pop_lst is
        # sorted because it was filled in enumeration order).
        if pop_lst:
            labels = []
            pop_cursor, pop_idx = 0, pop_lst[0]
            for i, label in enumerate(self._labels):
                if i == pop_idx:
                    pop_cursor += 1
                    if pop_cursor < len(pop_lst):
                        pop_idx = pop_lst[pop_cursor]
                    else:
                        pop_idx = -1
                    continue
                labels.append(label)
            labels = np.array(labels, dtype=np.float32)
        elif self._labels is None:
            labels = None
        else:
            labels = np.array(self._labels, dtype=np.float32)
        return np.array(features, dtype=np.float32), labels
    def run(self, verbose=True):
        """Load the graph and return the extracted (features, labels)."""
        self._create_graph()
        return self._extract(verbose)
class Predictor:
    """Runs a frozen classifier graph over pre-extracted feature vectors."""
    def __init__(self, predictor="v3"):
        # Entry/output tensor names are fixed by how the predictor graphs
        # under Config.predictors_path were exported.
        self._predictor = predictor
        self._entry, self._output = "Entry/Placeholder:0", "OutputFlow/add_9:0"
    def _create_graph(self):
        """Load the frozen predictor .pb into a fresh default graph."""
        tf.reset_default_graph()
        with gfile.FastGFile(Config.predictors_path+self._predictor+"/Model.pb", "rb") as file:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(file.read())
            if "cnn" in self._predictor:
                # Fix nodes: rewrite RefSwitch/Assign nodes left over from
                # training-time batch-norm updates so the frozen graph can
                # be imported for inference (same fixup as in Extractor).
                for node in graph_def.node:
                    if node.op == 'RefSwitch':
                        node.op = 'Switch'
                        for index in range(len(node.input)):
                            if 'moving_' in node.input[index]:
                                node.input[index] += '/read'
                    elif node.op == 'Assign':
                        node.op = 'Sub'
                        if 'use_locking' in node.attr:
                            del node.attr['use_locking']
            tf.import_graph_def(graph_def, name="")
    def predict(self, x):
        """Return the model's raw output rows for feature matrix ``x``.

        ``x`` is coerced to a 2-D float32 array and fed through the graph
        in batches sized to roughly 1e6 scalars each.
        """
        self._create_graph()
        x, rs = np.atleast_2d(x).astype(np.float32), []
        with tf.Session() as sess:
            flattened_tensor = sess.graph.get_tensor_by_name(self._output)
            print("Predicting...")
            batch_size = math.floor(1e6 / np.prod(x.shape[1:]))
            epoch = math.ceil(len(x) / batch_size)  # type: int
            bar = ProgressBar(max_value=epoch, name="Predict")
            for i in range(epoch):
                # Last batch takes whatever remains.
                if i == epoch - 1:
                    rs.append(sess.run(flattened_tensor, {
                        self._entry: x[i*batch_size:]
                    }))
                else:
                    rs.append(sess.run(flattened_tensor, {
                        self._entry: x[i*batch_size:(i+1)*batch_size]
                    }))
                bar.update()
            return np.vstack(rs).astype(np.float32)
class Pipeline:
    """End-to-end image classification demo: preprocessing, feature
    extraction, prediction, and interactive OpenCV visualization."""
    # Fixed (width, height) of the "Overview" window.
    shape = (1440, 576)
    def __init__(self):
        self._image_paths = []
        self._img_dir = self._rs_dir = None
        self._n_row = self._n_col = None
        self._results = []
        self._ans = self._pred = self._prob = None
    @staticmethod
    def get_image_paths(img_dir, pre_process=False):
        """List the files directly inside ``img_dir``.

        With ``pre_process=True``, keep only files that ``imghdr`` recognizes
        as images.
        """
        image_paths = [img_dir + "/" + file for file in os.listdir(img_dir)]
        if not pre_process:
            return [img for img in image_paths if os.path.isfile(img)]
        return [img for img in image_paths if os.path.isfile(img) and imghdr.what(img) is not None]
    def _get_results(self, ans, img_dir=None, rs_dir=None):
        """Build one 256x640 result canvas per image.

        Each canvas shows the (resized) input image on the left and a
        color-coded panel with the top-3 predictions on the right.  ``ans``
        is a one-hot answer matrix, or None when ground truth is unknown.
        Also caches the true/predicted classes and probabilities on ``self``
        for use by ``_get_detail``.  Returns the list of canvases.
        """
        if img_dir is None:
            img_dir = self._img_dir
        if rs_dir is None:
            rs_dir = self._rs_dir
        # Softmax over the raw predictions stored by ``run``.
        y_pred = np.exp(np.load(rs_dir + "/prediction.npy"))
        y_pred /= np.sum(y_pred, axis=1, keepdims=True)
        pred_classes = np.argmax(y_pred, axis=1)
        if ans is not None:
            true_classes = np.argmax(ans, axis=1)
            true_prob = y_pred[range(len(y_pred)), true_classes]
        else:
            true_classes = None
            # Without ground truth, fall back to the predicted class' prob.
            true_prob = y_pred[range(len(y_pred)), pred_classes]
        self._ans, self._pred, self._prob = true_classes, pred_classes, true_prob
        images = []
        c_base = 60
        for i, img in enumerate(Pipeline.get_image_paths(img_dir, True)):
            _pred = y_pred[i]
            # Indices of the top-3 predictions, best first.
            _indices = np.argsort(_pred)[-3:][::-1]
            # NOTE(review): this load is loop-invariant and could be hoisted
            # out of the loop.
            label_dic = np.load(os.path.join("_Data", "_Cache", "LABEL_DIC.npy"))
            _ps, _labels = _pred[_indices], label_dic[_indices]
            _img = cv2.imread(img)
            if true_classes is None:
                # No ground truth: neutral white panel.
                color = np.array([255, 255, 255], dtype=np.uint8)
            else:
                # Shade the panel by confidence; green-ish when the top
                # prediction matches the true class, red-ish otherwise.
                _p = _ps[0]
                if _p <= 1 / 2:
                    _l, _r = 2 * c_base + (255 - 2 * c_base) * 2 * _p, c_base + (255 - c_base) * 2 * _p
                else:
                    _l, _r = 255, 510 * (1 - _p)
                if true_classes[i] == pred_classes[i]:
                    color = np.array([0, _l, _r], dtype=np.uint8)
                else:
                    color = np.array([0, _r, _l], dtype=np.uint8)
            canvas = np.zeros((256, 640, 3), dtype=np.uint8)
            _img = cv2.resize(_img, (256, 256))
            canvas[:, :256] = _img
            canvas[:, 256:] = color
            bar_len = 180
            for j, (_p, _label) in enumerate(zip(_ps, _labels)):
                # NOTE(review): cv2.LINE_AA is passed where putText expects a
                # fontFace (signature: img, text, org, fontFace, fontScale,
                # color, thickness) — confirm this renders as intended.
                cv2.putText(canvas, _label, (288, 64 + 64 * j), cv2.LINE_AA, 0.6, (0, 0, 0), 1)
                cv2.rectangle(canvas, (420, 49 + 64 * j), (420 + int(bar_len * _p), 69 + 64 * j), (125, 0, 125), -1)
            images.append(canvas)
        return images
    def _get_detail(self, event, x, y, *_):
        """Mouse callback for the overview window.

        On a left double-click, maps the click position to a grid cell and
        pops up that cell's full result canvas until Esc is pressed.
        """
        label_dic = np.load(os.path.join("_Data", "_Cache", "LABEL_DIC.npy"))
        if event == cv2.EVENT_LBUTTONDBLCLK:
            _w, _h = Pipeline.shape
            # Size of one grid cell in the resized overview window.
            _pw, _ph = _w / self._n_col, _h / self._n_row
            _idx = int(x // _pw + self._n_col * (y // _ph))
            _prob = self._prob[_idx]
            if self._ans is None or self._ans[_idx] == self._pred[_idx]:
                title = "Detail (prob: {:6.4})".format(_prob)
            else:
                title = "True label: {} (prob: {:6.4})".format(
                    label_dic[self._ans[_idx]], _prob)
            while 1:
                cv2.imshow(title, self._results[_idx])
                # Esc closes the detail view.
                if cv2.waitKey(20) & 0xFF == 27:
                    break
            cv2.destroyWindow(title)
    def run(self, images_dir="Test", image_shape=(64, 64), model="v3",
            delete_cache=True, extract_only=False, visualize_only=False, overview=True, verbose=True):
        """Run the full pipeline.

        Preprocesses the images in ``images_dir`` (unless ``visualize_only``),
        extracts features with ``model``, predicts, saves results under
        ``images_dir/_Result``, and optionally shows an interactive overview.
        ``extract_only`` saves the raw features and stops early.
        """
        _t = time.time()
        y_pred = None
        if not visualize_only:
            if model != "v3":
                print("=" * 30)
                print("Resizing images...")
                print("=" * 30)
            else:
                print("=" * 30)
                print("Reading images...")
                print("=" * 30)
            if extract_only:
                self._image_paths = Pipeline.get_image_paths(images_dir)
            else:
                self._image_paths = Pipeline.get_image_paths(images_dir, True)
            rs = []
            _new_path, _mat_dir = images_dir + "/_cache", None
            if not os.path.isdir(_new_path):
                os.makedirs(_new_path)
            if model != "v3":
                for i, img in enumerate(self._image_paths):
                    _img = cv2.imread(img)
                    if model != "ResNet-v2":
                        _img = cv2.resize(_img, image_shape)
                    else:
                        # ResNet-v2 branch forces 299x299 and scales to [-1, 1].
                        image_shape = (299, 299)
                        _img = cv2.resize(_img, image_shape)/127.5-1
                    if model == "v3(64)":
                        # Write resized copies into the cache folder and point
                        # the image paths there.
                        _slash_idx = img.rfind("/")
                        _new_img = _new_path + "/" + img[_slash_idx+1:]
                        cv2.imwrite(_new_img, _img)
                        print("{} transformed to {} with shape {}".format(img, _new_img, image_shape))
                        self._image_paths[i] = _new_img
                    else:
                        print("{} transformed to shape {}".format(img, image_shape))
                        rs.append(_img.astype(np.float32))
                        _mat_dir = _new_path + "/_mat.npy"
                np.save(_mat_dir, rs)
            print("Done")
            print("=" * 30)
            print("Using {} to extract features...".format(model))
            print("=" * 30)
            features, _ = Extractor(model, self._image_paths, _mat_dir).run(verbose)
            if extract_only:
                np.save("features", features)
                Pipeline._delete_cache(images_dir)
                return
            print("-" * 30)
            print("Loading predictor...")
            y_pred = Predictor(model).predict(features)
            print("-" * 30)
        self._img_dir = images_dir
        self._rs_dir = images_dir + "/_Result"
        label_dic = np.load(os.path.join("_Data", "_Cache", "LABEL_DIC.npy"))
        if not visualize_only:
            if not os.path.isdir(images_dir + "/_Result"):
                os.makedirs(self._rs_dir)
            np.save(self._rs_dir + "/prediction", y_pred)
            labels = label_dic[np.argmax(y_pred, axis=1)]
            with open(self._rs_dir + "/labels.txt", "w") as file:
                file.write("\n".join(labels))
            print("Done; results saved to '{}' folder".format(self._rs_dir))
            if delete_cache:
                Pipeline._delete_cache(images_dir)
            print("-" * 30)
            print("Done")
            print("(Test) Time cost: {:8.6} s".format(time.time() - _t))
        if overview:
            print("-" * 30)
            print("Visualizing results...")
            if os.path.isfile(images_dir + "/_answer.npy"):
                _ans = np.load(images_dir + "/_answer.npy")
            else:
                _ans = None
            images = self._get_results(_ans)
            # Arrange the canvases in a near-square grid.
            n_row = math.ceil(math.sqrt(len(images)))  # type: int
            n_col = math.ceil(len(images) / n_row)
            pictures = []
            for i in range(n_row):
                if i == n_row - 1:
                    # Pad the last row with white cells so all rows align.
                    pictures.append(np.hstack(
                        [*images[i*n_col:], np.zeros((256, 640*(n_row*n_col-len(images)), 3)) + 255]).astype(np.uint8))
                else:
                    pictures.append(np.hstack(
                        images[i*n_col:(i+1)*n_col]).astype(np.uint8))
            self._results = images
            self._n_row, self._n_col = n_row, n_col
            big_canvas = np.vstack(pictures).astype(np.uint8)
            overview = cv2.resize(big_canvas, Pipeline.shape)
            cv2.namedWindow("Overview")
            cv2.setMouseCallback("Overview", self._get_detail)
            cv2.imshow("Overview", overview)
            cv2.waitKey(0)
            cv2.destroyAllWindows()
        print("-" * 30)
        print("Done")
    @staticmethod
    def _delete_cache(images_dir):
        """Remove the temporary '_cache' folder created while preprocessing."""
        print("-" * 30)
        print("Deleting '_cache' folder...")
        shutil.rmtree(images_dir + "/_cache")
|
{
"content_hash": "8f9e07989bfd66be755c8b5d932b59fc",
"timestamp": "",
"source": "github",
"line_count": 432,
"max_line_length": 119,
"avg_line_length": 45.47222222222222,
"alnum_prop": 0.4562716351048666,
"repo_name": "carefree0910/MachineLearning",
"id": "b3217ed8552cb24644c4d09ba0cbdc382d4d9df9",
"size": "19644",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "_Dist/ImageRecognition/ToolBox.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2089597"
},
{
"name": "Python",
"bytes": "1197443"
}
],
"symlink_target": ""
}
|
import logging
import os
import zc.buildout
import pkg_resources
from zc.recipe.egg import Egg
class Symlinks(Egg):
    """Put symlinks to different files, into one directory.
    For example, such section can be defined to bring pyexiv2,
    from standard debian package to isolated buildout:
    [buildout]
    parts = pyexiv2 etree python
    [pyexiv2]
    recipe = svetlyak40wt.recipe.symlinks
    path = parts/pyexiv2
    files =
        /usr/share/pyshared/pyexiv2.py
        /usr/lib/python2.5/site-packages/libpyexiv2.so
    [etree]
    # This syntax uses pkg_resources to locate right path
    # to the module resource.
    path = parts/etree
    files =
        xml://xml/etree ElementTree
    [python]
    recipe = zc.recipe.egg
    interpreter = python
    eggs = ipython
    extra-paths =
        ${pyexiv2:path}
        ${etree:path}
    """
    def __init__(self, buildout, name, options):
        super(Symlinks, self).__init__(buildout, name, options)
        self.name, self.options = name, options
        # Resolve ``path`` relative to the buildout root so symlinks land
        # inside the buildout tree regardless of the working directory.
        options['path'] = os.path.join(
            buildout['buildout']['directory'],
            options['path'],
        )

    def install(self):
        """Create the target directory and symlink every configured file.

        Each line of the ``files`` option is either ``source`` or
        ``source link-name``; sources of the form ``package://rel/path`` are
        resolved through pkg_resources.  Stale symlinks pointing elsewhere
        are replaced.
        """
        path = self.options['path']
        logger = logging.getLogger(self.name)
        logger.info(
            'Creating directory %s' % os.path.basename(path))
        if not os.path.exists(path):
            os.makedirs(path)
        specs = (line for line in self.options['files'].split('\n') if line)
        for spec in specs:
            parts = spec.split(None, 1)
            if len(parts) == 2:
                source, as_ = parts
            else:
                source = parts[0]
                as_ = os.path.basename(source)
            if '://' in source:
                source = self._get_resource_filename(source)
            to = os.path.join(path, as_)
            if os.path.islink(to) and os.readlink(to) != source:
                # Replace symlinks that point to a different target.
                logger.info('Removing symlink from "%s" to "%s"' % (os.readlink(to), to))
                os.remove(to)
            if not os.path.exists(to):
                logger.info('Making symlink from "%s" to "%s"' % (source, to))
                os.symlink(source, to)
        # Since other processes may create resources in path it
        # should be excluded during uninstall, so return empty list.
        # We can't return list of created symlinks as buildout does not
        # currently support symlink removal, see uninstall_symlinks below.
        return []

    def update(self):
        pass

    def _get_resource_filename(self, uri):
        """Resolve a ``package://relative/path`` URI to an absolute filename.

        Raises RuntimeError when the package cannot be located.
        """
        logger = logging.getLogger(self.name)
        logger.info('getting resource filename for uri "%s"' % uri)
        package, relative = uri.split('://', 1)
        self.options['eggs'] = package
        ws = self.working_set()[1]
        distributions = ws.require(package)
        if not distributions:
            # Bug fix: the format string was '"%"' (no conversion type),
            # which raised ValueError instead of the intended message.
            raise RuntimeError('Can\'t find package "%s"' % package)
        package = distributions[0]
        result = os.path.join(package.location, relative)
        logger.info('resource filename for uri "%s" is "%s"' % (uri, result))
        return result
def uninstall_symlinks(name, options):
    """
    Buildout does not currently support symlink removal.
    This uninstall method removes created symlinks.
    Inspired by https://bugs.launchpad.net/zc.buildout/+bug/144228
    """
    base = options['path']
    for spec in options['files'].split('\n'):
        if not spec:
            continue
        pieces = spec.split(None, 1)
        # "source link-name" lines name the link explicitly; otherwise
        # the link is named after the source's basename.
        link_name = pieces[1] if len(pieces) == 2 else os.path.basename(pieces[0])
        target = os.path.join(base, link_name)
        if os.path.isfile(target) or os.path.islink(target):
            os.remove(target)
|
{
"content_hash": "f45ecf14c928e88c72020a13c3164ff1",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 89,
"avg_line_length": 30.92248062015504,
"alnum_prop": 0.5447480571571822,
"repo_name": "svetlyak40wt/svetlyak40wt.recipe.symlinks",
"id": "7a8a3bbf6e8abcc89515e4e0f28309ca98d03a17",
"size": "3989",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "svetlyak40wt/recipe/symlinks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "5671"
}
],
"symlink_target": ""
}
|
from django import template
register = template.Library()


@register.filter
def print_model(value):
    """Return displayable info for every direct, non-primary-key field.

    For each such field of ``value``'s model, collects
    ``[field_name, verbose_name, display_value, field_type]``, ordered by
    field declaration order (``creation_counter``).
    """
    klass = value.__class__
    fields = klass._meta.get_all_field_names()
    result = {}
    for f in fields:
        (field, model, direct, m2m) = klass._meta.get_field_by_name(f)
        if not direct or field.primary_key:
            # Skip reverse relations and the primary key.
            continue
        v = value._get_FIELD_display(field)
        t = field.__class__.__name__
        result[field.creation_counter] = [f, field.verbose_name, v, t]
    # Bug fix: ``result.keys()`` has no ``.sort()`` on Python 3; sort the
    # keys with sorted() instead (works on Python 2 as well).
    return [result[k] for k in sorted(result)]
@register.filter
def has_user_comment(value, arg):
    """Return True when any comment in ``value`` was written by user ``arg``."""
    return any(comment.user == arg for comment in value)
|
{
"content_hash": "a7af992f20900f760607692ca1dfd0f9",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 70,
"avg_line_length": 26.151515151515152,
"alnum_prop": 0.608342989571263,
"repo_name": "xenim/django-radioportal-review",
"id": "f4e9e79c1a516f70370647a1e992224cf3f2aeee",
"size": "863",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "radioportal_review/templatetags/printmodel.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "937"
},
{
"name": "HTML",
"bytes": "9830"
},
{
"name": "Python",
"bytes": "20071"
}
],
"symlink_target": ""
}
|
import time
class Timer(object):
    """ basic second-based timer """

    def __init__(self, start=None):
        # Bug fix: the original called self.reset() without arguments,
        # silently discarding the ``start`` parameter.
        self.reset(start)

    def after(self, seconds):
        """Return True once at least ``seconds`` have elapsed since start."""
        return self.now - self.start >= float(seconds)

    def reset(self, start=None):
        """Restart the timer at ``start`` (epoch seconds) or at the current time."""
        self.start = float(start) if start is not None else time.time()

    @property
    def now(self):
        # Current wall-clock time in epoch seconds.
        return time.time()
|
{
"content_hash": "c896072080a7d57a6757618155a003b8",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 71,
"avg_line_length": 21.27777777777778,
"alnum_prop": 0.597911227154047,
"repo_name": "Yasumoto/zktraffic",
"id": "075dd54ecc4ffeb56f5bcaaa189c408bee7f1af8",
"size": "1284",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "zktraffic/stats/timer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "159243"
}
],
"symlink_target": ""
}
|
from flask import Flask, g, request
import deepdream_test
# Flask application exposing the deepdream endpoint.
app = Flask(__name__)
# NOTE(review): one-time model preloading is disabled; make_dream rebuilds
# the net on every request instead — presumably slow, confirm intent.
#@app.before_first_request
#def load_model():
#    g.net = deepdream_test.make_net('../caffe/models/bvlc_googlenet/')
@app.route('/', methods=['GET', 'POST'])
def make_dream():
    """Accept an uploaded image and return its deep-dreamed version.

    Query params:
        iters: number of dream iterations (default 1).
        inverse_gradient: any non-zero value inverts the gradient direction.
    Returns 'No image found.' when no file was posted.
    """
    net = deepdream_test.make_net('../caffe/models/bvlc_googlenet/')
    if request.method == 'POST' and request.files:
        # Bug fix: dict views are not subscriptable on Python 3; take the
        # first uploaded file regardless of its form field name.
        key = next(iter(request.files))
        filename = 'tmp/%s' % key
        with open(filename, 'wb') as file:
            file.write(request.files[key].read())
        # Bug fix: query args arrive as strings; the original passed the raw
        # string for a supplied 'iters' but the int 1 when defaulted.
        num_iterations = int(request.args.get('iters', 1))
        inverse_gradient = bool(int(request.args.get('inverse_gradient', 0)))
        return deepdream_test.layerDream(net, filename, num_iterations,
                                         inverse_gradient)
    #else if request.method == 'GET' and request.params['image_url']:
    # TODO: add the ability to dreamify a url image
    return 'No image found.'
if __name__ == '__main__':
    # NOTE(review): debug=True on 0.0.0.0 exposes the Werkzeug debugger to
    # the whole network — confirm this is never used in production.
    app.run(host="0.0.0.0", port=5000, debug=True)
|
{
"content_hash": "3977f4b01b96ddefd5554b2a5802c693",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 89,
"avg_line_length": 35.16129032258065,
"alnum_prop": 0.6422018348623854,
"repo_name": "hoqqanen/dreaming-as-a-service",
"id": "b4165d37a8b383ded1cd5fabd53afe74f8d073a3",
"size": "1090",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9443"
},
{
"name": "Shell",
"bytes": "93"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
# Form implementation generated from reading ui file '.\acq4\modules\Patch\devicePagetemplate.ui'
#
# Created: Thu Jan 15 20:16:09 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# Compatibility shims emitted by pyuic4: on PyQt4 API v2 (or Python 3),
# QString.fromUtf8 / QApplication.UnicodeUTF8 no longer exist, so fall back
# to identity / two-argument translate.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
    """Auto-generated (pyuic4) UI class for the Patch-module device page.

    Do not edit by hand: regenerate from devicePagetemplate.ui instead.
    Builds a grid of mode radio buttons (Off/VC/IC) with per-mode Pulse and
    Hold checkboxes and SpinBox value entries.
    """
    def setupUi(self, Form):
        """Create and lay out all widgets on ``Form``."""
        Form.setObjectName(_fromUtf8("Form"))
        Form.resize(210, 159)
        self.gridLayout = QtGui.QGridLayout(Form)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        self.offModeRadio = QtGui.QRadioButton(Form)
        self.offModeRadio.setChecked(False)
        self.offModeRadio.setObjectName(_fromUtf8("offModeRadio"))
        self.gridLayout.addWidget(self.offModeRadio, 0, 0, 1, 1)
        # VC (voltage clamp) row: mode radio + pulse/hold controls.
        self.vcModeRadio = QtGui.QRadioButton(Form)
        self.vcModeRadio.setChecked(True)
        self.vcModeRadio.setObjectName(_fromUtf8("vcModeRadio"))
        self.gridLayout.addWidget(self.vcModeRadio, 1, 0, 1, 1)
        self.vcPulseCheck = QtGui.QCheckBox(Form)
        self.vcPulseCheck.setChecked(True)
        self.vcPulseCheck.setObjectName(_fromUtf8("vcPulseCheck"))
        self.gridLayout.addWidget(self.vcPulseCheck, 1, 1, 1, 1)
        self.vcPulseSpin = SpinBox(Form)
        self.vcPulseSpin.setObjectName(_fromUtf8("vcPulseSpin"))
        self.gridLayout.addWidget(self.vcPulseSpin, 1, 2, 1, 1)
        self.vcHoldCheck = QtGui.QCheckBox(Form)
        self.vcHoldCheck.setObjectName(_fromUtf8("vcHoldCheck"))
        self.gridLayout.addWidget(self.vcHoldCheck, 2, 1, 1, 1)
        self.vcHoldSpin = SpinBox(Form)
        self.vcHoldSpin.setObjectName(_fromUtf8("vcHoldSpin"))
        self.gridLayout.addWidget(self.vcHoldSpin, 2, 2, 1, 1)
        # IC (current clamp) row: mode radio + pulse/hold controls.
        self.icModeRadio = QtGui.QRadioButton(Form)
        self.icModeRadio.setObjectName(_fromUtf8("icModeRadio"))
        self.gridLayout.addWidget(self.icModeRadio, 3, 0, 1, 1)
        self.icPulseCheck = QtGui.QCheckBox(Form)
        self.icPulseCheck.setChecked(True)
        self.icPulseCheck.setObjectName(_fromUtf8("icPulseCheck"))
        self.gridLayout.addWidget(self.icPulseCheck, 3, 1, 1, 1)
        self.icPulseSpin = SpinBox(Form)
        self.icPulseSpin.setObjectName(_fromUtf8("icPulseSpin"))
        self.gridLayout.addWidget(self.icPulseSpin, 3, 2, 1, 1)
        self.icHoldCheck = QtGui.QCheckBox(Form)
        self.icHoldCheck.setObjectName(_fromUtf8("icHoldCheck"))
        self.gridLayout.addWidget(self.icHoldCheck, 4, 1, 1, 1)
        self.icHoldSpin = SpinBox(Form)
        self.icHoldSpin.setObjectName(_fromUtf8("icHoldSpin"))
        self.gridLayout.addWidget(self.icHoldSpin, 4, 2, 1, 1)
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        """Apply translated titles, labels and spinbox unit suffixes."""
        Form.setWindowTitle(_translate("Form", "Form", None))
        self.offModeRadio.setText(_translate("Form", "Off", None))
        self.vcModeRadio.setText(_translate("Form", "VC", None))
        self.vcPulseCheck.setText(_translate("Form", "Pulse", None))
        self.vcPulseSpin.setSuffix(_translate("Form", "V", None))
        self.vcHoldCheck.setText(_translate("Form", "Hold", None))
        self.vcHoldSpin.setSuffix(_translate("Form", "V", None))
        self.icModeRadio.setText(_translate("Form", "IC", None))
        self.icPulseCheck.setText(_translate("Form", "Pulse", None))
        self.icPulseSpin.setSuffix(_translate("Form", "A", None))
        self.icHoldCheck.setText(_translate("Form", "Hold", None))
        self.icHoldSpin.setSuffix(_translate("Form", "A", None))
from acq4.pyqtgraph import SpinBox
|
{
"content_hash": "758fab7e584971fb3736feb30f400192",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 97,
"avg_line_length": 46.367816091954026,
"alnum_prop": 0.6829449677739217,
"repo_name": "meganbkratz/acq4",
"id": "17b493ece089feba796d5d1c1bc6d018cec626e5",
"size": "4058",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "acq4/modules/Patch/devicePagetemplate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "3037"
},
{
"name": "Arduino",
"bytes": "18651"
},
{
"name": "Batchfile",
"bytes": "64"
},
{
"name": "C",
"bytes": "705091"
},
{
"name": "C++",
"bytes": "321384"
},
{
"name": "CSS",
"bytes": "716"
},
{
"name": "MATLAB",
"bytes": "1752"
},
{
"name": "Objective-C",
"bytes": "596020"
},
{
"name": "Processing",
"bytes": "13403"
},
{
"name": "Python",
"bytes": "5922488"
}
],
"symlink_target": ""
}
|
from airflow import DAG
from airflow.providers.cncf.kubernetes.operators import kubernetes_pod
from airflow.providers.google.cloud.transfers import gcs_to_bigquery
# DAG-level defaults applied to every task declared below.
default_args = {
    "owner": "Google",
    "depends_on_past": False,
    "start_date": "2021-03-01",
}
# Daily pipeline: transform the EPA VOC daily-summary CSV inside a
# Kubernetes pod, then load the result into BigQuery.
with DAG(
    dag_id="epa_historical_air_quality.voc_daily_summary",
    default_args=default_args,
    max_active_runs=1,
    schedule_interval="30 13 * * *",
    catchup=False,
    default_view="graph",
) as dag:
    # Run CSV transform within kubernetes pod
    transform_csv = kubernetes_pod.KubernetesPodOperator(
        task_id="transform_csv",
        name="voc_daily_summary",
        namespace="composer",
        service_account_name="datasets",
        image_pull_policy="Always",
        image="{{ var.json.epa_historical_air_quality.container_registry.run_csv_transform_kub }}",
        # Transform configuration is passed to the container via env vars.
        env_vars={
            "SOURCE_URL": "https://aqs.epa.gov/aqsweb/airdata/daily_VOCS_YEAR_ITERATOR.zip",
            "START_YEAR": "1990",
            "SOURCE_FILE": "files/data.csv",
            "TARGET_FILE": "files/data_output.csv",
            "CHUNKSIZE": "2500000",
            "TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
            "TARGET_GCS_PATH": "data/epa_historical_air_quality/voc_daily_summary/files/data_output.csv",
            "DATA_NAMES": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "sample_duration",\n "pollutant_standard", "date_local", "units_of_measure", "event_type", "observation_count",\n "observation_percent", "arithmetic_mean", "first_max_value", "first_max_hour", "aqi",\n "method_code", "method_name", "local_site_name", "address", "state_name",\n "county_name", "city_name", "cbsa_name", "date_of_last_change" ]',
            "DATA_DTYPES": '{ "state_code": "str", "county_code": "str", "site_num": "str", "parameter_code": "int32", "poc": "int32",\n "latitude": "float64", "longitude": "float64", "datum": "str", "parameter_name": "str", "sample_duration": "str",\n "pollutant_standard": "str", "date_local": "datetime64[ns]", "units_of_measure": "str", "event_type": "str", "observation_count": "int32",\n "observation_percent": "float64", "arithmetic_mean": "float64", "first_max_value": "float64", "first_max_hour": "int32", "aqi": "str",\n "method_code": "str", "method_name": "str", "local_site_name": "str", "address": "str", "state_name": "str",\n "county_name": "str", "city_name": "str", "cbsa_name": "str", "date_of_last_change": "datetime64[ns]" }',
        },
        resources={
            "request_memory": "8G",
            "request_cpu": "3",
            "request_ephemeral_storage": "5G",
        },
    )
    # Task to load CSV data to a BigQuery table
    load_to_bq = gcs_to_bigquery.GCSToBigQueryOperator(
        task_id="load_to_bq",
        bucket="{{ var.value.composer_bucket }}",
        source_objects=[
            "data/epa_historical_air_quality/voc_daily_summary/files/data_output.csv"
        ],
        source_format="CSV",
        destination_project_dataset_table="{{ var.json.epa_historical_air_quality.destination_tables.voc_daily_summary }}",
        skip_leading_rows=1,
        allow_quoted_newlines=True,
        write_disposition="WRITE_TRUNCATE",
        # Explicit BigQuery schema for the destination table.
        schema_fields=[
            {
                "name": "state_code",
                "type": "STRING",
                "description": "The FIPS code of the state in which the monitor resides.",
                "mode": "NULLABLE",
            },
            {
                "name": "county_code",
                "type": "STRING",
                "description": "The FIPS code of the county in which the monitor resides.",
                "mode": "NULLABLE",
            },
            {
                "name": "site_num",
                "type": "STRING",
                "description": "A unique number within the county identifying the site.",
                "mode": "NULLABLE",
            },
            {
                "name": "parameter_code",
                "type": "INTEGER",
                "description": "The AQS code corresponding to the parameter measured by the monitor.",
                "mode": "NULLABLE",
            },
            {
                "name": "poc",
                "type": "INTEGER",
                "description": "This is the “Parameter Occurrence Code” used to distinguish different instruments that measure the same parameter at the same site.",
                "mode": "NULLABLE",
            },
            {
                "name": "latitude",
                "type": "FLOAT",
                "description": "The monitoring site’s angular distance north of the equator measured in decimal degrees.",
                "mode": "NULLABLE",
            },
            {
                "name": "longitude",
                "type": "FLOAT",
                "description": "The monitoring site’s angular distance east of the prime meridian measured in decimal degrees.",
                "mode": "NULLABLE",
            },
            {
                "name": "datum",
                "type": "STRING",
                "description": "The Datum associated with the Latitude and Longitude measures.",
                "mode": "NULLABLE",
            },
            {
                "name": "parameter_name",
                "type": "STRING",
                "description": "The name or description assigned in AQS to the parameter measured by the monitor. Parameters may be pollutants or non-pollutants.",
                "mode": "NULLABLE",
            },
            {
                "name": "sample_duration",
                "type": "STRING",
                "description": "The length of time that air passes through the monitoring device before it is analyzed (measured). So, it represents an averaging period in the atmosphere (for example, a 24-hour sample duration draws ambient air over a collection filter for 24 straight hours). For continuous monitors, it can represent an averaging time of many samples (for example, a 1-hour value may be the average of four one-minute samples collected during each quarter of the hour).",
                "mode": "NULLABLE",
            },
            {
                "name": "pollutant_standard",
                "type": "STRING",
                "description": "A description of the ambient air quality standard rules used to aggregate statistics. (See description at beginning of document.)",
                "mode": "NULLABLE",
            },
            {
                "name": "date_local",
                "type": "TIMESTAMP",
                "description": "The calendar date for the summary. All daily summaries are for the local standard day (midnight to midnight) at the monitor.",
                "mode": "NULLABLE",
            },
            {
                "name": "units_of_measure",
                "type": "STRING",
                "description": "The unit of measure for the parameter. QAD always returns data in the standard units for the parameter. Submitters are allowed to report data in any unit and EPA converts to a standard unit so that we may use the data in calculations.",
                "mode": "NULLABLE",
            },
            {
                "name": "event_type",
                "type": "STRING",
                "description": "Indicates whether data measured during exceptional events are included in the summary. A wildfire is an example of an exceptional event; it is something that affects air quality, but the local agency has no control over. No Events means no events occurred. Events Included means events occurred and the data from them is included in the summary. Events Excluded means that events occurred but data form them is excluded from the summary. Concurred Events Excluded means that events occurred but only EPA concurred exclusions are removed from the summary. If an event occurred for the parameter in question, the data will have multiple records for each monitor.",
                "mode": "NULLABLE",
            },
            {
                "name": "observation_count",
                "type": "INTEGER",
                "description": "The number of observations (samples) taken during the day.",
                "mode": "NULLABLE",
            },
            {
                "name": "observation_percent",
                "type": "FLOAT",
                "description": "The percent representing the number of observations taken with respect to the number scheduled to be taken during the day. This is only calculated for monitors where measurements are required (e.g., only certain parameters).",
                "mode": "NULLABLE",
            },
            {
                "name": "arithmetic_mean",
                "type": "FLOAT",
                "description": "The average (arithmetic mean) value for the day.",
                "mode": "NULLABLE",
            },
            {
                "name": "first_max_value",
                "type": "FLOAT",
                "description": "The highest value for the day.",
                "mode": "NULLABLE",
            },
            {
                "name": "first_max_hour",
                "type": "INTEGER",
                "description": "The hour (on a 24-hour clock) when the highest value for the day (the previous field) was taken.",
                "mode": "NULLABLE",
            },
            {
                "name": "aqi",
                "type": "INTEGER",
                "description": "The Air Quality Index for the day for the pollutant, if applicable.",
                "mode": "NULLABLE",
            },
            {
                "name": "method_code",
                "type": "INTEGER",
                "description": "An internal system code indicating the method (processes, equipment, and protocols) used in gathering and measuring the sample. The method name is in the next column.",
                "mode": "NULLABLE",
            },
            {
                "name": "method_name",
                "type": "STRING",
                "description": "A short description of the processes, equipment, and protocols used in gathering and measuring the sample.",
                "mode": "NULLABLE",
            },
            {
                "name": "local_site_name",
                "type": "STRING",
                "description": "The name of the site (if any) given by the State, local, or tribal air pollution control agency that operates it.",
                "mode": "NULLABLE",
            },
            {
                "name": "address",
                "type": "STRING",
                "description": "The approximate street address of the monitoring site.",
                "mode": "NULLABLE",
            },
            {
                "name": "state_name",
                "type": "STRING",
                "description": "The name of the state where the monitoring site is located.",
                "mode": "NULLABLE",
            },
            {
                "name": "county_name",
                "type": "STRING",
                "description": "The name of the county where the monitoring site is located.",
                "mode": "NULLABLE",
            },
            {
                "name": "city_name",
                "type": "STRING",
                "description": "The name of the city where the monitoring site is located. This represents the legal incorporated boundaries of cities and not urban areas.",
                "mode": "NULLABLE",
            },
            {
                "name": "cbsa_name",
                "type": "STRING",
                "description": "The name of the core bases statistical area (metropolitan area) where the monitoring site is located.",
                "mode": "NULLABLE",
            },
            {
                "name": "date_of_last_change",
                "type": "TIMESTAMP",
                "description": "The date the last time any numeric values in this record were updated in the AQS data system.",
                "mode": "NULLABLE",
            },
        ],
    )
    # The transform must finish before the BigQuery load starts.
    transform_csv >> load_to_bq
|
{
"content_hash": "c0417b1dc33b53a73b7b945ce48b20c3",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 752,
"avg_line_length": 51.36708860759494,
"alnum_prop": 0.5367175948743224,
"repo_name": "llooker/public-datasets-pipelines",
"id": "cba70d8f7f69ee8631f08bbdd6f7e1c9b15f2759",
"size": "12759",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "datasets/epa_historical_air_quality/pipelines/voc_daily_summary/voc_daily_summary_dag.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "58993"
},
{
"name": "HCL",
"bytes": "394340"
},
{
"name": "Jinja",
"bytes": "11245"
},
{
"name": "Jupyter Notebook",
"bytes": "15325"
},
{
"name": "Python",
"bytes": "2616241"
}
],
"symlink_target": ""
}
|
# Per-device rolling-code state; m_w / m_z presumably seed a
# multiply-with-carry PRNG shared with the Arduino side — TODO confirm.
db.define_table('rolling_code_seeds',
    Field('myindex','integer',length=8),
    Field('m_w','integer',length=32, default=20110715),
    Field('m_z','integer',length=32, default=20110911))
# a table to store users and their roles
db.define_table('valid_members',
    Field('emailaddr',requires=IS_NOT_EMPTY()),
    Field('user','boolean',default=1),
    Field('admin','boolean',default=0))
# a table to store remote arduino shac url
db.define_table('shac_config',
    Field('myindex','integer',length=8),
    Field('shac_url',requires=IS_NOT_EMPTY()))
# ValidMembers.insert(emailaddr='philipbooysen@gmail.com',user=1,admin=1)
# and define some global variables that will make code more compact
RollingCodeSeeds = db.rolling_code_seeds
ValidMembers = db.valid_members
ShacConfig = db.shac_config
# id of the currently signed-in web2py user (None when anonymous)
uid = auth.user_id
|
{
"content_hash": "62332c2fdc9dc7b6f60c0cf63d074cfd",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 73,
"avg_line_length": 34.208333333333336,
"alnum_prop": 0.7149817295980512,
"repo_name": "house4hack/openSHAC",
"id": "84ad426dda5d10b7c63632a2dbbd2a85ebe7873d",
"size": "860",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web2py_shac/applications/enter/models/db_enter.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "11788"
},
{
"name": "C++",
"bytes": "7299"
},
{
"name": "CSS",
"bytes": "27301"
},
{
"name": "IDL",
"bytes": "417"
},
{
"name": "Java",
"bytes": "48873"
},
{
"name": "JavaScript",
"bytes": "8162"
},
{
"name": "Perl",
"bytes": "2681"
},
{
"name": "Processing",
"bytes": "14833"
},
{
"name": "Python",
"bytes": "133232"
},
{
"name": "Shell",
"bytes": "8"
},
{
"name": "TypeScript",
"bytes": "1564"
}
],
"symlink_target": ""
}
|
from sys import exit
def no_config(f):
    """Decorator marking a target command as runnable without a config check."""
    setattr(f, '__no_config', True)
    return f
class Target(object):
def __init__(self, buildozer):
super(Target, self).__init__()
self.buildozer = buildozer
self.build_mode = 'debug'
self.platform_update = False
def check_requirements(self):
pass
def check_configuration_tokens(self, errors=None):
if errors:
self.buildozer.info('Check target configuration tokens')
self.buildozer.error(
'{0} error(s) found in the buildozer.spec'.format(
len(errors)))
for error in errors:
print error
exit(1)
def compile_platform(self):
pass
def install_platform(self):
pass
def get_custom_commands(self):
result = []
for x in dir(self):
if not x.startswith('cmd_'):
continue
if x[4:] in self.buildozer.standard_cmds:
continue
result.append((x[4:], getattr(self, x).__doc__))
return result
def get_available_packages(self):
return ['kivy']
def run_commands(self, args):
if not args:
self.buildozer.error('Missing target command')
self.buildozer.usage()
exit(1)
result = []
last_command = []
for arg in args:
if not arg.startswith('--'):
if last_command:
result.append(last_command)
last_command = []
last_command.append(arg)
else:
if not last_command:
self.buildozer.error('Argument passed without a command')
self.buildozer.usage()
exit(1)
last_command.append(arg)
if last_command:
result.append(last_command)
config_check = False
for item in result:
command, args = item[0], item[1:]
if not hasattr(self, 'cmd_{0}'.format(command)):
self.buildozer.error('Unknown command {0}'.format(command))
exit(1)
func = getattr(self, 'cmd_{0}'.format(command))
need_config_check = not hasattr(func, '__no_config')
if need_config_check and not config_check:
config_check = True
self.check_configuration_tokens()
func(args)
def cmd_clean(self, *args):
self.buildozer.clean_platform()
def cmd_update(self, *args):
self.platform_update = True
self.buildozer.prepare_for_build()
def cmd_debug(self, *args):
self.buildozer.prepare_for_build()
self.build_mode = 'debug'
self.buildozer.build()
def cmd_release(self, *args):
self.buildozer.prepare_for_build()
self.build_mode = 'release'
self.buildozer.build()
def cmd_deploy(self, *args):
self.buildozer.prepare_for_build()
def cmd_run(self, *args):
self.buildozer.prepare_for_build()
    def cmd_serve(self, *args):
        '''Serve the build output over HTTP via the buildozer instance.'''
        self.buildozer.cmd_serve()
|
{
"content_hash": "7cb9b2e98df9bbb9efa53e5b57748722",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 77,
"avg_line_length": 28.178571428571427,
"alnum_prop": 0.5339036755386565,
"repo_name": "eHealthAfrica/buildozer",
"id": "4ba9f0e4841f5f19fe2d463e619ffb05e3b05089",
"size": "3156",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "buildozer/target.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "86373"
}
],
"symlink_target": ""
}
|
#-------------------------------------------------------------------------
# CxxTest: A lightweight C++ unit testing library.
# Copyright (c) 2008 Sandia Corporation.
# This software is distributed under the LGPL License v3
# For more information, see the COPYING file in the top CxxTest directory.
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
#-------------------------------------------------------------------------
# vim: fileencoding=utf-8
from __future__ import division
# the above import important for forward-compatibility with python3,
# which is already the default in archlinux!
__all__ = ['main', 'create_manpage']
import __release__
import os
import sys
import re
import glob
from optparse import OptionParser
import cxxtest_parser
from string import Template
try:
import cxxtest_fog
imported_fog=True
except ImportError:
imported_fog=False
from cxxtest_misc import abort
try:
from os.path import relpath
except ImportError:
from cxxtest_misc import relpath
# Global data is initialized by main()
options = []        # parsed optparse options; replaced by parseCommandline()
suites = []         # test-suite descriptions collected from the input headers
wrotePreamble = 0   # flag: CxxTest preamble already emitted
wroteWorld = 0      # flag: world definitions already emitted
lastIncluded = ''   # last header #include'd, used to suppress duplicates
def main(args=sys.argv, catch=False):
    '''The main program: reset module state, parse the command line, scan
    the input headers for test suites and write the generated runner.

    Args:
        args: argv-style argument list (args[0] is skipped as program name)
        catch: when True, swallow SystemExit so embedding callers survive
    '''
    #
    # Reset global state
    #
    global wrotePreamble
    wrotePreamble=0
    global wroteWorld
    wroteWorld=0
    global lastIncluded
    lastIncluded = ''
    global suites
    suites = []
    global options
    options = []
    #
    try:
        files = parseCommandline(args)
        # Use the FOG C++ parser only when available and requested.
        if imported_fog and options.fog:
            [options,suites] = cxxtest_fog.scanInputFiles( files, options )
        else:
            [options,suites] = cxxtest_parser.scanInputFiles( files, options )
        writeOutput()
    except SystemExit:
        if not catch:
            raise
def create_parser(asciidoc=False):
    '''Build and return the optparse.OptionParser for cxxtestgen.

    Args:
        asciidoc: when True, phrase the description for the ASCIIDOC
            manpage (no quote characters around literal tool names).
    '''
    parser = OptionParser("cxxtestgen [options] [<filename> ...]")
    if asciidoc:
        parser.description="The cxxtestgen command processes C++ header files to perform test discovery, and then it creates files for the CxxTest test runner."
    else:
        parser.description="The 'cxxtestgen' command processes C++ header files to perform test discovery, and then it creates files for the 'CxxTest' test runner."
    # General / output options
    parser.add_option("--version",
                      action="store_true", dest="version", default=False,
                      help="Write the CxxTest version.")
    parser.add_option("-o", "--output",
                      dest="outputFileName", default=None, metavar="NAME",
                      help="Write output to file NAME.")
    parser.add_option("-w","--world", dest="world", default="cxxtest",
                      help="The label of the tests, used to name the XML results.")
    parser.add_option("", "--include", action="append",
                      dest="headers", default=[], metavar="HEADER",
                      help="Include file HEADER in the test runner before other headers.")
    parser.add_option("", "--abort-on-fail",
                      action="store_true", dest="abortOnFail", default=False,
                      help="Abort tests on failed asserts (like xUnit).")
    parser.add_option("", "--main",
                      action="store", dest="main", default="main",
                      help="Specify an alternative name for the main() function.")
    parser.add_option("", "--headers",
                      action="store", dest="header_filename", default=None,
                      help="Specify a filename that contains a list of header files that are processed to generate a test runner.")
    # Runner / printer selection
    parser.add_option("", "--runner",
                      dest="runner", default="", metavar="CLASS",
                      help="Create a test runner that processes test events using the class CxxTest::CLASS.")
    parser.add_option("", "--gui",
                      dest="gui", metavar="CLASS",
                      help="Create a GUI test runner that processes test events using the class CxxTest::CLASS. (deprecated)")
    parser.add_option("", "--error-printer",
                      action="store_true", dest="error_printer", default=False,
                      help="Create a test runner using the ErrorPrinter class, and allow the use of the standard library.")
    parser.add_option("", "--xunit-printer",
                      action="store_true", dest="xunit_printer", default=False,
                      help="Create a test runner using the XUnitPrinter class.")
    parser.add_option("", "--xunit-file", dest="xunit_file", default="",
                      help="The file to which the XML summary is written for test runners using the XUnitPrinter class.  The default XML filename is TEST-<world>.xml, where <world> is the value of the --world option.  (default: cxxtest)")
    # Language-feature toggles
    parser.add_option("", "--have-std",
                      action="store_true", dest="haveStandardLibrary", default=False,
                      help="Use the standard library (even if not found in tests).")
    parser.add_option("", "--no-std",
                      action="store_true", dest="noStandardLibrary", default=False,
                      help="Do not use standard library (even if found in tests).")
    parser.add_option("", "--have-eh",
                      action="store_true", dest="haveExceptionHandling", default=False,
                      help="Use exception handling (even if not found in tests).")
    parser.add_option("", "--no-eh",
                      action="store_true", dest="noExceptionHandling", default=False,
                      help="Do not use exception handling (even if found in tests).")
    parser.add_option("", "--longlong",
                      dest="longlong", default=None, metavar="TYPE",
                      help="Use TYPE as for long long integers.  (default: not supported)")
    parser.add_option("", "--no-static-init",
                      action="store_true", dest="noStaticInit", default=False,
                      help="Do not rely on static initialization in the test runner.")
    parser.add_option("", "--template",
                      dest="templateFileName", default=None, metavar="TEMPLATE",
                      help="Generate the test runner using file TEMPLATE to define a template.")
    parser.add_option("", "--root",
                      action="store_true", dest="root", default=False,
                      help="Write the main() function and global data for a test runner.")
    parser.add_option("", "--part",
                      action="store_true", dest="part", default=False,
                      help="Write the tester classes for a test runner.")
    #parser.add_option("", "--factor",
                      #action="store_true", dest="factor", default=False,
                      #help="Declare the _CXXTEST_FACTOR macro.  (deprecated)")
    if imported_fog:
        fog_help = "Use new FOG C++ parser"
    else:
        fog_help = "Use new FOG C++ parser (disabled)"
    parser.add_option("-f", "--fog-parser",
                        action="store_true",
                        dest="fog",
                        default=False,
                        help=fog_help
                        )
    return parser
def parseCommandline(args):
    '''Analyze command line arguments and return the list of input files.

    Populates the module-global ``options`` from the parsed flags, expands
    a ``--headers`` list file into extra positional arguments, normalizes
    the runner/xunit settings and validates option combinations. Calls
    abort()/parser.error() (which exit the process) on invalid input.
    '''
    global options
    parser = create_parser()
    (options, args) = parser.parse_args(args=args)
    if options.header_filename is not None:
        if not os.path.exists(options.header_filename):
            abort( "ERROR: the file '%s' does not exist!" % options.header_filename )
        # Each non-blank line of the file names one header to process.
        INPUT = open(options.header_filename)
        try:
            args.extend(line.strip() for line in INPUT)
        finally:
            INPUT.close()
    if options.fog and not imported_fog:
        abort( "Cannot use the FOG parser. Check that the 'ply' package is installed. The 'ordereddict' package is also required if running Python 2.6")
    if options.version:
        printVersion()
    # the cxxtest builder relies on this behaviour! don't remove
    if options.runner == 'none':
        options.runner = None
    if options.xunit_printer or options.runner == "XUnitPrinter":
        options.xunit_printer=True
        options.runner="XUnitPrinter"
    # Default the XML report name from the world label. (The original code
    # had two branches guarded by an argument-count test whose bodies were
    # identical, which reduces to this single check.)
    if options.xunit_file == "":
        if options.world == "":
            options.world = "cxxtest"
        options.xunit_file="TEST-"+options.world+".xml"
    if options.error_printer:
        options.runner= "ErrorPrinter"
        options.haveStandardLibrary = True
    if options.noStaticInit and (options.root or options.part):
        abort( '--no-static-init cannot be used with --root/--part' )
    if options.gui and not options.runner:
        options.runner = 'StdioPrinter'
    files = setFiles(args[1:])
    if len(files) == 0 and not options.root:
        # parser.error() prints the usage and message to stderr and exits;
        # the original wrapped it in a dead sys.stderr.write() call.
        parser.error("No input files found")
    return files
def printVersion():
    '''Write the CxxTest release banner to stdout, then exit successfully.'''
    banner = "This is CxxTest version %s.\n" % __release__.__version__
    sys.stdout.write(banner)
    sys.exit(0)
def setFiles(patterns ):
    '''Resolve the command-line patterns into the concrete input files.'''
    return expandWildcards(patterns)
def expandWildcards( patterns ):
    '''Glob every pattern and return all matching file names with
    backslashes normalized to forward slashes.'''
    return [fixBackslashes(match)
            for pattern in patterns
            for match in glob.glob(pattern)]
def fixBackslashes( fileName ):
    '''Return *fileName* with every backslash converted to a forward slash.

    Keeps generated #include paths portable across Windows and POSIX; a
    plain str.replace suffices (the original used re.sub for a fixed
    single-character substitution).
    '''
    return fileName.replace('\\', '/')
def writeOutput():
    '''Generate the runner, template-based when a template file was given.'''
    writer = writeTemplateOutput if options.templateFileName else writeSimpleOutput
    writer()
def writeSimpleOutput():
    '''Create the runner without a template: preamble, optional main() and
    the world definitions.'''
    output = startOutputFile()
    writePreamble( output )
    if options.root or not options.part:
        writeMain( output )
        # Flag consumed by the generated code to order static initialization.
        if len(suites) > 0:
            output.write("bool "+suites[0]['object']+"_init = false;\n")
    writeWorld( output )
    output.close()
# Matches '#include <cxxtest/...>' lines in a template file.
include_re = re.compile( r"\s*\#\s*include\s+<cxxtest/" )
# Template placeholder line marking where the preamble is inserted.
preamble_re = re.compile( r"^\s*<CxxTest\s+preamble>\s*$" )
# Template placeholder line marking where the world definitions are inserted.
world_re = re.compile( r"^\s*<CxxTest\s+world>\s*$" )
def writeTemplateOutput():
    '''Create output based on a template file.

    Copies the template line by line; cxxtest #include lines and the
    <CxxTest preamble>/<CxxTest world> placeholder lines trigger emission
    of the preamble / world definitions. Both files are closed even when a
    write fails (the original leaked them on error).
    '''
    template = open(options.templateFileName)
    try:
        output = startOutputFile()
        try:
            for line in template:
                if include_re.search( line ):
                    # The preamble must precede any cxxtest header include.
                    writePreamble( output )
                    output.write( line )
                elif preamble_re.search( line ):
                    writePreamble( output )
                elif world_re.search( line ):
                    if len(suites) > 0:
                        output.write("bool "+suites[0]['object']+"_init = false;\n")
                    writeWorld( output )
                else:
                    output.write( line )
        finally:
            output.close()
    finally:
        template.close()
def startOutputFile():
    '''Open the destination (file or stdout) and emit the header comment.'''
    if options.outputFileName is None:
        output = sys.stdout
    else:
        output = open( options.outputFileName, 'w' )
    output.write( "/* Generated file, do not edit */\n\n" )
    return output
def writePreamble( output ):
    '''Write the CxxTest header (#includes and #defines); emitted at most
    once per run, guarded by the wrotePreamble module flag.'''
    global wrotePreamble
    if wrotePreamble: return
    output.write( "#ifndef CXXTEST_RUNNING\n" )
    output.write( "#define CXXTEST_RUNNING\n" )
    output.write( "#endif\n" )
    output.write( "\n" )
    if options.xunit_printer:
        output.write( "#include <fstream>\n" )
    # Feature #defines must precede the cxxtest headers that test them.
    if options.haveStandardLibrary:
        output.write( "#define _CXXTEST_HAVE_STD\n" )
    if options.haveExceptionHandling:
        output.write( "#define _CXXTEST_HAVE_EH\n" )
    if options.abortOnFail:
        output.write( "#define _CXXTEST_ABORT_TEST_ON_FAIL\n" )
    if options.longlong:
        output.write( "#define _CXXTEST_LONGLONG %s\n" % options.longlong )
    #if options.factor:
        #output.write( "#define _CXXTEST_FACTOR\n" )
    # User-requested headers (--include) come before the cxxtest ones.
    for header in options.headers:
        output.write( "#include \"%s\"\n" % header )
    output.write( "#include <cxxtest/TestListener.h>\n" )
    output.write( "#include <cxxtest/TestTracker.h>\n" )
    output.write( "#include <cxxtest/TestRunner.h>\n" )
    output.write( "#include <cxxtest/RealDescriptions.h>\n" )
    output.write( "#include <cxxtest/TestMain.h>\n" )
    if options.runner:
        output.write( "#include <cxxtest/%s.h>\n" % options.runner )
    if options.gui:
        output.write( "#include <cxxtest/%s.h>\n" % options.gui )
    output.write( "\n" )
    wrotePreamble = 1
def writeMain( output ):
    '''Write the main() function for the test runner; emitted only when a
    runner or GUI class was selected.'''
    if not (options.gui or options.runner):
        return
    output.write( 'int %s( int argc, char *argv[] ) {\n' % options.main )
    output.write( ' int status;\n' )
    if options.noStaticInit:
        output.write( ' CxxTest::initialize();\n' )
    if options.gui:
        # The GUI runner wraps the text runner class.
        tester_t = "CxxTest::GuiTuiRunner<CxxTest::%s, CxxTest::%s> " % (options.gui, options.runner)
    else:
        tester_t = "CxxTest::%s" % (options.runner)
    if options.xunit_printer:
       # XUnitPrinter streams the XML summary into the configured file.
       output.write( '    std::ofstream ofstr("%s");\n' % options.xunit_file )
       output.write( '    %s tmp(ofstr);\n' % tester_t )
    else:
       output.write( '    %s tmp;\n' % tester_t )
    output.write( '    CxxTest::RealWorldDescription::_worldName = "%s";\n' % options.world )
    output.write( '    status = CxxTest::Main< %s >( tmp, argc, argv );\n' % tester_t )
    output.write( '    return status;\n')
    output.write( '}\n' )
def writeWorld( output ):
    '''Write the world definitions (suites, root, descriptions); emitted at
    most once per run, guarded by the wroteWorld module flag.'''
    global wroteWorld
    if wroteWorld: return
    # The preamble must be present before any generated definitions.
    writePreamble( output )
    writeSuites( output )
    if options.root or not options.part:
        writeRoot( output )
        writeWorldDescr( output )
    if options.noStaticInit:
        writeInitialize( output )
    wroteWorld = 1
def writeSuites(output):
    '''Write all TestDescriptions and SuiteDescriptions, one group of
    definitions per collected suite.'''
    for suite in suites:
        writeInclude( output, suite['file'] )
        if isGenerated(suite):
            generateSuite( output, suite )
        # With --no-static-init these objects are created later inside
        # CxxTest::initialize() (see writeInitialize) instead of here.
        if not options.noStaticInit:
            if isDynamic(suite):
                writeSuitePointer( output, suite )
            else:
                writeSuiteObject( output, suite )
            writeTestList( output, suite )
            writeSuiteDescription( output, suite )
            writeTestDescriptions( output, suite )
def isGenerated(suite):
    '''True when the suite class itself must be emitted into the runner
    (its body was collected from the header, see generateSuite).'''
    generated = suite['generated']
    return generated
def isDynamic(suite):
    '''True when the suite is dynamic, i.e. carries a 'create' entry.'''
    has_create = 'create' in suite
    return has_create
def writeInclude(output, file):
    '''Emit #include "file" (absolute path), skipping a consecutive
    duplicate of the previously included header.'''
    global lastIncluded
    path = os.path.abspath(file)
    if path == lastIncluded:
        return
    output.write('#include "%s"\n\n' % path)
    lastIncluded = path
def generateSuite( output, suite ):
    '''Emit the class definition for a suite declared with CXXTEST_SUITE(),
    replaying the body lines collected from the header.'''
    output.write('class %s : public CxxTest::TestSuite {\n' % suite['fullname'])
    output.write('public:\n')
    for body_line in suite['lines']:
        output.write(body_line)
    output.write('};\n\n')
def writeSuitePointer( output, suite ):
    '''Emit the static pointer that later holds the dynamic suite instance
    (zero-initialized unless static init is disabled).'''
    if options.noStaticInit:
        template = 'static %s* %s;\n\n'
    else:
        template = 'static %s* %s = 0;\n\n'
    output.write(template % (suite['fullname'], suite['object']))
def writeSuiteObject( output, suite ):
    '''Emit the static instance of a non-dynamic suite.'''
    output.write("static %s %s;\n\n" % (suite['fullname'], suite['object']))
def writeTestList( output, suite ):
    '''Emit the head node of the suite's linked list of tests.'''
    if options.noStaticInit:
        declaration = 'static CxxTest::List %s;\n'
    else:
        declaration = 'static CxxTest::List %s = { 0, 0 };\n'
    output.write(declaration % suite['tlist'])
def writeWorldDescr( output ):
    '''Emit the world-name variable used by RealWorldDescription
    (left uninitialized when static init is disabled).'''
    if options.noStaticInit:
        line = 'const char* CxxTest::RealWorldDescription::_worldName;\n'
    else:
        line = 'const char* CxxTest::RealWorldDescription::_worldName = "cxxtest";\n'
    output.write(line)
def writeTestDescriptions( output, suite ):
    '''Emit one RealTestDescription object per test in the suite.'''
    for current_test in suite['tests']:
        writeTestDescription( output, suite, current_test )
def writeTestDescription( output, suite, test ):
    '''Write a single test description class (and, unless static init is
    disabled, its static instance).'''
    if not options.noStaticInit:
        output.write( 'static class %s : public CxxTest::RealTestDescription {\n' % test['class'] )
    else:
        output.write( 'class %s : public CxxTest::RealTestDescription {\n' % test['class'] )
    #
    output.write( 'public:\n' )
    if not options.noStaticInit:
        # Static-init variant registers itself via the base constructor.
        output.write( ' %s() : CxxTest::RealTestDescription( %s, %s, %s, "%s" ) {}\n' %
                      (test['class'], suite['tlist'], suite['dobject'], test['line'], test['name']) )
    else:
        # --no-static-init variant holds the suite pointer/reference and is
        # constructed explicitly inside CxxTest::initialize().
        if isDynamic(suite):
            output.write( ' %s(%s* _%s) : %s(_%s) { }\n' %
                          (test['class'], suite['fullname'], suite['object'], suite['object'], suite['object']) )
            output.write( ' %s* %s;\n' % (suite['fullname'], suite['object']) )
        else:
            output.write( ' %s(%s& _%s) : %s(_%s) { }\n' %
                          (test['class'], suite['fullname'], suite['object'], suite['object'], suite['object']) )
            output.write( ' %s& %s;\n' % (suite['fullname'], suite['object']) )
    output.write( ' void runTest() { %s }\n' % runBody( suite, test ) )
    #
    if not options.noStaticInit:
        output.write( '} %s;\n\n' % test['object'] )
    else:
        output.write( '};\n\n' )
def runBody( suite, test ):
    '''Return the C++ statement forming TestDescription::runTest().'''
    if isDynamic(suite):
        return dynamicRun(suite, test)
    return staticRun(suite, test)
def dynamicRun( suite, test ):
    '''Return the runTest() body that guards against a null dynamic suite.'''
    obj = suite['object']
    return 'if ( %s ) %s->%s();' % (obj, obj, test['name'])
def staticRun( suite, test ):
    '''Return the runTest() body that calls the test on the static object.'''
    return '%s.%s();' % (suite['object'], test['name'])
def writeSuiteDescription( output, suite ):
    '''Emit the SuiteDescription matching the suite's kind.'''
    writer = writeDynamicDescription if isDynamic(suite) else writeStaticDescription
    writer(output, suite)
def writeDynamicDescription( output, suite ):
    '''Write the DynamicSuiteDescription for a dynamic suite; constructor
    arguments are emitted only when static initialization is allowed
    (otherwise initialize() fills them in later).'''
    output.write( 'CxxTest::DynamicSuiteDescription< %s > %s' % (suite['fullname'], suite['dobject']) )
    if not options.noStaticInit:
        output.write( '( %s, %s, "%s", %s, %s, %s, %s )' %
                      (suite['cfile'], suite['line'], suite['fullname'], suite['tlist'],
                       suite['object'], suite['create'], suite['destroy']) )
    output.write( ';\n\n' )
def writeStaticDescription( output, suite ):
    '''Write the StaticSuiteDescription for a non-dynamic suite; the
    constructor call is omitted under --no-static-init.'''
    output.write( 'CxxTest::StaticSuiteDescription %s' % suite['dobject'] )
    if not options.noStaticInit:
        output.write( '( %s, %s, "%s", %s, %s )' %
                      (suite['cfile'], suite['line'], suite['fullname'], suite['object'], suite['tlist']) )
    output.write( ';\n\n' )
def writeRoot(output):
    '''Emit the include that pulls in CxxTest's static member definitions.'''
    output.write('#include <cxxtest/Root.cpp>\n')
def writeInitialize(output):
    '''Write CxxTest::initialize(), which replaces static initialization
    under --no-static-init: it constructs the test lists, suite objects,
    suite descriptions and test descriptions at runtime, in order.'''
    output.write( 'namespace CxxTest {\n' )
    output.write( ' void initialize()\n' )
    output.write( ' {\n' )
    for suite in suites:
        #print "HERE", suite
        writeTestList( output, suite )
        output.write( ' %s.initialize();\n' % suite['tlist'] )
        #writeSuiteObject( output, suite )
        if isDynamic(suite):
            # Dynamic suites start as a null pointer; created on demand.
            writeSuitePointer( output, suite )
            output.write( ' %s = 0;\n' % suite['object'])
        else:
            writeSuiteObject( output, suite )
        output.write( ' static ')
        writeSuiteDescription( output, suite )
        if isDynamic(suite):
            #output.write( ' %s = %s.suite();\n' % (suite['object'],suite['dobject']) )
            output.write( ' %s.initialize( %s, %s, "%s", %s, %s, %s, %s );\n' %
                          (suite['dobject'], suite['cfile'], suite['line'], suite['fullname'],
                           suite['tlist'], suite['object'], suite['create'], suite['destroy']) )
            output.write( ' %s.setUp();\n' % suite['dobject'])
        else:
            output.write( ' %s.initialize( %s, %s, "%s", %s, %s );\n' %
                          (suite['dobject'], suite['cfile'], suite['line'], suite['fullname'],
                           suite['object'], suite['tlist']) )
        # Each test description is a function-local static, constructed with
        # the suite object/pointer and then registered on the test list.
        for test in suite['tests']:
            output.write( ' static %s %s(%s);\n' %
                          (test['class'], test['object'], suite['object']) )
            output.write( ' %s.initialize( %s, %s, %s, "%s" );\n' %
                          (test['object'], suite['tlist'], suite['dobject'], test['line'], test['name']) )
    output.write( ' }\n' )
    output.write( '}\n' )
man_template=Template("""CXXTESTGEN(1)
=============
:doctype: manpage
NAME
----
cxxtestgen - performs test discovery to create a CxxTest test runner
SYNOPSIS
--------
${usage}
DESCRIPTION
-----------
${description}
OPTIONS
-------
${options}
EXIT STATUS
-----------
*0*::
Success
*1*::
Failure (syntax or usage error; configuration error; document
processing failure; unexpected error).
BUGS
----
See the CxxTest Home Page for the link to the CxxTest ticket repository.
AUTHOR
------
CxxTest was originally written by Erez Volk. Many people have
contributed to it.
RESOURCES
---------
Home page: <http://cxxtest.com/>
CxxTest User Guide: <http://cxxtest.com/cxxtest/doc/guide.html>
COPYING
-------
Copyright (c) 2008 Sandia Corporation. This software is distributed
under the Lesser GNU General Public License (LGPL) v3
""")
def create_manpage():
    """Write the ASCIIDOC manpage file 'cxxtestgen.1.txt'.

    Renders man_template with the parser's usage, description and a
    '::'-terminated entry for every option. The output file is closed even
    when substitution fails (the original leaked it on exception), and the
    option text is joined once instead of built with repeated '+='.
    """
    parser = create_parser(asciidoc=True)
    #
    usage = parser.usage
    description = parser.description
    chunks = []
    for opt in parser.option_list:
        opts = opt._short_opts + opt._long_opts
        optstr = '*' + ', '.join(opts) + '*'
        if opt.metavar is not None:
            optstr += "='%s'" % opt.metavar
        chunks.append(optstr + '::\n')
        chunks.append(opt.help)
        chunks.append('\n\n')
    # Note: the original bound this text to a local named 'options',
    # shadowing the module-global of the same name.
    options_text = ''.join(chunks)
    #
    OUTPUT = open('cxxtestgen.1.txt', 'w')
    try:
        OUTPUT.write(man_template.substitute(usage=usage,
                                             description=description,
                                             options=options_text))
    finally:
        OUTPUT.close()
|
{
"content_hash": "c891a1a7de8a17d9b15c70e5f0f51ac8",
"timestamp": "",
"source": "github",
"line_count": 617,
"max_line_length": 238,
"avg_line_length": 37.82171799027552,
"alnum_prop": 0.5909753171066164,
"repo_name": "uonyx/kvr",
"id": "1692dc1b0d60af405559c97274a9bebcf3d04f25",
"size": "23336",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "test/3rdparty/cxxtest-4.3/python/cxxtest/cxxtestgen.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "21435"
},
{
"name": "C++",
"bytes": "618455"
},
{
"name": "CMake",
"bytes": "4106"
},
{
"name": "Shell",
"bytes": "86"
}
],
"symlink_target": ""
}
|
'''
Created on Apr 18, 2014
@author: oliwa
'''
import os
import imp
import traceback
import sys
from prody.proteins.compare import matchTNMAChains
from Hungarian import Hungarian
class TNMABase(object):
'''
TNMABase is the TNMA base class
'''
def __init__(self):
'''
Constructor
'''
def importFromURI(self, uri, absl=False):
""" Dynamic python module loading, adapted from http://stamat.wordpress.com/2013/06/30/dynamic-module-import-in-python/
Args:
uri: path to the module to be loaded
Returns: a dynamically loaded python module
"""
if not absl:
uri = os.path.normpath(os.path.join(os.path.dirname(__file__), uri))
path, fname = os.path.split(uri)
mname, ext = os.path.splitext(fname)
no_ext = os.path.join(path, mname)
if os.path.exists(no_ext + '.pyc'):
try:
os.remove(no_ext + '.pyc')
except:
pass
if os.path.exists(no_ext + '.py'):
try:
return imp.load_source(mname, no_ext + '.py')
except:
raise StandardError("StandardError occurred, the Configurations file could not be dynamically loaded")
def getFullPathOfURI(self, uri):
""" Get the full path of a file, adapted from http://stamat.wordpress.com/2013/06/30/dynamic-module-import-in-python/
Args:
uri: the provided path inclusive filename
Returns:
absolute (full) path of the file
"""
return os.path.normpath(os.path.join(os.path.dirname(__file__), uri))
def getOverallMatch(self, reference, mobile, subset):
"""
Performs a matching of chains of the two elements reference and mobile returns
the matches with all atoms as specified by subset.
At first, a modified version of matchChains is called that only uses the
pairwise alignment matching of prody, but keeps the prody defaults of
minimum seqid and overlap/coverage.
- If the matches are a perfect bisection, this result is used
and returned.
- Else, the hungarian algorithm is called to find the optimal
matches, and the result returned.
In case of the hungarian algorithm, the matchChains method has been
modified as follows the following addons:
1. pairwise alignment is enforced (from Bio.pairwise2)
2. pairwise alignment is the only matching algorithm, the prody
first choice of mapping based on residue numbers and type is
ignored
3. minimum seqid and overlap criteria are set to 0.00001, the actual
matching decision will be performed by the hungarian algorithm,
and pairwise alignment is only needed for the actual values of
seqid and overlap to create the cost matrix of the hungarian
algorithm
Remarks: prepareForHungarian needs to be set to True. Otherwise, the
ProDy matching sorts matched chains internally in decreasing order
of sequence identity, but this order is sometimes not the order of
chains in the PDB file.
Args:
reference: the unbound structure
mobile: the bound structure
subset: which matched atoms to return (calpha, bb, all ...)
Returns:
the overall match of chains from the given myTuple based on the
Bio.pairwise2 scores and possibly the hungarian algorithm
"""
matches = matchTNMAChains(reference,
mobile,
prepareForHungarian = True,
pwalign="True",
subset=subset)
# if the number of chains do not match, the behavior cannot be
# defined at this point
assert reference.numChains() == mobile.numChains()
if matches is None:
return self.doHungarianMatching(reference, mobile, subset)
elif not (reference.numChains() == mobile.numChains() == len(matches)):
return self.doHungarianMatching(reference, mobile, subset)
elif not self.isAOnetoOneMatch(matches):
return self.doHungarianMatching(reference, mobile, subset)
else:
self.matches = matches
# make overall match and return it
noMatchYet = True
for match in matches:
ref_chain = match[0]
mob_chain = match[1]
if noMatchYet:
overallRefMatch = ref_chain
overallMobMatch = mob_chain
noMatchYet = False
else:
overallRefMatch += ref_chain
overallMobMatch += mob_chain
if not noMatchYet:
overallMatch = [overallRefMatch, overallMobMatch]
else:
overallMatch = [ref_chain, mob_chain]
return overallMatch
#return [matches[1][0]+matches[0][0], matches[1][1]+matches[0][1]]
def isAOnetoOneMatch(self, matches):
""" Return False if matches does not have a one to one match for each
chain, else return True.
It is assumed that len(matches) > 1 and that the number of matches
equals the number of either chains.
Args:
matches: matches as returns by matchTNMAchains
Returns: does matches contain a 1:1 perfect matching of the bisection
"""
baseSetUnbound = set(matches[0][0].getChids())
baseSetBound = set(matches[0][1].getChids())
assert len(baseSetUnbound) == 1, 'assert len(baseSetUnbound) == 1'
assert len(baseSetBound) == 1, 'assert len(baseSetBound) == 1'
for i in range(1, len(matches)):
addonSetUnbound = set(matches[i][0].getChids())
addonSetBound = set(matches[i][1].getChids())
assert len(addonSetUnbound) == 1, 'assert len(addonSetUnbound) == 1'
assert len(addonSetBound) == 1, 'assert len(addonSetBound) == 1'
if len(baseSetUnbound.intersection(addonSetUnbound)) > 0:
return False
elif len(baseSetBound.intersection(addonSetBound)) > 0:
return False
elif len(baseSetUnbound.intersection(addonSetUnbound)) == 0:
baseSetUnbound = baseSetUnbound.union(addonSetUnbound)
elif len(baseSetBound.intersection(addonSetBound)) == 0:
baseSetBound = baseSetBound.union(addonSetBound)
else:
print "**********\n\n\n set problem in isAOnetoOneMatch(...)"
sys.exit()
return True
def doHungarianMatching(self, reference, mobile, subset):
""" Do a chain matching with the help of the Hungarian Algorithm.
Args:
reference: a structure (for instance protein) to be matched
mobile: another structure (of for instance the same protein in a different conformational state) to be matched
subset: what atoms are considered for this matching (calpha, bb, all)
Returns:
object with the overall chain matchings
"""
print "Performing matching with the help of the Hungarian Algorithm."
seqid = 0.00001
overlap = 0.00001
self.matches = matchTNMAChains(reference,
mobile,
prepareForHungarian = True,
seqid=seqid,
overlap=overlap,
pwalign="True",
subset=subset)
hungarian = Hungarian()
indices, matchesMatrix = hungarian.getHungarianIndices(
reference.numChains(),
mobile.numChains(),
self.matches)
noMatchYet = True
for element in indices:
ref_chain = (matchesMatrix[element[0]][element[1]])[0]
mob_chain = (matchesMatrix[element[0]][element[1]])[1]
if noMatchYet:
overallRefMatch = ref_chain
overallMobMatch = mob_chain
noMatchYet = False
else:
overallRefMatch += ref_chain
overallMobMatch += mob_chain
if not noMatchYet:
overallMatch = [overallRefMatch, overallMobMatch]
else:
overallMatch = [ref_chain, mob_chain]
return overallMatch
def instantiateClassFromModule(self, moduleName, className):
""" Instantiate the class "className" from the module "moduleName" and return it
Args:
moduleName: the name of the module
className: the name of the class to be instantiated
Return:
The class instantiated from the provided module
"""
try:
SomeClass = getattr(moduleName, className)
obj = SomeClass()
return obj
except StandardError, e:
print "StandardError occurred, the Configurations class could not be dynamically instantiated: ", e
print traceback.format_exc()
    def equalityAssertionsOfComplexAndItsProteins(self, encounter, checkInterface=True):
        """ Assert/check equality of parsed proteins and their interfaces

        Verifies that each individually parsed protein (and optionally its
        interface) equals the corresponding segment selection of the
        unbound/bound complex.

        Args:
            encounter: object with all parsed proteins
            checkInterface: if true, also perform the check on the interface
        """
        # The reference is either the receptor ('R') or the ligand ('L');
        # the counterpart gets the other segment label.
        if self.utils.isReceptor(encounter.getReference().getTitle()):
            referenceSegment = "R"
            counterpartSegment = "L"
        else:
            referenceSegment = "L"
            counterpartSegment = "R"
        # Unbound side: reference and counterpart vs. the aligned complex.
        self.utils.checkEqualityOfProteins(encounter.getReference(), encounter.unboundComplexAligned.complex.select('segment "'+referenceSegment+'."'))
        self.utils.checkEqualityOfProteins(encounter.getRefChain(), encounter.getUnboundComplexAlignedChain().select('segment "'+referenceSegment+'."'))
        self.utils.checkEqualityOfProteins(encounter.getUnboundCounterpart(), encounter.unboundComplexAligned.complex.select('segment "'+counterpartSegment+'."'))
        self.utils.checkEqualityOfProteins(encounter.getUnboundCounterpartChain(), encounter.getUnboundComplexAlignedChain().select('segment "'+counterpartSegment+'."'))
        if checkInterface:
            self.utils.checkEqualityOfProteins(encounter.getRefChainInterface(), encounter.getUnboundComplexChainInterface().select('segment "'+referenceSegment+'."'))
            self.utils.checkEqualityOfProteins(encounter.getUnboundCounterpartChainInterface(), encounter.getUnboundComplexChainInterface().select('segment "'+counterpartSegment+'."'))
        # Bound side: mobile and counterpart vs. the bound complex.
        self.utils.checkEqualityOfProteins(encounter.getMobile(), encounter.boundComplex.complex.select('segment "'+referenceSegment+'."'))
        self.utils.checkEqualityOfProteins(encounter.getMobChain(), encounter.getBoundComplexChain().select('segment "'+referenceSegment+'."'))
        self.utils.checkEqualityOfProteins(encounter.getBoundCounterpart(), encounter.boundComplex.complex.select('segment "'+counterpartSegment+'."'))
        self.utils.checkEqualityOfProteins(encounter.getBoundCounterpartChain(), encounter.getBoundComplexChain().select('segment "'+counterpartSegment+'."'))
        if checkInterface:
            self.utils.checkEqualityOfProteins(encounter.getMobChainInterface(), encounter.getBoundComplexChainInterface().select('segment "'+referenceSegment+'."'))
            self.utils.checkEqualityOfProteins(encounter.getBoundCounterpartChainInterface(), encounter.getBoundComplexChainInterface().select('segment "'+counterpartSegment+'."'))
def equalityAssertionsOfComplexAndItsProteinsRoundCoords(self, encounter, checkInterface=True, roundTo=True):
    """ Assert/check equality of parsed proteins and their interfaces,
    forwarding the rounding argument to each comparison.

    Args:
        encounter: object with all parsed proteins
        checkInterface: when True, also compare the interface selections
        roundTo: rounding argument passed through to checkEqualityOfProteins
    """
    # Which segment letter belongs to the reference depends on whether
    # the reference protein is the receptor.
    if self.utils.isReceptor(encounter.getReference().getTitle()):
        refSeg = "R"
        otherSeg = "L"
    else:
        refSeg = "L"
        otherSeg = "R"
    refSel = 'segment "' + refSeg + '."'
    otherSel = 'segment "' + otherSeg + '."'
    check = self.utils.checkEqualityOfProteins
    # Unbound (aligned) complex against its separately parsed parts.
    check(encounter.getReference(), encounter.unboundComplexAligned.complex.select(refSel), roundTo)
    check(encounter.getRefChain(), encounter.getUnboundComplexAlignedChain().select(refSel), roundTo)
    check(encounter.getUnboundCounterpart(), encounter.unboundComplexAligned.complex.select(otherSel), roundTo)
    check(encounter.getUnboundCounterpartChain(), encounter.getUnboundComplexAlignedChain().select(otherSel), roundTo)
    if checkInterface:
        check(encounter.getRefChainInterface(), encounter.getUnboundComplexChainInterface().select(refSel), roundTo)
        check(encounter.getUnboundCounterpartChainInterface(), encounter.getUnboundComplexChainInterface().select(otherSel), roundTo)
    # Bound complex against its separately parsed parts.
    check(encounter.getMobile(), encounter.boundComplex.complex.select(refSel), roundTo)
    check(encounter.getMobChain(), encounter.getBoundComplexChain().select(refSel), roundTo)
    check(encounter.getBoundCounterpart(), encounter.boundComplex.complex.select(otherSel), roundTo)
    check(encounter.getBoundCounterpartChain(), encounter.getBoundComplexChain().select(otherSel), roundTo)
    if checkInterface:
        check(encounter.getMobChainInterface(), encounter.getBoundComplexChainInterface().select(refSel), roundTo)
        check(encounter.getBoundCounterpartChainInterface(), encounter.getBoundComplexChainInterface().select(otherSel), roundTo)
|
{
"content_hash": "fe17f1ad886f2dde9c919ea8be45e542",
"timestamp": "",
"source": "github",
"line_count": 292,
"max_line_length": 203,
"avg_line_length": 51.62671232876713,
"alnum_prop": 0.6160530679933665,
"repo_name": "Shen-Lab/cNMA",
"id": "da86f8e6e8298b1580ac96922c85f01426123345",
"size": "15075",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Software/TNMABase.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "779570"
},
{
"name": "Shell",
"bytes": "45785"
}
],
"symlink_target": ""
}
|
"""urls for bootcamps"""
from django.urls import path, include
from rest_framework import routers
from klasses.views import BootcampViewSet
router = routers.DefaultRouter()
router.register("bootcampruns", BootcampViewSet, "bootcamp-runs")
urlpatterns = [path("api/", include(router.urls))]
|
{
"content_hash": "0e4c125f23600b2e5defd6cfefde4d3a",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 65,
"avg_line_length": 24.583333333333332,
"alnum_prop": 0.7728813559322034,
"repo_name": "mitodl/bootcamp-ecommerce",
"id": "91971d9d69a36d5c95bdf84bb0346555bf255c15",
"size": "295",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "klasses/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "325"
},
{
"name": "Dockerfile",
"bytes": "998"
},
{
"name": "HTML",
"bytes": "70605"
},
{
"name": "JavaScript",
"bytes": "491664"
},
{
"name": "Procfile",
"bytes": "293"
},
{
"name": "Python",
"bytes": "1236492"
},
{
"name": "SCSS",
"bytes": "72463"
},
{
"name": "Shell",
"bytes": "7329"
}
],
"symlink_target": ""
}
|
from django.db import models
class Snapshot(models.Model):
    """A dated data capture; at most one row per calendar day
    (``snapped_at`` is unique)."""

    snapped_at = models.DateField(unique=True)  # day this snapshot covers
    href = models.CharField(max_length=55)  # link/path associated with the snapshot -- exact semantics defined by the producer, not visible here
    completed = models.BooleanField(default=False)  # presumably flipped once the snapshot is fully processed -- confirm with the writer code
    created_at = models.DateTimeField(auto_now_add=True)  # row creation time
    updated_at = models.DateTimeField(auto_now=True)  # last modification time
class Cryptocurrency(models.Model):
    """A currency known to the system; ``symbol`` is the unique natural key."""

    name = models.CharField(max_length=255)
    symbol = models.CharField(max_length=20, unique=True)  # unique natural key
    slug = models.CharField(max_length=50)  # URL-friendly identifier
    added_at = models.DateTimeField()  # externally supplied timestamp (not auto-set)
    created_at = models.DateTimeField(auto_now_add=True)  # row creation time
    updated_at = models.DateTimeField(auto_now=True)  # last modification time
class Quote(models.Model):
    """Market data for one cryptocurrency within one snapshot.

    (snapshot, cryptocurrency, rank) is enforced unique, so a currency
    can appear at most once per snapshot at a given rank.
    """

    snapshot = models.ForeignKey(
        Snapshot, on_delete=models.CASCADE, related_name='quotes')
    cryptocurrency = models.ForeignKey(
        Cryptocurrency, on_delete=models.CASCADE, related_name='quotes')
    rank = models.IntegerField()  # ordering within the snapshot -- presumably market-cap rank; confirm with the data source
    max_supply = models.IntegerField(null=True)  # nullable: not every currency has a supply cap
    circulating_supply = models.IntegerField()
    total_supply = models.IntegerField()
    price = models.FloatField()
    volume_24h = models.FloatField()  # 24-hour trading volume (units not shown here)
    change_7d = models.FloatField()  # 7-day change -- likely a percentage; verify against the source
    market_cap = models.FloatField()
    created_at = models.DateTimeField(auto_now_add=True)  # row creation time
    updated_at = models.DateTimeField(auto_now=True)  # last modification time

    class Meta:
        unique_together = ('snapshot', 'cryptocurrency', 'rank')
|
{
"content_hash": "47de2e7b4152d9091a77ad87ca79aa79",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 72,
"avg_line_length": 34.775,
"alnum_prop": 0.7124370956146657,
"repo_name": "Tjorriemorrie/trading",
"id": "9e3f022f54c825f10104f8ac0e3cdc32ff077451",
"size": "1391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "25_cmc/mvix/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "586"
},
{
"name": "HTML",
"bytes": "10059"
},
{
"name": "JavaScript",
"bytes": "1812"
},
{
"name": "Jupyter Notebook",
"bytes": "682876"
},
{
"name": "Less",
"bytes": "671"
},
{
"name": "M4",
"bytes": "18975"
},
{
"name": "Python",
"bytes": "636401"
},
{
"name": "Shell",
"bytes": "670"
},
{
"name": "q",
"bytes": "478327533"
}
],
"symlink_target": ""
}
|
"""
Scheme Info editor widget.
"""
from AnyQt.QtWidgets import (
QWidget, QDialog, QLabel, QTextEdit, QCheckBox, QFormLayout,
QVBoxLayout, QHBoxLayout, QDialogButtonBox, QSizePolicy
)
from AnyQt.QtCore import Qt
from ..gui.lineedit import LineEdit
from ..gui.utils import StyledWidget_paintEvent, StyledWidget
class SchemeInfoEdit(QWidget):
    """Scheme info editor widget.

    A small form with a name line-edit and a description text-edit, bound
    to a scheme instance via :meth:`setScheme` and written back with
    :meth:`commit`.
    """

    def __init__(self, *args, **kwargs):
        QWidget.__init__(self, *args, **kwargs)
        # The scheme currently being edited (assigned in setScheme).
        self.scheme = None
        self.__setupUi()

    def __setupUi(self):
        # Build the two-row Name/Description form.
        layout = QFormLayout()
        layout.setRowWrapPolicy(QFormLayout.WrapAllRows)
        layout.setFieldGrowthPolicy(QFormLayout.ExpandingFieldsGrow)
        self.name_edit = LineEdit(self)
        self.name_edit.setPlaceholderText(self.tr("untitled"))
        self.name_edit.setSizePolicy(QSizePolicy.Expanding,
                                     QSizePolicy.Fixed)
        self.desc_edit = QTextEdit(self)
        # Tab moves focus instead of inserting a tab character.
        self.desc_edit.setTabChangesFocus(True)
        layout.addRow(self.tr("Name"), self.name_edit)
        layout.addRow(self.tr("Description"), self.desc_edit)
        # Tracks whether the displayed name is the "untitled" placeholder,
        # in which case commit() stores an empty title.
        self.__schemeIsUntitled = True
        self.setLayout(layout)

    def setScheme(self, scheme):
        """Set the scheme to display/edit
        """
        self.scheme = scheme
        if not scheme.title:
            self.name_edit.setText(self.tr("untitled"))
            # Select the placeholder so that typing replaces it immediately.
            self.name_edit.selectAll()
            self.__schemeIsUntitled = True
        else:
            self.name_edit.setText(scheme.title)
            self.__schemeIsUntitled = False
        self.desc_edit.setPlainText(scheme.description or "")

    def commit(self):
        """Commit the current contents of the editor widgets
        back to the scheme.
        """
        if self.__schemeIsUntitled and \
                self.name_edit.text() == self.tr("untitled"):
            # 'untitled' text was not changed
            name = ""
        else:
            name = str(self.name_edit.text()).strip()
        description = str(self.desc_edit.toPlainText()).strip()
        self.scheme.title = name
        self.scheme.description = description

    def paintEvent(self, event):
        # Delegate painting to the shared styled-widget paint routine
        # from ..gui.utils.
        return StyledWidget_paintEvent(self, event)

    def title(self):
        """Return the current (stripped) name text."""
        return str(self.name_edit.text()).strip()

    def description(self):
        """Return the current (stripped) description text."""
        return str(self.desc_edit.toPlainText()).strip()
class SchemeInfoDialog(QDialog):
    """Dialog wrapping a :class:`SchemeInfoEdit` together with an OK/Cancel
    button box and a "show when I make a new workflow" check box.
    """

    def __init__(self, *args, **kwargs):
        QDialog.__init__(self, *args, **kwargs)
        self.scheme = None
        # When True, accepting the dialog also commits the editor contents
        # back to the scheme (see setAutoCommit).
        self.__autoCommit = True
        self.__setupUi()

    def __setupUi(self):
        layout = QVBoxLayout()
        layout.setContentsMargins(0, 0, 0, 0)
        layout.setSpacing(0)
        self.editor = SchemeInfoEdit(self)
        self.editor.layout().setContentsMargins(20, 20, 20, 20)
        self.editor.layout().setSpacing(15)
        self.editor.setSizePolicy(QSizePolicy.MinimumExpanding,
                                  QSizePolicy.MinimumExpanding)
        heading = self.tr("Workflow Info")
        heading = "<h3>{0}</h3>".format(heading)
        self.heading = QLabel(heading, self, objectName="heading")
        # Insert heading as the first form row of the editor.
        self.editor.layout().insertRow(0, self.heading)
        self.buttonbox = QDialogButtonBox(
            QDialogButtonBox.Ok | QDialogButtonBox.Cancel,
            Qt.Horizontal,
            self
        )
        # Insert button box as the last form row of the editor.
        self.editor.layout().addRow(self.buttonbox)
        # Bottom strip holding the auto-show check box and its hint label.
        widget = StyledWidget(self, objectName="auto-show-container")
        check_layout = QHBoxLayout()
        check_layout.setContentsMargins(20, 10, 20, 10)
        self.__showAtNewSchemeCheck = \
            QCheckBox(self.tr("Show when I make a New Workflow."),
                      self,
                      objectName="auto-show-check",
                      checked=False,
                      )
        check_layout.addWidget(self.__showAtNewSchemeCheck)
        check_layout.addWidget(
            QLabel(self.tr("You can also edit Workflow Info later "
                           "(File -> Workflow Info)."),
                   self,
                   objectName="auto-show-info"),
            alignment=Qt.AlignRight)
        widget.setLayout(check_layout)
        widget.setSizePolicy(QSizePolicy.MinimumExpanding,
                             QSizePolicy.Fixed)
        if self.__autoCommit:
            self.buttonbox.accepted.connect(self.editor.commit)
        self.buttonbox.accepted.connect(self.accept)
        self.buttonbox.rejected.connect(self.reject)
        layout.addWidget(self.editor, stretch=10)
        layout.addWidget(widget)
        self.setLayout(layout)

    def setShowAtNewScheme(self, checked):
        """
        Set the 'Show at new scheme' check state.
        """
        self.__showAtNewSchemeCheck.setChecked(checked)

    def showAtNewScheme(self):
        """
        Return the check state of the 'Show at new scheme' check box.
        """
        return self.__showAtNewSchemeCheck.isChecked()

    def setAutoCommit(self, auto):
        """Enable/disable committing the editor contents on dialog accept."""
        if self.__autoCommit != auto:
            self.__autoCommit = auto
            if auto:
                self.buttonbox.accepted.connect(self.editor.commit)
            else:
                self.buttonbox.accepted.disconnect(self.editor.commit)

    def setScheme(self, scheme):
        """Set the scheme to display/edit.
        """
        self.scheme = scheme
        self.editor.setScheme(scheme)
|
{
"content_hash": "dc9842357a9cab8666315d6b5fd36054",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 70,
"avg_line_length": 31.69142857142857,
"alnum_prop": 0.5928597187161918,
"repo_name": "cheral/orange3",
"id": "9a63b2a99113912bde376d7112a9f8eb7e891632",
"size": "5546",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Orange/canvas/application/schemeinfo.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "20412"
},
{
"name": "C++",
"bytes": "1992"
},
{
"name": "GLSL",
"bytes": "75"
},
{
"name": "HTML",
"bytes": "3503"
},
{
"name": "JavaScript",
"bytes": "12023"
},
{
"name": "Jupyter Notebook",
"bytes": "6662"
},
{
"name": "NSIS",
"bytes": "20217"
},
{
"name": "Python",
"bytes": "4139574"
},
{
"name": "Shell",
"bytes": "47441"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
    """Build the static Dantooine rock-hut template object."""
    obj = Static()
    obj.template = "object/static/structure/dantooine/shared_dant_large_rock_hut_dest_01.iff"
    obj.attribute_template_id = -1
    obj.stfName("obj_n", "unknown_object")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return obj
|
{
"content_hash": "23def84efe5b968a78c4660df65afc5f",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 93,
"avg_line_length": 24.76923076923077,
"alnum_prop": 0.6987577639751553,
"repo_name": "obi-two/Rebelion",
"id": "af478f35590eb6d0402d4b21e3e20c3eca32da8e",
"size": "467",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/static/structure/dantooine/shared_dant_large_rock_hut_dest_01.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
import numpy as np
import fabio
import re
import os
from .det2lab_xds import det2lab_xds, rotvec2mat
import h5py
from numpy.linalg import norm
def r_get_numbers(matchgroup, num):
    """A helper function which can be used similarly to fscanf(fid,'%f',num)
    to extract num arguments from the regex iterator.

    Consumes exactly ``num`` matches from ``matchgroup`` and returns their
    float values as a numpy array.
    """
    return np.array([float(next(matchgroup).group()) for _ in range(num)])
def read_XPARM(path_to_XPARM='.'):
    """Loads the instrumental geometry information from the XPARM.XDS or
    GXPARM.XDS files at the proposed location.

    Args:
        path_to_XPARM: either a direct path to the file, or a directory that
            contains ``GXPARM.XDS`` (preferred) or ``XPARM.XDS``.

    Returns:
        dict mapping parameter names to numpy arrays (scalars are 1-element
        arrays; the unit cell vectors are reshaped to (3, 3)).

    Raises:
        Exception: if the path does not exist, no XPARM file is found, or
            the file does not contain exactly the expected number of values.
    """
    if not os.path.exists(path_to_XPARM):
        # Fix: the original message was missing the space before "does".
        raise Exception("path " + path_to_XPARM + " does not exist")
    if os.path.isdir(path_to_XPARM):
        # Prefer the refined geometry (GXPARM.XDS) over the initial one.
        candidate = os.path.join(path_to_XPARM, 'GXPARM.XDS')
        if os.path.isfile(candidate):
            path_to_XPARM = candidate
        else:
            candidate = os.path.join(path_to_XPARM, 'XPARM.XDS')
            if os.path.isfile(candidate):
                path_to_XPARM = candidate
            else:
                raise Exception("files GXPARM.XDS and XPARM.XDS are not found in the folder " + path_to_XPARM)

    with open(path_to_XPARM) as f:
        f.readline()  # skip header line
        text = f.read()

    # parse the rest to numbers.  Raw string avoids the invalid escape
    # sequence warning the old non-raw pattern triggered on modern Python.
    numbers = re.compile(r'-?\d+\.?\d*').finditer(text)
    try:
        result = dict(starting_frame=r_get_numbers(numbers, 1),
                      starting_angle=r_get_numbers(numbers, 1),
                      oscillation_angle=r_get_numbers(numbers, 1),
                      rotation_axis=r_get_numbers(numbers, 3),
                      wavelength=r_get_numbers(numbers, 1),
                      wavevector=r_get_numbers(numbers, 3),
                      space_group_nr=r_get_numbers(numbers, 1),
                      cell=r_get_numbers(numbers, 6),
                      unit_cell_vectors=np.reshape(r_get_numbers(numbers, 9), (3, 3)),
                      number_of_detector_segments=r_get_numbers(numbers, 1),
                      NX=r_get_numbers(numbers, 1),
                      NY=r_get_numbers(numbers, 1),
                      pixelsize_x=r_get_numbers(numbers, 1),
                      pixelsize_y=r_get_numbers(numbers, 1),
                      x_center=r_get_numbers(numbers, 1),
                      y_center=r_get_numbers(numbers, 1),
                      distance_to_detector=r_get_numbers(numbers, 1),
                      detector_x=r_get_numbers(numbers, 3),
                      detector_y=r_get_numbers(numbers, 3),
                      detector_normal=r_get_numbers(numbers, 3),
                      detector_segment_crossection=r_get_numbers(numbers, 5),
                      detector_segment_geometry=r_get_numbers(numbers, 9))
    except StopIteration:
        raise Exception('Wrong format of the XPARM.XDS file')

    # check there is nothing left: the file must contain exactly the
    # expected number of numeric fields.
    try:
        next(numbers)
    except StopIteration:
        pass
    else:
        raise Exception('Wrong format of the XPARM.XDS file')
    return result
def cov2corr(inp):
    """Split a covariance-like matrix into standard deviations and the
    corresponding correlation (normalized) matrix.

    Returns (sigma, corr) where sigma[i] = sqrt(inp[i, i]) and
    corr[i, j] = inp[i, j] / (sigma[i] * sigma[j]).
    """
    stddev = np.sqrt(np.diag(inp))
    normalized = inp / np.outer(stddev, stddev)
    return stddev, normalized
def air_absorption_coefficient(medium, wavelength):
    """
    The function returns linear absorbtion coefficient of selected medium at
    given x-ray wavelength [mm^-1]

    Args:
        medium: 'Air' or 'Helium'
        wavelength: x-ray wavelength in Angstroem

    Raises:
        Exception: for an unknown medium, or when the photon energy falls
            outside the tabulated 1 keV - 20 MeV range.  (Fix: the original
            messages claimed "using nearest value" although the function
            raises and never clamps.)

    Mass attenuation coefficients are taken from NIST "Tables of X-Ray Mass
    Attenuation Coefficients and Mass Energy-Absorption Coefficients from 1
    keV to 20 MeV for Elements Z = 1 to 92 and 48 Additional Substances of Dosimetric Interest
    J. H. Hubbell and S. M. Seltzer
    http://www.nist.gov/pml/data/xraycoef/index.cfm
    """
    if medium == 'Helium':
        density = 1.663e-04  # [g/cm^3]
        # the table contains photon energy [Mev] and mass attenuation coefficient
        # mu/sigma [cm^2/g]
        mass_attenuation_coefficient = np.array([[1.00000e-03, 6.084e+01],
                                                 [1.50000e-03, 1.676e+01],
                                                 [2.00000e-03, 6.863e+00],
                                                 [3.00000e-03, 2.007e+00],
                                                 [4.00000e-03, 9.329e-01],
                                                 [5.00000e-03, 5.766e-01],
                                                 [6.00000e-03, 4.195e-01],
                                                 [8.00000e-03, 2.933e-01],
                                                 [1.00000e-02, 2.476e-01],
                                                 [1.50000e-02, 2.092e-01],
                                                 [2.00000e-02, 1.960e-01],
                                                 [3.00000e-02, 1.838e-01],
                                                 [4.00000e-02, 1.763e-01],
                                                 [5.00000e-02, 1.703e-01],
                                                 [6.00000e-02, 1.651e-01],
                                                 [8.00000e-02, 1.562e-01],
                                                 [1.00000e-01, 1.486e-01],
                                                 [1.50000e-01, 1.336e-01],
                                                 [2.00000e-01, 1.224e-01],
                                                 [3.00000e-01, 1.064e-01],
                                                 [4.00000e-01, 9.535e-02],
                                                 [5.00000e-01, 8.707e-02],
                                                 [6.00000e-01, 8.054e-02],
                                                 [8.00000e-01, 7.076e-02],
                                                 [1.00000e+00, 6.362e-02],
                                                 [1.25000e+00, 5.688e-02],
                                                 [1.50000e+00, 5.173e-02],
                                                 [2.00000e+00, 4.422e-02],
                                                 [3.00000e+00, 3.503e-02],
                                                 [4.00000e+00, 2.949e-02],
                                                 [5.00000e+00, 2.577e-02],
                                                 [6.00000e+00, 2.307e-02],
                                                 [8.00000e+00, 1.940e-02],
                                                 [1.00000e+01, 1.703e-02],
                                                 [1.50000e+01, 1.363e-02],
                                                 [2.00000e+01, 1.183e-02]])
    elif medium == 'Air':
        density = 1.205e-03  # [g/cm^3]
        # The near-duplicate 3.2029e-3 rows encode the absorption edge step.
        mass_attenuation_coefficient = np.array([[1.00000e-03, 3.606e+03],
                                                 [1.50000e-03, 1.191e+03],
                                                 [2.00000e-03, 5.279e+02],
                                                 [3.00000e-03, 1.625e+02],
                                                 [3.20290e-03, 1.340e+02],
                                                 [3.202900000001e-03, 1.485e+02],
                                                 [4.00000e-03, 7.788e+01],
                                                 [5.00000e-03, 4.027e+01],
                                                 [6.00000e-03, 2.341e+01],
                                                 [8.00000e-03, 9.921e+00],
                                                 [1.00000e-02, 5.120e+00],
                                                 [1.50000e-02, 1.614e+00],
                                                 [2.00000e-02, 7.779e-01],
                                                 [3.00000e-02, 3.538e-01],
                                                 [4.00000e-02, 2.485e-01],
                                                 [5.00000e-02, 2.080e-01],
                                                 [6.00000e-02, 1.875e-01],
                                                 [8.00000e-02, 1.662e-01],
                                                 [1.00000e-01, 1.541e-01],
                                                 [1.50000e-01, 1.356e-01],
                                                 [2.00000e-01, 1.233e-01],
                                                 [3.00000e-01, 1.067e-01],
                                                 [4.00000e-01, 9.549e-02],
                                                 [5.00000e-01, 8.712e-02],
                                                 [6.00000e-01, 8.055e-02],
                                                 [8.00000e-01, 7.074e-02],
                                                 [1.00000e+00, 6.358e-02],
                                                 [1.25000e+00, 5.687e-02],
                                                 [1.50000e+00, 5.175e-02],
                                                 [2.00000e+00, 4.447e-02],
                                                 [3.00000e+00, 3.581e-02],
                                                 [4.00000e+00, 3.079e-02],
                                                 [5.00000e+00, 2.751e-02],
                                                 [6.00000e+00, 2.522e-02],
                                                 [8.00000e+00, 2.225e-02],
                                                 [1.00000e+01, 2.045e-02],
                                                 [1.50000e+01, 1.810e-02],
                                                 [2.00000e+01, 1.705e-02]])
    else:
        raise Exception('Unknown medium ' + medium)

    etw = 1.23985e-2  # [Mev*Angstroem] -- energy-to-wavelength conversion
    photon_energy = etw / wavelength
    if photon_energy < min(mass_attenuation_coefficient[:, 0]):
        raise Exception('Wavelength is too large: photon energy is below the tabulated range')
    if photon_energy > max(mass_attenuation_coefficient[:, 0]):
        raise Exception('Wavelength is too small: photon energy is above the tabulated range')
    # 0.1 here converts from cm^-1 to mm^-1
    mu = 0.1 * density * np.interp(photon_energy,
                                   mass_attenuation_coefficient[:, 0],
                                   mass_attenuation_coefficient[:, 1])
    return mu
def create_h5py_with_large_cache(filename, cache_size_mb):
    """
    Allows to open the hdf5 file with specified cache size

    Exclusively creates (fails if the file already exists, per ACC_EXCL)
    the file ``filename`` and returns an h5py.File whose raw-chunk cache is
    ``cache_size_mb`` megabytes instead of the small library default.
    """
    # h5py does not allow to control the cache size from the high level
    # we employ the workaround
    # sources:
    #http://stackoverflow.com/questions/14653259/how-to-set-cache-settings-while-using-h5py-high-level-interface
    #https://groups.google.com/forum/#!msg/h5py/RVx1ZB6LpE4/KH57vq5yw2AJ
    propfaid = h5py.h5p.create(h5py.h5p.FILE_ACCESS)
    settings = list(propfaid.get_cache())
    # get_cache() mirrors H5Pget_cache; index 2 is the raw data chunk cache
    # size in bytes, so scale the requested megabytes up accordingly.
    settings[2] = 1024 * 1024 * cache_size_mb
    propfaid.set_cache(*settings)
    fid = h5py.h5f.create(bytes(filename, encoding="utf-8") , flags=h5py.h5f.ACC_EXCL, fapl=propfaid)
    # NOTE(review): h5py.File(fid) without an explicit mode relies on the
    # low-level id carrying write access; newer h5py versions warn here.
    fin = h5py.File(fid)
    return fin
def accumulate_intensity(intensity,
                         indices,
                         rebinned_data,
                         number_of_pixels_rebinned,
                         number_of_pixels,
                         all_in_memory):
    """Accumulate pixel intensities into the rebinned 3D grid, in place.

    Args:
        intensity: 1D array of measured intensities.
        indices: (3, N) integer array of target voxel indices, one column
            per intensity value; out-of-range columns are dropped.
        rebinned_data: intensity accumulator -- a flat in-memory array when
            all_in_memory is True, otherwise a 3D h5py dataset.
        number_of_pixels_rebinned: matching accumulator counting how many
            pixels contributed to each voxel.
        number_of_pixels: (3,) shape of the target grid.
        all_in_memory: selects the in-memory vs. on-disk accumulation path.

    Nothing is returned; both accumulators are updated in place.
    """
    # remove elements which indices are outside the dataset
    pixels_in_range = np.logical_and(np.all(indices >= 0, axis=0),
                                     np.all(indices < np.reshape(number_of_pixels, (3, 1)), axis=0))
    intensity = intensity[pixels_in_range]
    indices = indices[:, pixels_in_range]
    # accumulate similar indices in a temporary in-memory array:
    # flatten 3D indices to scalars, then bincount over the inverse
    # mapping sums all intensities that land in the same voxel.
    indices_str = np.ravel_multi_index((indices[0, :], indices[1, :], indices[2, :]), number_of_pixels)
    unique_ind_flatten, unique_ind_ind, n = np.unique(indices_str, return_index=True, return_inverse=True)
    accumulated_intensity = np.bincount(n, weights=intensity)
    no_accumulated_pixels = np.bincount(n)
    # now comes the fun part
    if all_in_memory:
        # Flat numpy arrays: plain fancy-indexed read-modify-write.
        rebinned_data[unique_ind_flatten] = rebinned_data[unique_ind_flatten] + accumulated_intensity
        number_of_pixels_rebinned[unique_ind_flatten] = number_of_pixels_rebinned[
            unique_ind_flatten] + no_accumulated_pixels
    else:
        #since h5py does not allow to add the data through high level interface, the low level interface is used instead
        unique_ind = indices[:, unique_ind_ind]
        old_intensity = np.zeros((unique_ind.shape[1],), dtype=rebinned_data.dtype)
        old_count = np.zeros((unique_ind.shape[1],), dtype=number_of_pixels_rebinned.dtype)
        # Point selection in the file dataspace, flat buffer in memory.
        fspace = h5py.h5s.create_simple(tuple(number_of_pixels))
        fspace.select_elements(unique_ind.T)
        mspace = h5py.h5s.create_simple(old_count.shape)
        # Read current voxel values, add the new contributions, write back.
        rebinned_data._id.read(mspace, fspace, old_intensity)
        rebinned_data._id.write(mspace, fspace, old_intensity + accumulated_intensity)
        number_of_pixels_rebinned._id.read(mspace, fspace, old_count)
        number_of_pixels_rebinned._id.write(mspace, fspace, old_count + no_accumulated_pixels)
def correction_coefficients(h, instrument_parameters, medium, polarization_factor, polarization_plane_normal,
                            wavelength, wavevector, detector_normal):
    """Per-pixel multiplicative intensity correction factors.

    Combines absorption in the gas between sample and detector,
    polarization, and the detector solid-angle correction into a single
    factor per detector pixel.

    Args:
        h: detector pixel coordinates, in the form accepted by det2lab_xds.
        instrument_parameters: dict from read_XPARM, forwarded to det2lab_xds.
        medium: 'Air' or 'Helium' (see air_absorption_coefficient).
        polarization_factor: 1 for synchrotron, 0.5 for laboratory sources
            (per the defaults documented in reconstruct_data).
        polarization_plane_normal: normal of the polarization plane.
        wavelength: x-ray wavelength [Angstroem].
        wavevector: incident beam wavevector.
        detector_normal: detector surface normal (normalized here).

    Returns:
        1D array of correction factors, one per pixel in h.
    """
    [_, scattering_vector_mm, unit_scattering_vector] = det2lab_xds(h, 0, **instrument_parameters)
    # Absorption: exp(-mu * path length in mm) through the chosen medium.
    mu = air_absorption_coefficient(medium, wavelength)
    air_absorption = np.exp(
        -mu * np.sqrt(np.sum(scattering_vector_mm ** 2, axis=0)))
    #% Polarisation
    polarization_plane_normal = np.array(polarization_plane_normal)
    polarization_plane_normal = polarization_plane_normal / np.linalg.norm(polarization_plane_normal) # just in case
    # Second basis vector of the polarization frame, perpendicular to both
    # the plane normal and the incident beam.
    polarization_plane_other_comp = np.cross(polarization_plane_normal, wavevector.T)
    polarization_plane_other_comp = polarization_plane_other_comp / np.linalg.norm(polarization_plane_other_comp)
    # Weighted sum of the two polarization components.
    polarization_correction = (1 - polarization_factor) * (
        1 - np.dot(polarization_plane_normal, unit_scattering_vector) ** 2) + \
                              polarization_factor * (
        1 - np.dot(polarization_plane_other_comp, unit_scattering_vector) ** 2)
    #% solid angle correction
    detector_normal = detector_normal/norm(detector_normal)
    # cos^3 falloff of the pixel solid angle with the angle between the
    # scattered ray and the detector normal.
    solid_angle_correction = abs(np.dot(detector_normal, unit_scattering_vector) ** 3)
    #corrections = solid_angle_correction.*polarization_correction.*air_absorption;
    corrections = solid_angle_correction * polarization_correction * air_absorption
    # Leftover from the MATLAB original: an optional detector efficiency
    # correction loaded from a .mat file, not yet ported.
    #if(exist('detector_efficiency_correction.mat','file'))
    #    load detector_efficiency_correction;
    #    corrections = corrections./detector_efficiency_correction(:)';
    #    clear detector_efficiency_correction;
    #end
    #
    #corrections = corrections(measured_pixels)';
    #
    # TODO: implement detector efficiency for Pilatus
    return corrections
# function [rebinned_data,number_of_pixels_rebinned,Tp,metric_tensor]=...
# reconstruct_data(filename_template,...
# last_image,...
# reconstruct_in_orthonormal_basis,...
# maxind,...
# number_of_pixels,...
# measured_pixels,...
# microsteps,...
# unit_cell_transform_matrix)
def reconstruct_data(filename_template,
                     first_image,
                     last_image,
                     maxind,
                     number_of_pixels,
                     reconstruct_in_orthonormal_basis=False,
                     measured_pixels=None,
                     microsteps=[1, 1, 1],  # NOTE(review): mutable default; only read/rebound below, but a tuple would be safer
                     #on the angle also allows fractional values. for example 1 1 0.1 will only take every tenth frame
                     unit_cell_transform_matrix=np.eye(3),
                     polarization_plane_normal=[0, 1, 0], #default for synchrotron
                     polarization_factor=1, #0.5 for laboratory
                     medium='Air', #'Air' or 'Helium
                     path_to_XPARM=".",
                     output_filename='reconstruction.h5',
                     size_of_cache=100,
                     all_in_memory=False,
                     override=False,
                     scale=None,
                     keep_number_of_pixels=False):
    """Rebin a rotation series of detector frames into a 3D reciprocal-space
    grid and write the result in Yell format.

    Args:
        filename_template: printf-style template; frame i is read from
            ``filename_template % i`` via fabio.
        first_image, last_image: inclusive frame number range.
        maxind: (3,) half-extent of the grid in fractional coordinates.
        number_of_pixels: (3,) grid shape.
        reconstruct_in_orthonormal_basis: orthonormalize the cell vectors
            (QR) before mapping to fractional coordinates.
        measured_pixels: boolean detector mask; defaults to ``pixel >= 0``
            of frame 1.
        microsteps: [x, y, phi] subdivision factors; only phi is used,
            x/y must currently be 1.
        unit_cell_transform_matrix: applied to the XPARM cell vectors.
        polarization_plane_normal, polarization_factor, medium: forwarded
            to correction_coefficients.
        path_to_XPARM: location of (G)XPARM.XDS (see read_XPARM).
        output_filename: target HDF5 file, or None (all_in_memory only) to
            return a plain dict.
        size_of_cache: HDF5 chunk cache size in MB.
        all_in_memory: accumulate in numpy arrays instead of HDF5 datasets.
        override: delete an existing output file instead of raising.
        scale: optional per-frame scale factors (len == number of frames).
        keep_number_of_pixels: keep raw sums + counts ("Yell 0.9") instead
            of the averaged "data" array ("Yell 1.0").

    Returns:
        The result dict when output_filename is None; otherwise writes and
        closes the HDF5 file and returns None.
    """
    def image_name(num):
        # Map a frame number to its file name.
        return filename_template % num #test above

    def get_image(fname):
        # Load one detector frame as a numpy array.
        return fabio.open(fname).data

    #TODO: check mar2000 and 2300 is done properly with respect to oversaturated reflections. Check what happens in other cases too
    if measured_pixels is None:
        # Default mask: everything that is not flagged negative in frame 1.
        measured_pixels = get_image(image_name(1)) >= 0
    #TODO: maybe add scale 'median' where scale is defined as a median of a frame divided by a median of a first frame?
    if scale is None:
        scale = np.ones(last_image-first_image+1)
    else:
        assert(len(scale)==last_image-first_image+1)
    if microsteps is None:
        microsteps = (1, 1, 1)
    assert 3 == len(microsteps), 'Microsteps should have three values: along x, y and phi.'
    incr_xy = np.array(microsteps)[0:2]
    assert np.all(np.mod(incr_xy, 1) == 0), 'microsteps in x and y direction should be integer'
    #TODO: microstepping is omitted in this version
    assert np.all(
        incr_xy == np.array([1, 1])), 'microsteps are not implemented atm' #see next section and also down there
    # NOTE(review): the branch below is unreachable while the assert above
    # stands, and the redefined get_image is missing a return statement.
    if not np.all(incr_xy == np.array([1, 1])):
        def get_image(fname):
            np.kron(fabio.open(fname).data,
                    np.ones(incr_xy)) # TODO: remove the copypaste from the previous definition of get_image
        measured_pixels = 1 == np.kron(measured_pixels, np.ones(incr_xy))
    # From here on, microsteps is the scalar phi subdivision factor.
    microsteps = microsteps[2]
    if microsteps < 1:
        # Fractional phi microstep means: take only every N-th frame.
        image_increment = 1 / microsteps
        assert (np.mod(image_increment, 1) == 0)
        microsteps = 1
    else:
        image_increment = 1
    assert (3, 3) == np.shape(unit_cell_transform_matrix)
    # prepare hkl indices: 1-based (x, y) coordinates of every masked-in pixel
    h = np.mgrid[1:np.size(measured_pixels, 1) + 1, 1:np.size(measured_pixels, 0) + 1].T
    h = h.reshape((int(np.size(h) / 2), 2))
    h = h[np.reshape(measured_pixels, (-1)), :]
    number_of_pixels = np.array(number_of_pixels)
    assert len(number_of_pixels) == 3
    maxind = np.array(maxind, dtype=np.float_)
    assert len(maxind) == 3
    # Grid spacing: voxel centers span [-maxind, maxind] inclusive.
    step_size_inv = 1.0 * (number_of_pixels - 1) / maxind / 2
    step_size = 1.0/step_size_inv
    # Fractional coordinates -> integer voxel indices.
    to_index = lambda c: np.around(step_size_inv[:,np.newaxis]*(c+maxind[:,np.newaxis])).astype(np.int64)
    if output_filename is not None:
        if os.path.exists(output_filename):
            if override:
                os.remove(output_filename)
            else:
                raise Exception('file ' + output_filename + ' already exists')
        output_file = create_h5py_with_large_cache(output_filename, size_of_cache)
    if all_in_memory:
        # Flat accumulators, reshaped to 3D at the end.
        rebinned_data = np.zeros(np.prod(number_of_pixels),dtype=np.float_)
        number_of_pixels_rebinned = np.zeros(np.prod(number_of_pixels),dtype=np.int_)
    else:
        if output_filename is None:
            raise Exception("output filename shoud be provided")
        rebinned_data = output_file.create_dataset('rebinned_data', shape=number_of_pixels, dtype='float32',
                                                   chunks=True)
        number_of_pixels_rebinned = output_file.create_dataset('number_of_pixels_rebinned', shape=number_of_pixels,
                                                               dtype='int', chunks=True)
    #read_xparm
    instrument_parameters = read_XPARM(path_to_XPARM)
    unit_cell_vectors = instrument_parameters['unit_cell_vectors']
    starting_frame = instrument_parameters['starting_frame']
    starting_angle = instrument_parameters['starting_angle']
    rotation_axis = instrument_parameters['rotation_axis']
    wavevector = instrument_parameters['wavevector']
    wavelength = instrument_parameters['wavelength']
    oscillation_angle = instrument_parameters['oscillation_angle']
    detector_normal = instrument_parameters['detector_normal']
    #TODO: implement microstepping
    #%in case of microstepping
    #if exist('incr_xy','var')
    #    NX=NX*incr_xy(1);
    #    NY=NY*incr_xy(2);
    #    pixelsize_x=pixelsize_x/incr_xy(1);
    #    pixelsize_y=pixelsize_y/incr_xy(2);
    #    x_center=x_center*incr_xy(1);
    #    y_center=y_center*incr_xy(2);
    #end
    unit_cell_vectors = np.dot(unit_cell_transform_matrix, unit_cell_vectors)
    if reconstruct_in_orthonormal_basis:
        # Replace the cell vectors by an orthonormal basis with the same span.
        [Q, _] = np.linalg.qr(unit_cell_vectors.T)
        unit_cell_vectors = Q.T
    metric_tensor = np.dot(unit_cell_vectors, unit_cell_vectors.T)
    [_, normalized_metric_tensor] = cov2corr(metric_tensor)
    transfrom_matrix = np.linalg.cholesky(np.linalg.inv(normalized_metric_tensor))
    corrections = correction_coefficients(h, instrument_parameters, medium, polarization_factor,
                                          polarization_plane_normal, wavelength, wavevector, detector_normal)
    micro_oscillation_angle = oscillation_angle / microsteps
    #Calculate h for frame number 0
    h_starting = det2lab_xds(h, 0, **instrument_parameters)[0]
    for frame_number in np.arange(first_image, last_image+1, image_increment):
        print ("reconstructing frame number %i" % frame_number)
        image = get_image(image_name(frame_number))
        image = image[measured_pixels]
        # Apply per-pixel corrections and the per-frame scale factor.
        image = image / corrections * scale[frame_number-first_image]
        for m in np.arange(0, microsteps):
            #Phi is with respect to phi at frame number 0
            phi_minus_phi0=( (frame_number - 0.5) * microsteps + m + 0.5) * micro_oscillation_angle
            # Rotate the frame-0 scattering vectors back by -phi, then map
            # to fractional coordinates and accumulate into the grid.
            h_frame = np.dot(rotvec2mat(rotation_axis, -np.deg2rad(phi_minus_phi0)), h_starting)
            fractional = np.dot(unit_cell_vectors, h_frame)
            del h_frame
            indices = to_index(fractional)
            del fractional
            accumulate_intensity(image, indices, rebinned_data, number_of_pixels_rebinned, number_of_pixels,
                                 all_in_memory)
    if all_in_memory:
        if output_filename is None:
            # No file requested: return the results in a plain dict.
            result = {}
        else:
            result = output_file
        if keep_number_of_pixels:
            result["rebinned_data"] = np.reshape(rebinned_data, number_of_pixels)
            result["number_of_pixels_rebinned"] = np.reshape(number_of_pixels_rebinned, number_of_pixels)
        else:
            # Average: sum of intensities divided by contribution counts.
            rebinned_data/=number_of_pixels_rebinned
            result["data"] = np.reshape(rebinned_data, number_of_pixels)
    else:
        result = output_file
        if not keep_number_of_pixels:
            data = output_file.create_dataset('data', shape=number_of_pixels, dtype='float32',
                                              chunks=True)
            # Average plane by plane to keep memory usage bounded.
            for i in range(number_of_pixels[0]):
                data[i,:,:]=result["rebinned_data"][i,:,:]/result["number_of_pixels_rebinned"][i,:,:]
            del result['rebinned_data']
            del result['number_of_pixels_rebinned']
    # Yell metadata: 0.9 keeps sums+counts, 1.0 stores the averaged data.
    if keep_number_of_pixels:
        result['format']="Yell 0.9"
    else:
        result['format']="Yell 1.0"
    result['space_group_nr'] = instrument_parameters['space_group_nr']
    result['unit_cell'] = instrument_parameters['cell']
    result['metric_tensor'] = metric_tensor
    result["step_sizes"] = step_size
    result["lower_limits"] = -maxind
    result['is_direct'] = False
    if output_filename is None:
        return result
    else:
        result.close()
    #todo: add lower limits, they are needed here
    #todo: add string for file version
    #todo: think of making the output nexus compatible
|
{
"content_hash": "0f57c1c550a16592d0789d52d1f62be5",
"timestamp": "",
"source": "github",
"line_count": 522,
"max_line_length": 131,
"avg_line_length": 45.05555555555556,
"alnum_prop": 0.5202602151452017,
"repo_name": "aglie/meerkat",
"id": "f20fedce910a23e2f0062ff28f4eb19f90cf996a",
"size": "23519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "meerkat/meerkat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31591"
}
],
"symlink_target": ""
}
|
import datetime
def serialize_user(user):
    """Render a user model as a JSON-safe dict (datetimes as ISO strings)."""
    updated = user.updated_at
    return {
        'id': user.id,
        'username': user.username,
        'email': user.email,
        'permission': user.permission,
        # updated_at may be unset until the first modification
        'updated_at': updated.isoformat() if updated is not None else None,
        'created_at': user.created_at.isoformat(),
    }
def serialize_login_history(history):
    """Render a login-history record as a JSON-safe dict."""
    return dict(
        id=history.id,
        ip_address=history.ip_address,
        agent=history.agent,
        accepted=history.accepted,
        created_at=history.created_at.isoformat(),
    )
|
{
"content_hash": "749806850b1cc06ab47797c4ddb12763",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 87,
"avg_line_length": 26.181818181818183,
"alnum_prop": 0.5972222222222222,
"repo_name": "h4wldev/Frest",
"id": "d88e112938c3edc050f31a4dc11570625fe9cc31",
"size": "600",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/modules/frest/serialize/user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32825"
}
],
"symlink_target": ""
}
|
import logging
from optimiz.optimizer import Optimizer
class StochasticGradientDescent(Optimizer):
    """Stochastic gradient descent: one parameter update per sample.

    ``func(x_i, y_i)`` must return ``(bias_gradient, weight_gradient)``;
    the returned values are ADDED (scaled by alpha) to the model, so func
    is expected to return the ascent direction of the objective being
    maximized (equivalently, the negative loss gradient) -- confirm with
    the models used alongside this optimizer.
    """

    def __init__(self, iteration=100, alpha=0.01):
        # iteration: number of full passes (epochs) over the data
        # alpha: step size applied to the gradients returned by func
        self.__iteration = iteration
        self.__alpha = alpha

    def optimize(self, func, x, y, model):
        """Run SGD, updating model.weights and model.bias in place.

        Raises:
            ValueError: if x and y have different lengths.
        """
        if len(x) != len(y):
            raise ValueError("len(x) must be equals to len(y).")
        for epoch in range(self.__iteration):
            bg = None
            wg = None
            for i in range(len(x)):
                bg, wg = func(x[i], y[i])
                model.weights += self.__alpha * wg
                model.bias += self.__alpha * bg
            # Progress report every 10 epochs.
            if (epoch + 1) % 10 == 0:
                logging.info("epoch:%s", epoch)
                # Fix: the original used a Python 2 print statement, a
                # syntax error on Python 3; print() emits the same text.
                print("epoch:%s, params:%s, gradient: %s,%s" % (epoch, model, wg, bg))
|
{
"content_hash": "1401e324b2ef43c96822ebf580def01b",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 85,
"avg_line_length": 34.608695652173914,
"alnum_prop": 0.5175879396984925,
"repo_name": "learn-ml/ml",
"id": "049006daf0b99c3368cec2730569ed27f808ab2f",
"size": "817",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "optimiz/sgd.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6970"
}
],
"symlink_target": ""
}
|
import json


def extract_alt_text(tweets, outfile):
    """Write one JSON line per tweet, keeping only the media alt-text fields.

    :param tweets: iterable of JSON-encoded tweet lines (e.g. an open file)
    :param outfile: writable text stream receiving one JSON object per tweet
    :raises KeyError: if a tweet lacks ``id_str`` or ``extended_entities``
        (unchanged from the original behavior)
    """
    for line in tweets:
        tweet = json.loads(line)
        new_line = {"id": tweet["id_str"], "media": []}
        for media in tweet["extended_entities"]["media"]:
            new_line["media"].append(
                {
                    "alt_text": media["ext_alt_text"],
                    "display_url": media["display_url"],
                    "media_url": media["media_url_https"],
                }
            )
        outfile.write(json.dumps(new_line) + "\n")


if __name__ == "__main__":
    # Both files are now closed deterministically via the context manager
    # (the input file was previously left to the garbage collector).
    with open("tweets.json") as infile, open("alt_text.json", "w") as outfile:
        extract_alt_text(infile, outfile)
|
{
"content_hash": "26adbc9569ca480aa5b7de8ed4661f20",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 62,
"avg_line_length": 34.05263157894737,
"alnum_prop": 0.4327666151468315,
"repo_name": "alexwlchan/alexwlchan.net",
"id": "c0725a5cb7c072a7dd572240d6e634dd0291e477",
"size": "671",
"binary": false,
"copies": "1",
"ref": "refs/heads/live",
"path": "src/_files/2022/extract_alt_text.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "805"
},
{
"name": "HTML",
"bytes": "23475"
},
{
"name": "JavaScript",
"bytes": "9984"
},
{
"name": "Makefile",
"bytes": "2024"
},
{
"name": "Python",
"bytes": "59072"
},
{
"name": "Ruby",
"bytes": "60319"
},
{
"name": "SCSS",
"bytes": "27176"
},
{
"name": "Scala",
"bytes": "6364"
},
{
"name": "Shell",
"bytes": "1425"
},
{
"name": "TeX",
"bytes": "26743"
}
],
"symlink_target": ""
}
|
import os
from leela.client import event
class Load(object):
    """Sensor that samples the system load averages."""

    def measure(self):
        """Read os.getloadavg() and wrap each average in an Event.

        Returns a list of three events named loadavg.1, loadavg.5 and
        loadavg.15, carrying the 1/5/15-minute load averages.
        """
        averages = os.getloadavg()
        labels = ("loadavg.1", "loadavg.5", "loadavg.15")
        return [event.Event(name, value) for name, value in zip(labels, averages)]
|
{
"content_hash": "a95ebb4ab0e486c4ac940518291b0cb7",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 46,
"avg_line_length": 25.727272727272727,
"alnum_prop": 0.5300353356890459,
"repo_name": "locaweb/leela-client",
"id": "d3d7d4237c761c94e398c871691133a1153c3b87",
"size": "925",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/leela/client/sensors/linux/load.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Emacs Lisp",
"bytes": "142"
},
{
"name": "HTML",
"bytes": "4196"
},
{
"name": "JavaScript",
"bytes": "15087"
},
{
"name": "Python",
"bytes": "44481"
},
{
"name": "Ruby",
"bytes": "16235"
},
{
"name": "Shell",
"bytes": "3906"
}
],
"symlink_target": ""
}
|
def join_outer(self,
               right,
               left_on,
               right_on=None):
    """
    join_outer performs outer join operation on one or two frames, creating a new frame.
    Parameters
    ----------
    :param right: (Frame) Another frame to join with
    :param left_on: (List[str]) Names of the columns in the left frame used to match up the two frames.
    :param right_on: (Optional[List[str]]) Names of the columns in the right frame used to match up the two frames. Default is the same as the left frame.
    :returns: (Frame) A new frame with the results of the join
    Create a new frame from a SQL JOIN operation with another frame.
    The frame on the 'left' is the currently active frame.
    The frame on the 'right' is another frame.
    This method take column(s) in the left frame and matches its values
    with column(s) in the right frame.
    The 'outer' join provides a frame with data from both frames where
    the left and right frames did not have the same value in the matching
    column(s).
    Notes
    -----
    When a column is named the same in both frames, it will result in two
    columns in the new frame.
    The column from the *left* frame (originally the current frame) will be
    copied and the column name will have the string "_L" added to it.
    The same thing will happen with the column from the *right* frame,
    except its name has the string "_R" appended. The order of columns
    after this method is called is not guaranteed.
    It is recommended that you rename the columns to meaningful terms prior
    to using the ``join`` method.
    Examples
    --------
    <hide>
    >>> codes = tc.frame.create([[1], [3], [1], [0], [2], [1], [5], [3]], [('numbers', int)])
    -etc-
    >>> colors = tc.frame.create([[1, 'red'], [2, 'yellow'], [3, 'green'], [4, 'blue']], [('numbers', int), ('color', str)])
    -etc-
    >>> country_code_rows = [[1, 354, "a"],[2, 91, "a"],[2, 100, "b"],[3, 47, "a"],[4, 968, "c"],[5, 50, "c"]]
    >>> country_code_schema = [("country_code", int),("area_code", int),("test_str",str)]
    -etc-
    >>> country_name_rows = [[1, "Iceland", "a"],[1, "Ice-land", "a"],[2, "India", "b"],[3, "Norway", "a"],[4, "Oman", "c"],[6, "Germany", "c"]]
    >>> country_names_schema = [("country_code", int),("country_name", str),("test_str",str)]
    -etc-
    >>> country_codes_frame = tc.frame.create(country_code_rows, country_code_schema)
    -etc-
    >>> country_names_frame= tc.frame.create(country_name_rows, country_names_schema)
    -etc-
    </hide>
    Consider two frames: codes and colors
    >>> codes.inspect()
    [#]  numbers
    ============
    [0]        1
    [1]        3
    [2]        1
    [3]        0
    [4]        2
    [5]        1
    [6]        5
    [7]        3
    >>> colors.inspect()
    [#]  numbers  color
    ====================
    [0]        1  red
    [1]        2  yellow
    [2]        3  green
    [3]        4  blue
    Join them on the 'numbers' column using an outer join
    >>> j_outer = codes.join_outer(colors, 'numbers')
    <progress>
    >>> j_outer.inspect()
    [#]  numbers_L  color
    ======================
    [0]          0  None
    [1]          1  red
    [2]          1  red
    [3]          1  red
    [4]          2  yellow
    [5]          3  green
    [6]          3  green
    [7]          4  blue
    [8]          5  None
    (The join adds an extra column *_R which is the join column from the right frame; it may be disregarded)
    Consider two frames: country_codes_frame and country_names_frame
    >>> country_codes_frame.inspect()
    [#]  country_code  area_code  test_str
    ======================================
    [0]             1        354  a
    [1]             2         91  a
    [2]             2        100  b
    [3]             3         47  a
    [4]             4        968  c
    [5]             5         50  c
    >>> country_names_frame.inspect()
    [#]  country_code  country_name  test_str
    =========================================
    [0]             1  Iceland       a
    [1]             1  Ice-land      a
    [2]             2  India         b
    [3]             3  Norway        a
    [4]             4  Oman          c
    [5]             6  Germany       c
    Join them on the 'country_code' and 'test_str' columns using an outer join
    >>> composite_join_outer = country_codes_frame.join_outer(country_names_frame, ['country_code', 'test_str'])
    <progress>
    >>> composite_join_outer.inspect()
    [#]  country_code_L  area_code  test_str_L  country_name
    ========================================================
    [0]               6       None  c           Germany
    [1]               1        354  a           Iceland
    [2]               1        354  a           Ice-land
    [3]               2         91  a           None
    [4]               2        100  b           India
    [5]               3         47  a           Norway
    [6]               4        968  c           Oman
    [7]               5         50  c           None
    """
    if left_on is None:
        raise ValueError("Please provide column name on which join should be performed")
    elif isinstance(left_on, basestring):  # basestring: this module targets Python 2
        # Normalize a single column name to a one-element list.
        left_on = [left_on]
    if right_on is None:
        # Default: join on the same column names in the right frame.
        right_on = left_on
    elif isinstance(right_on, basestring):
        right_on = [right_on]
    if len(left_on) != len(right_on):
        raise ValueError("Please provide equal number of join columns")
    # Delegate to the Scala-side joinOuter and wrap the result in a new Frame.
    return self._tc.frame.create(self._scala.joinOuter(right._scala,
                                                       self._tc.jutils.convert.to_scala_list_string(left_on),
                                                       self._tc.jutils.convert.to_scala_option(
                                                           self._tc.jutils.convert.to_scala_list_string(right_on))))
|
{
"content_hash": "46e2bb320cc579619ed63de07e713a8c",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 154,
"avg_line_length": 35.36309523809524,
"alnum_prop": 0.49183639117993605,
"repo_name": "shibanis1/spark-tk",
"id": "1aceb20e8b4d3a8ac103e94fb866b2083191f965",
"size": "5941",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/sparktk/frame/ops/join_outer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "477533"
},
{
"name": "Scala",
"bytes": "795488"
},
{
"name": "Shell",
"bytes": "8209"
}
],
"symlink_target": ""
}
|
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY2/PY3 compatibility shim: expose the Python 2 builtins-module name
# (__builtin__) and the Python 2 `long` integer type so the generated
# code below runs unchanged under Python 3.
if six.PY3:
  import builtins as __builtin__
  long = int
elif six.PY2:
  import __builtin__
# NOTE(review): auto-generated by pyangbind — do not hand-edit; regenerate
# from the openconfig-sampling YANG model instead.
class yc_config_openconfig_sampling__sampling_sflow_config(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module openconfig-sampling - based on the path /sampling/sflow/config. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.
  YANG Description: Configuration data for global sFlow.
  """
  # One name-mangled slot per YANG leaf, plus the pyangbind plumbing fields.
  __slots__ = ('_path_helper', '_extmethods', '__enabled','__agent_id_ipv4','__agent_id_ipv6','__dscp','__sample_size','__polling_interval','__ingress_sampling_rate','__egress_sampling_rate',)
  _yang_name = 'config'
  _pybind_generated_by = 'container'
  def __init__(self, *args, **kwargs):
    """Initialise one YANGDynClass wrapper per leaf of /sampling/sflow/config.

    At most one positional argument may be given: an object exposing the same
    _pyangbind_elements attributes, whose changed values are copied in via
    the corresponding _set_* methods (optionally with load=kwargs['load']).
    """
    self._path_helper = False
    self._extmethods = False
    # One wrapped leaf per YANG node; types/defaults come from the YANG model.
    self.__enabled = YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='boolean', is_config=True)
    self.__agent_id_ipv4 = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\\.([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])){3}'}), is_leaf=True, yang_name="agent-id-ipv4", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:ipv4-address', is_config=True)
    self.__agent_id_ipv6 = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:))'}), is_leaf=True, yang_name="agent-id-ipv6", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:ipv6-address', is_config=True)
    self.__dscp = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['0..63']}), is_leaf=True, yang_name="dscp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:dscp', is_config=True)
    self.__sample_size = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16)(128), is_leaf=True, yang_name="sample-size", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint16', is_config=True)
    self.__polling_interval = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="polling-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint16', is_config=True)
    self.__ingress_sampling_rate = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ingress-sampling-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint32', is_config=True)
    self.__egress_sampling_rate = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="egress-sampling-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint32', is_config=True)
    load = kwargs.pop("load", None)
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      # Copy-constructor path: require every element to exist on the source.
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        # Skip leaves that still hold their default (unchanged) value.
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)
  def _path(self):
    """Return this container's YANG path as a list of path components."""
    if hasattr(self, "_parent"):
      # Nested instantiation: extend the parent's path with our own name.
      return self._parent._path()+[self._yang_name]
    else:
      # Standalone instantiation: absolute path of this container.
      return ['sampling', 'sflow', 'config']
  def _get_enabled(self):
    """
    Getter method for enabled, mapped from YANG variable /sampling/sflow/config/enabled (boolean)
    YANG Description: Enables or disables sFlow sampling for the device.
    """
    # Returns the YANGDynClass wrapper, not a plain Python value.
    return self.__enabled
  def _set_enabled(self, v, load=False):
    """
    Setter method for enabled, mapped from YANG variable /sampling/sflow/config/enabled (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_enabled is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_enabled() directly.
    YANG Description: Enables or disables sFlow sampling for the device.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    # Re-wrap v to enforce the YANG type restriction; any wrapping failure
    # is reported uniformly as a ValueError.
    try:
      t = YANGDynClass(v,base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='boolean', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """enabled must be of a type compatible with boolean""",
          'defined-type': "boolean",
          'generated-type': """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='boolean', is_config=True)""",
        })
    self.__enabled = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_enabled(self):
    """Reset the enabled leaf to a fresh wrapper (default "false")."""
    self.__enabled = YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='boolean', is_config=True)
  def _get_agent_id_ipv4(self):
    """
    Getter method for agent_id_ipv4, mapped from YANG variable /sampling/sflow/config/agent_id_ipv4 (oc-inet:ipv4-address)
    YANG Description: Sets the agent identifier for IPv4 PDUs.
    """
    # Returns the YANGDynClass wrapper, not a plain Python value.
    return self.__agent_id_ipv4
  def _set_agent_id_ipv4(self, v, load=False):
    """
    Setter method for agent_id_ipv4, mapped from YANG variable /sampling/sflow/config/agent_id_ipv4 (oc-inet:ipv4-address)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_agent_id_ipv4 is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_agent_id_ipv4() directly.
    YANG Description: Sets the agent identifier for IPv4 PDUs.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    # Re-wrap v to enforce the dotted-quad pattern restriction; any wrapping
    # failure is reported uniformly as a ValueError.
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\\.([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])){3}'}), is_leaf=True, yang_name="agent-id-ipv4", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:ipv4-address', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """agent_id_ipv4 must be of a type compatible with oc-inet:ipv4-address""",
          'defined-type': "oc-inet:ipv4-address",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\\.([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])){3}'}), is_leaf=True, yang_name="agent-id-ipv4", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:ipv4-address', is_config=True)""",
        })
    self.__agent_id_ipv4 = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_agent_id_ipv4(self):
    """Reset the agent-id-ipv4 leaf to a fresh, unconfigured wrapper."""
    self.__agent_id_ipv4 = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\\.([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])){3}'}), is_leaf=True, yang_name="agent-id-ipv4", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:ipv4-address', is_config=True)
  def _get_agent_id_ipv6(self):
    """
    Getter method for agent_id_ipv6, mapped from YANG variable /sampling/sflow/config/agent_id_ipv6 (oc-inet:ipv6-address)
    YANG Description: Sets the agent identifier for IPv6 PDUs.
    """
    # Returns the YANGDynClass wrapper, not a plain Python value.
    return self.__agent_id_ipv6
  def _set_agent_id_ipv6(self, v, load=False):
    """
    Setter method for agent_id_ipv6, mapped from YANG variable /sampling/sflow/config/agent_id_ipv6 (oc-inet:ipv6-address)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_agent_id_ipv6 is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_agent_id_ipv6() directly.
    YANG Description: Sets the agent identifier for IPv6 PDUs.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    # Re-wrap v to enforce the IPv6 textual-form pattern restriction; any
    # wrapping failure is reported uniformly as a ValueError.
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:))'}), is_leaf=True, yang_name="agent-id-ipv6", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:ipv6-address', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """agent_id_ipv6 must be of a type compatible with oc-inet:ipv6-address""",
          'defined-type': "oc-inet:ipv6-address",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:))'}), is_leaf=True, yang_name="agent-id-ipv6", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:ipv6-address', is_config=True)""",
        })
    self.__agent_id_ipv6 = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_agent_id_ipv6(self):
    """Reset the agent-id-ipv6 leaf to a fresh, unconfigured wrapper."""
    self.__agent_id_ipv6 = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:))'}), is_leaf=True, yang_name="agent-id-ipv6", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:ipv6-address', is_config=True)
  def _get_dscp(self):
    """
    Getter method for dscp, mapped from YANG variable /sampling/sflow/config/dscp (oc-inet:dscp)
    YANG Description: DSCP marking of packets generated by the sFlow subsystem
    on the network device.
    """
    # Returns the YANGDynClass wrapper, not a plain Python value.
    return self.__dscp
  def _set_dscp(self, v, load=False):
    """
    Setter method for dscp, mapped from YANG variable /sampling/sflow/config/dscp (oc-inet:dscp)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_dscp is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_dscp() directly.
    YANG Description: DSCP marking of packets generated by the sFlow subsystem
    on the network device.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    # Re-wrap v to enforce the 0..63 DSCP range restriction; any wrapping
    # failure is reported uniformly as a ValueError.
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['0..63']}), is_leaf=True, yang_name="dscp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:dscp', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """dscp must be of a type compatible with oc-inet:dscp""",
          'defined-type': "oc-inet:dscp",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['0..63']}), is_leaf=True, yang_name="dscp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:dscp', is_config=True)""",
        })
    self.__dscp = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_dscp(self):
    """Reset the dscp leaf to a fresh, unconfigured wrapper."""
    self.__dscp = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['0..63']}), is_leaf=True, yang_name="dscp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:dscp', is_config=True)
  def _get_sample_size(self):
    """
    Getter method for sample_size, mapped from YANG variable /sampling/sflow/config/sample_size (uint16)
    YANG Description: Sets the maximum number of bytes to be copied from a
    sampled packet.
    """
    # Returns the YANGDynClass wrapper, not a plain Python value.
    return self.__sample_size
  def _set_sample_size(self, v, load=False):
    """
    Setter method for sample_size, mapped from YANG variable /sampling/sflow/config/sample_size (uint16)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_sample_size is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_sample_size() directly.
    YANG Description: Sets the maximum number of bytes to be copied from a
    sampled packet.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    # Re-wrap v to enforce the uint16 range restriction (default 128); any
    # wrapping failure is reported uniformly as a ValueError.
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16)(128), is_leaf=True, yang_name="sample-size", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint16', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """sample_size must be of a type compatible with uint16""",
          'defined-type': "uint16",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16)(128), is_leaf=True, yang_name="sample-size", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint16', is_config=True)""",
        })
    self.__sample_size = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_sample_size(self):
    """Reset the sample-size leaf to a fresh wrapper (default 128)."""
    self.__sample_size = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16)(128), is_leaf=True, yang_name="sample-size", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint16', is_config=True)
  def _get_polling_interval(self):
    """
    Getter method for polling_interval, mapped from YANG variable /sampling/sflow/config/polling_interval (uint16)
    YANG Description: Sets the traffic sampling polling interval.
    """
    # Returns the YANGDynClass wrapper, not a plain Python value.
    return self.__polling_interval
  def _set_polling_interval(self, v, load=False):
    """
    Setter method for polling_interval, mapped from YANG variable /sampling/sflow/config/polling_interval (uint16)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_polling_interval is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_polling_interval() directly.
    YANG Description: Sets the traffic sampling polling interval.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    # Re-wrap v to enforce the uint16 range restriction; any wrapping
    # failure is reported uniformly as a ValueError.
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="polling-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint16', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """polling_interval must be of a type compatible with uint16""",
          'defined-type': "uint16",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="polling-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint16', is_config=True)""",
        })
    self.__polling_interval = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_polling_interval(self):
    """Reset the polling-interval leaf to a fresh, unconfigured wrapper."""
    self.__polling_interval = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="polling-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint16', is_config=True)
  def _get_ingress_sampling_rate(self):
    """
    Getter method for ingress_sampling_rate, mapped from YANG variable /sampling/sflow/config/ingress_sampling_rate (uint32)
    YANG Description: Sets the ingress packet sampling rate. The rate is expressed
    as an integer N, where the intended sampling rate is 1/N
    packets. An implementation may implement the sampling rate as
    a statistical average, rather than a strict periodic sampling.
    The allowable sampling rate range is generally a property of
    the system, e.g., determined by the capability of the
    hardware.
    """
    # Returns the YANGDynClass wrapper, not a plain Python value.
    return self.__ingress_sampling_rate
  def _set_ingress_sampling_rate(self, v, load=False):
    """
    Setter method for ingress_sampling_rate, mapped from YANG variable /sampling/sflow/config/ingress_sampling_rate (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_ingress_sampling_rate is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_ingress_sampling_rate() directly.
    YANG Description: Sets the ingress packet sampling rate. The rate is expressed
    as an integer N, where the intended sampling rate is 1/N
    packets. An implementation may implement the sampling rate as
    a statistical average, rather than a strict periodic sampling.
    The allowable sampling rate range is generally a property of
    the system, e.g., determined by the capability of the
    hardware.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    # Re-wrap v to enforce the uint32 range restriction; any wrapping
    # failure is reported uniformly as a ValueError.
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ingress-sampling-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint32', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """ingress_sampling_rate must be of a type compatible with uint32""",
          'defined-type': "uint32",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ingress-sampling-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint32', is_config=True)""",
        })
    self.__ingress_sampling_rate = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_ingress_sampling_rate(self):
    """Reset the ingress-sampling-rate leaf to a fresh, unconfigured wrapper."""
    self.__ingress_sampling_rate = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ingress-sampling-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint32', is_config=True)
  def _get_egress_sampling_rate(self):
    """
    Getter method for egress_sampling_rate, mapped from YANG variable /sampling/sflow/config/egress_sampling_rate (uint32)
    YANG Description: Sets the egress packet sampling rate. The rate is expressed
    as an integer N, where the intended sampling rate is 1/N
    packets. An implementation may implement the sampling rate as
    a statistical average, rather than a strict periodic sampling.
    The allowable sampling rate range is generally a property of
    the system, e.g., determined by the capability of the
    hardware.
    """
    # Returns the YANGDynClass wrapper, not a plain Python value.
    return self.__egress_sampling_rate
def _set_egress_sampling_rate(self, v, load=False):
    """
    Setter method for egress_sampling_rate, mapped from YANG variable /sampling/sflow/config/egress_sampling_rate (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_egress_sampling_rate is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_egress_sampling_rate() directly.

    YANG Description: Sets the egress packet sampling rate. The rate is expressed
    as an integer N, where the intended sampling rate is 1/N
    packets. An implementation may implement the sampling rate as
    a statistical average, rather than a strict periodic sampling.
    The allowable sampling rate range is generally a property of
    the system, e.g., determined by the capability of the
    hardware.
    """
    # If v carries its own union-type coercion helper, normalise it first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce v into the typed leaf wrapper; raises on range/type violation.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="egress-sampling-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint32', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with a structured payload describing the expected YANG type.
        raise ValueError({
            'error-string': """egress_sampling_rate must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="egress-sampling-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint32', is_config=True)""",
        })
    self.__egress_sampling_rate = t
    # Propagate the change notification if a parent-level hook exists.
    if hasattr(self, '_set'):
        self._set()
def _unset_egress_sampling_rate(self):
    # Reset the leaf to a fresh, unset YANGDynClass instance — identical to
    # the one built in __init__ (uint32, config leaf). Generated by pyangbind.
    self.__egress_sampling_rate = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="egress-sampling-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint32', is_config=True)
# Public read/write properties of the config container: each YANG leaf is
# exposed through its generated getter/setter pair (config leaves are
# writable, hence both accessors are wired in).
enabled = __builtin__.property(_get_enabled, _set_enabled)
agent_id_ipv4 = __builtin__.property(_get_agent_id_ipv4, _set_agent_id_ipv4)
agent_id_ipv6 = __builtin__.property(_get_agent_id_ipv6, _set_agent_id_ipv6)
dscp = __builtin__.property(_get_dscp, _set_dscp)
sample_size = __builtin__.property(_get_sample_size, _set_sample_size)
polling_interval = __builtin__.property(_get_polling_interval, _set_polling_interval)
ingress_sampling_rate = __builtin__.property(_get_ingress_sampling_rate, _set_ingress_sampling_rate)
egress_sampling_rate = __builtin__.property(_get_egress_sampling_rate, _set_egress_sampling_rate)
# Ordered registry of the container's elements, used by pyangbind's copy
# constructor and serialisers; order mirrors the YANG model.
_pyangbind_elements = OrderedDict([('enabled', enabled), ('agent_id_ipv4', agent_id_ipv4), ('agent_id_ipv6', agent_id_ipv6), ('dscp', dscp), ('sample_size', sample_size), ('polling_interval', polling_interval), ('ingress_sampling_rate', ingress_sampling_rate), ('egress_sampling_rate', egress_sampling_rate), ])
class yc_state_openconfig_sampling__sampling_sflow_state(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-sampling - based on the path /sampling/sflow/state. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: Operational state data for global sFlow.
    """
    # NOTE(review): auto-generated pyangbind container. Every leaf below is
    # constructed with is_config=False, i.e. this container mirrors
    # operational state; the _set_* methods exist so backends can populate
    # values, per the docstrings.
    __slots__ = ('_path_helper', '_extmethods', '__enabled','__agent_id_ipv4','__agent_id_ipv6','__dscp','__sample_size','__polling_interval','__ingress_sampling_rate','__egress_sampling_rate',)

    _yang_name = 'state'
    _pybind_generated_by = 'container'

    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # One YANGDynClass wrapper per YANG leaf of /sampling/sflow/state.
        self.__enabled = YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='boolean', is_config=False)
        self.__agent_id_ipv4 = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\\.([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])){3}'}), is_leaf=True, yang_name="agent-id-ipv4", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:ipv4-address', is_config=False)
        self.__agent_id_ipv6 = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:))'}), is_leaf=True, yang_name="agent-id-ipv6", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:ipv6-address', is_config=False)
        self.__dscp = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['0..63']}), is_leaf=True, yang_name="dscp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:dscp', is_config=False)
        self.__sample_size = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16)(128), is_leaf=True, yang_name="sample-size", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint16', is_config=False)
        self.__polling_interval = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="polling-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint16', is_config=False)
        self.__ingress_sampling_rate = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ingress-sampling-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint32', is_config=False)
        self.__egress_sampling_rate = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="egress-sampling-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint32', is_config=False)

        load = kwargs.pop("load", None)
        # Copy-constructor path: a single positional argument is treated as a
        # source object whose matching elements are copied into this one.
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                # Skip elements still at their default (unchanged) value.
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Path is derived from the parent when attached to a tree; otherwise
        # fall back to this container's absolute YANG path.
        if hasattr(self, "_parent"):
            return self._parent._path()+[self._yang_name]
        else:
            return ['sampling', 'sflow', 'state']

    # -- leaf: enabled (boolean) --
    def _get_enabled(self):
        """
        Getter method for enabled, mapped from YANG variable /sampling/sflow/state/enabled (boolean)

        YANG Description: Enables or disables sFlow sampling for the device.
        """
        return self.__enabled

    def _set_enabled(self, v, load=False):
        """
        Setter method for enabled, mapped from YANG variable /sampling/sflow/state/enabled (boolean)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_enabled is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_enabled() directly.

        YANG Description: Enables or disables sFlow sampling for the device.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v,base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='boolean', is_config=False)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """enabled must be of a type compatible with boolean""",
                'defined-type': "boolean",
                'generated-type': """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='boolean', is_config=False)""",
            })
        self.__enabled = t
        if hasattr(self, '_set'):
            self._set()

    def _unset_enabled(self):
        self.__enabled = YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='boolean', is_config=False)

    # -- leaf: agent-id-ipv4 (oc-inet:ipv4-address) --
    def _get_agent_id_ipv4(self):
        """
        Getter method for agent_id_ipv4, mapped from YANG variable /sampling/sflow/state/agent_id_ipv4 (oc-inet:ipv4-address)

        YANG Description: Sets the agent identifier for IPv4 PDUs.
        """
        return self.__agent_id_ipv4

    def _set_agent_id_ipv4(self, v, load=False):
        """
        Setter method for agent_id_ipv4, mapped from YANG variable /sampling/sflow/state/agent_id_ipv4 (oc-inet:ipv4-address)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_agent_id_ipv4 is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_agent_id_ipv4() directly.

        YANG Description: Sets the agent identifier for IPv4 PDUs.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\\.([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])){3}'}), is_leaf=True, yang_name="agent-id-ipv4", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:ipv4-address', is_config=False)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """agent_id_ipv4 must be of a type compatible with oc-inet:ipv4-address""",
                'defined-type': "oc-inet:ipv4-address",
                'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\\.([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])){3}'}), is_leaf=True, yang_name="agent-id-ipv4", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:ipv4-address', is_config=False)""",
            })
        self.__agent_id_ipv4 = t
        if hasattr(self, '_set'):
            self._set()

    def _unset_agent_id_ipv4(self):
        self.__agent_id_ipv4 = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\\.([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])){3}'}), is_leaf=True, yang_name="agent-id-ipv4", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:ipv4-address', is_config=False)

    # -- leaf: agent-id-ipv6 (oc-inet:ipv6-address) --
    def _get_agent_id_ipv6(self):
        """
        Getter method for agent_id_ipv6, mapped from YANG variable /sampling/sflow/state/agent_id_ipv6 (oc-inet:ipv6-address)

        YANG Description: Sets the agent identifier for IPv6 PDUs.
        """
        return self.__agent_id_ipv6

    def _set_agent_id_ipv6(self, v, load=False):
        """
        Setter method for agent_id_ipv6, mapped from YANG variable /sampling/sflow/state/agent_id_ipv6 (oc-inet:ipv6-address)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_agent_id_ipv6 is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_agent_id_ipv6() directly.

        YANG Description: Sets the agent identifier for IPv6 PDUs.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v,base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:))'}), is_leaf=True, yang_name="agent-id-ipv6", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:ipv6-address', is_config=False)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """agent_id_ipv6 must be of a type compatible with oc-inet:ipv6-address""",
                'defined-type': "oc-inet:ipv6-address",
                'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:))'}), is_leaf=True, yang_name="agent-id-ipv6", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:ipv6-address', is_config=False)""",
            })
        self.__agent_id_ipv6 = t
        if hasattr(self, '_set'):
            self._set()

    def _unset_agent_id_ipv6(self):
        self.__agent_id_ipv6 = YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:))'}), is_leaf=True, yang_name="agent-id-ipv6", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:ipv6-address', is_config=False)

    # -- leaf: dscp (oc-inet:dscp) --
    def _get_dscp(self):
        """
        Getter method for dscp, mapped from YANG variable /sampling/sflow/state/dscp (oc-inet:dscp)

        YANG Description: DSCP marking of packets generated by the sFlow subsystem
        on the network device.
        """
        return self.__dscp

    def _set_dscp(self, v, load=False):
        """
        Setter method for dscp, mapped from YANG variable /sampling/sflow/state/dscp (oc-inet:dscp)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_dscp is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_dscp() directly.

        YANG Description: DSCP marking of packets generated by the sFlow subsystem
        on the network device.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['0..63']}), is_leaf=True, yang_name="dscp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:dscp', is_config=False)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """dscp must be of a type compatible with oc-inet:dscp""",
                'defined-type': "oc-inet:dscp",
                'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['0..63']}), is_leaf=True, yang_name="dscp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:dscp', is_config=False)""",
            })
        self.__dscp = t
        if hasattr(self, '_set'):
            self._set()

    def _unset_dscp(self):
        self.__dscp = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['0..63']}), is_leaf=True, yang_name="dscp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:dscp', is_config=False)

    # -- leaf: sample-size (uint16, default 128) --
    def _get_sample_size(self):
        """
        Getter method for sample_size, mapped from YANG variable /sampling/sflow/state/sample_size (uint16)

        YANG Description: Sets the maximum number of bytes to be copied from a
        sampled packet.
        """
        return self.__sample_size

    def _set_sample_size(self, v, load=False):
        """
        Setter method for sample_size, mapped from YANG variable /sampling/sflow/state/sample_size (uint16)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_sample_size is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_sample_size() directly.

        YANG Description: Sets the maximum number of bytes to be copied from a
        sampled packet.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16)(128), is_leaf=True, yang_name="sample-size", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint16', is_config=False)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """sample_size must be of a type compatible with uint16""",
                'defined-type': "uint16",
                'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16)(128), is_leaf=True, yang_name="sample-size", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint16', is_config=False)""",
            })
        self.__sample_size = t
        if hasattr(self, '_set'):
            self._set()

    def _unset_sample_size(self):
        self.__sample_size = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16)(128), is_leaf=True, yang_name="sample-size", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint16', is_config=False)

    # -- leaf: polling-interval (uint16) --
    def _get_polling_interval(self):
        """
        Getter method for polling_interval, mapped from YANG variable /sampling/sflow/state/polling_interval (uint16)

        YANG Description: Sets the traffic sampling polling interval.
        """
        return self.__polling_interval

    def _set_polling_interval(self, v, load=False):
        """
        Setter method for polling_interval, mapped from YANG variable /sampling/sflow/state/polling_interval (uint16)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_polling_interval is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_polling_interval() directly.

        YANG Description: Sets the traffic sampling polling interval.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="polling-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint16', is_config=False)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """polling_interval must be of a type compatible with uint16""",
                'defined-type': "uint16",
                'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="polling-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint16', is_config=False)""",
            })
        self.__polling_interval = t
        if hasattr(self, '_set'):
            self._set()

    def _unset_polling_interval(self):
        self.__polling_interval = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="polling-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint16', is_config=False)

    # -- leaf: ingress-sampling-rate (uint32) --
    def _get_ingress_sampling_rate(self):
        """
        Getter method for ingress_sampling_rate, mapped from YANG variable /sampling/sflow/state/ingress_sampling_rate (uint32)

        YANG Description: Sets the ingress packet sampling rate. The rate is expressed
        as an integer N, where the intended sampling rate is 1/N
        packets. An implementation may implement the sampling rate as
        a statistical average, rather than a strict periodic sampling.
        The allowable sampling rate range is generally a property of
        the system, e.g., determined by the capability of the
        hardware.
        """
        return self.__ingress_sampling_rate

    def _set_ingress_sampling_rate(self, v, load=False):
        """
        Setter method for ingress_sampling_rate, mapped from YANG variable /sampling/sflow/state/ingress_sampling_rate (uint32)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_ingress_sampling_rate is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_ingress_sampling_rate() directly.

        YANG Description: Sets the ingress packet sampling rate. The rate is expressed
        as an integer N, where the intended sampling rate is 1/N
        packets. An implementation may implement the sampling rate as
        a statistical average, rather than a strict periodic sampling.
        The allowable sampling rate range is generally a property of
        the system, e.g., determined by the capability of the
        hardware.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ingress-sampling-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint32', is_config=False)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """ingress_sampling_rate must be of a type compatible with uint32""",
                'defined-type': "uint32",
                'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ingress-sampling-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint32', is_config=False)""",
            })
        self.__ingress_sampling_rate = t
        if hasattr(self, '_set'):
            self._set()

    def _unset_ingress_sampling_rate(self):
        self.__ingress_sampling_rate = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ingress-sampling-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint32', is_config=False)

    # -- leaf: egress-sampling-rate (uint32) --
    def _get_egress_sampling_rate(self):
        """
        Getter method for egress_sampling_rate, mapped from YANG variable /sampling/sflow/state/egress_sampling_rate (uint32)

        YANG Description: Sets the egress packet sampling rate. The rate is expressed
        as an integer N, where the intended sampling rate is 1/N
        packets. An implementation may implement the sampling rate as
        a statistical average, rather than a strict periodic sampling.
        The allowable sampling rate range is generally a property of
        the system, e.g., determined by the capability of the
        hardware.
        """
        return self.__egress_sampling_rate

    def _set_egress_sampling_rate(self, v, load=False):
        """
        Setter method for egress_sampling_rate, mapped from YANG variable /sampling/sflow/state/egress_sampling_rate (uint32)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_egress_sampling_rate is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_egress_sampling_rate() directly.

        YANG Description: Sets the egress packet sampling rate. The rate is expressed
        as an integer N, where the intended sampling rate is 1/N
        packets. An implementation may implement the sampling rate as
        a statistical average, rather than a strict periodic sampling.
        The allowable sampling rate range is generally a property of
        the system, e.g., determined by the capability of the
        hardware.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="egress-sampling-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint32', is_config=False)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """egress_sampling_rate must be of a type compatible with uint32""",
                'defined-type': "uint32",
                'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="egress-sampling-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint32', is_config=False)""",
            })
        self.__egress_sampling_rate = t
        if hasattr(self, '_set'):
            self._set()

    def _unset_egress_sampling_rate(self):
        self.__egress_sampling_rate = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="egress-sampling-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint32', is_config=False)

    # State leaves are read-only to API consumers: properties expose only the
    # getters (contrast with the config container, which also wires setters).
    enabled = __builtin__.property(_get_enabled)
    agent_id_ipv4 = __builtin__.property(_get_agent_id_ipv4)
    agent_id_ipv6 = __builtin__.property(_get_agent_id_ipv6)
    dscp = __builtin__.property(_get_dscp)
    sample_size = __builtin__.property(_get_sample_size)
    polling_interval = __builtin__.property(_get_polling_interval)
    ingress_sampling_rate = __builtin__.property(_get_ingress_sampling_rate)
    egress_sampling_rate = __builtin__.property(_get_egress_sampling_rate)

    # Ordered registry of elements, used by the copy constructor above and by
    # pyangbind's serialisers; order mirrors the YANG model.
    _pyangbind_elements = OrderedDict([('enabled', enabled), ('agent_id_ipv4', agent_id_ipv4), ('agent_id_ipv6', agent_id_ipv6), ('dscp', dscp), ('sample_size', sample_size), ('polling_interval', polling_interval), ('ingress_sampling_rate', ingress_sampling_rate), ('egress_sampling_rate', egress_sampling_rate), ])
class yc_config_openconfig_sampling__sampling_sflow_collectors_collector_config(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module openconfig-sampling - based on the path /sampling/sflow/collectors/collector/config. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.
  YANG Description: Configuration data for sFlow collectors.
  """
  # NOTE: generated code - hand edits are normally lost on regeneration.
  # Slot names starting with '__' are name-mangled to
  # _yc_config_openconfig_sampling__sampling_sflow_collectors_collector_config__<leaf>.
  __slots__ = ('_path_helper', '_extmethods', '__address','__port','__source_address','__network_instance',)
  _yang_name = 'config'
  _pybind_generated_by = 'container'
  def __init__(self, *args, **kwargs):
    # Every leaf starts in its unset state; values arrive later via _set_*.
    self._path_helper = False
    self._extmethods = False
    # "address" and "source-address" are unions of an IPv4 dotted-quad
    # pattern and an IPv6 textual-address pattern.
    self.__address = YANGDynClass(base=[RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\\.([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])){3}'}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:))'}),], is_leaf=True, yang_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:ip-address', is_config=True)
    # "port" carries a model-supplied default of 6343.
    self.__port = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16)(6343), is_leaf=True, yang_name="port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:port-number', is_config=True)
    self.__source_address = YANGDynClass(base=[RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\\.([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])){3}'}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:))'}),], is_leaf=True, yang_name="source-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:ip-address', is_config=True)
    self.__network_instance = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="network-instance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-netinst:network-instance-ref', is_config=True)
    # Optional single positional argument: copy the *changed* elements of an
    # existing container-like object into this one (honouring load=).
    load = kwargs.pop("load", None)
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)
  def _path(self):
    # Absolute YANG path: delegate to the parent when attached to a tree,
    # otherwise fall back to this container's static path.
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return ['sampling', 'sflow', 'collectors', 'collector', 'config']
  def _get_address(self):
    """
    Getter method for address, mapped from YANG variable /sampling/sflow/collectors/collector/config/address (oc-inet:ip-address)
    YANG Description: IPv4/IPv6 address of the sFlow collector.
    """
    return self.__address
  def _set_address(self, v, load=False):
    """
    Setter method for address, mapped from YANG variable /sampling/sflow/collectors/collector/config/address (oc-inet:ip-address)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_address is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_address() directly.
    YANG Description: IPv4/IPv6 address of the sFlow collector.
    """
    # Unwrap a previously-wrapped value back to its underlying type first.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=[RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\\.([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])){3}'}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:))'}),], is_leaf=True, yang_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:ip-address', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """address must be of a type compatible with oc-inet:ip-address""",
          'defined-type': "oc-inet:ip-address",
          'generated-type': """YANGDynClass(base=[RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\\.([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])){3}'}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:))'}),], is_leaf=True, yang_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:ip-address', is_config=True)""",
        })
    self.__address = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_address(self):
    # Reset the leaf to a fresh, value-less wrapper.
    self.__address = YANGDynClass(base=[RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\\.([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])){3}'}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:))'}),], is_leaf=True, yang_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:ip-address', is_config=True)
  def _get_port(self):
    """
    Getter method for port, mapped from YANG variable /sampling/sflow/collectors/collector/config/port (oc-inet:port-number)
    YANG Description: UDP port number for the sFlow collector.
    """
    return self.__port
  def _set_port(self, v, load=False):
    """
    Setter method for port, mapped from YANG variable /sampling/sflow/collectors/collector/config/port (oc-inet:port-number)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_port is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_port() directly.
    YANG Description: UDP port number for the sFlow collector.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16)(6343), is_leaf=True, yang_name="port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:port-number', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """port must be of a type compatible with oc-inet:port-number""",
          'defined-type': "oc-inet:port-number",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16)(6343), is_leaf=True, yang_name="port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:port-number', is_config=True)""",
        })
    self.__port = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_port(self):
    # Reset the leaf; the wrapper re-applies the model default of 6343.
    self.__port = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16)(6343), is_leaf=True, yang_name="port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:port-number', is_config=True)
  def _get_source_address(self):
    """
    Getter method for source_address, mapped from YANG variable /sampling/sflow/collectors/collector/config/source_address (oc-inet:ip-address)
    YANG Description: Sets the source IPv4/IPv6 address for sFlow datagrams sent
    to sFlow collectors.
    """
    return self.__source_address
  def _set_source_address(self, v, load=False):
    """
    Setter method for source_address, mapped from YANG variable /sampling/sflow/collectors/collector/config/source_address (oc-inet:ip-address)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_source_address is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_source_address() directly.
    YANG Description: Sets the source IPv4/IPv6 address for sFlow datagrams sent
    to sFlow collectors.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=[RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\\.([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])){3}'}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:))'}),], is_leaf=True, yang_name="source-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:ip-address', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """source_address must be of a type compatible with oc-inet:ip-address""",
          'defined-type': "oc-inet:ip-address",
          'generated-type': """YANGDynClass(base=[RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\\.([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])){3}'}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:))'}),], is_leaf=True, yang_name="source-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:ip-address', is_config=True)""",
        })
    self.__source_address = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_source_address(self):
    # Reset the leaf to a fresh, value-less wrapper.
    self.__source_address = YANGDynClass(base=[RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\\.([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])){3}'}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:))'}),], is_leaf=True, yang_name="source-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:ip-address', is_config=True)
  def _get_network_instance(self):
    """
    Getter method for network_instance, mapped from YANG variable /sampling/sflow/collectors/collector/config/network_instance (oc-netinst:network-instance-ref)
    YANG Description: Reference to the network instance used to reach the
    sFlow collector. If unspecified, the collector destination
    is reachable in the default network instance.
    """
    return self.__network_instance
  def _set_network_instance(self, v, load=False):
    """
    Setter method for network_instance, mapped from YANG variable /sampling/sflow/collectors/collector/config/network_instance (oc-netinst:network-instance-ref)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_network_instance is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_network_instance() directly.
    YANG Description: Reference to the network instance used to reach the
    sFlow collector. If unspecified, the collector destination
    is reachable in the default network instance.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="network-instance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-netinst:network-instance-ref', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """network_instance must be of a type compatible with oc-netinst:network-instance-ref""",
          'defined-type': "oc-netinst:network-instance-ref",
          'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="network-instance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-netinst:network-instance-ref', is_config=True)""",
        })
    self.__network_instance = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_network_instance(self):
    # Reset the leaf to a fresh, value-less wrapper.
    self.__network_instance = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="network-instance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-netinst:network-instance-ref', is_config=True)
  # Config container (is_config=True): leaves are read/write properties.
  address = __builtin__.property(_get_address, _set_address)
  port = __builtin__.property(_get_port, _set_port)
  source_address = __builtin__.property(_get_source_address, _set_source_address)
  network_instance = __builtin__.property(_get_network_instance, _set_network_instance)
  # Ordered registry of the container's elements; iterated by the pyangbind
  # machinery (e.g. the copy-constructor loop in __init__).
  _pyangbind_elements = OrderedDict([('address', address), ('port', port), ('source_address', source_address), ('network_instance', network_instance), ])
class yc_state_openconfig_sampling__sampling_sflow_collectors_collector_state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-sampling - based on the path /sampling/sflow/collectors/collector/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Operational state data for sFlow collectors.
"""
__slots__ = ('_path_helper', '_extmethods', '__address','__port','__source_address','__network_instance','__packets_sent',)
_yang_name = 'state'
_pybind_generated_by = 'container'
  def __init__(self, *args, **kwargs):
    # pyangbind containers are initialised with every leaf in its unset
    # state; real values arrive later through the _set_* methods.
    self._path_helper = False
    self._extmethods = False
    # "address" / "source-address" accept either an IPv4 dotted-quad or an
    # IPv6 textual address (union of the two patterns).
    self.__address = YANGDynClass(base=[RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\\.([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])){3}'}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:))'}),], is_leaf=True, yang_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:ip-address', is_config=False)
    # "port" carries a model-supplied default of 6343.
    self.__port = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16)(6343), is_leaf=True, yang_name="port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:port-number', is_config=False)
    self.__source_address = YANGDynClass(base=[RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\\.([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])){3}'}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:))'}),], is_leaf=True, yang_name="source-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:ip-address', is_config=False)
    self.__network_instance = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="network-instance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-netinst:network-instance-ref', is_config=False)
    # "packets-sent" is a 64-bit counter leaf (oc-yang:counter64).
    self.__packets_sent = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="packets-sent", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-yang:counter64', is_config=False)
    # Optional single positional argument: copy the *changed* elements of an
    # existing container-like object into this one (honouring load=).
    load = kwargs.pop("load", None)
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)
  def _path(self):
    # Absolute YANG path: delegate to the parent when attached to a tree,
    # otherwise fall back to this container's static path.
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return ['sampling', 'sflow', 'collectors', 'collector', 'state']
  def _get_address(self):
    """
    Getter method for address, mapped from YANG variable /sampling/sflow/collectors/collector/state/address (oc-inet:ip-address)
    YANG Description: IPv4/IPv6 address of the sFlow collector.
    """
    # Returns the YANGDynClass wrapper, not a plain string.
    return self.__address
  def _set_address(self, v, load=False):
    """
    Setter method for address, mapped from YANG variable /sampling/sflow/collectors/collector/state/address (oc-inet:ip-address)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_address is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_address() directly.
    YANG Description: IPv4/IPv6 address of the sFlow collector.
    """
    # Unwrap a previously-wrapped value back to its underlying type first.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=[RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\\.([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])){3}'}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:))'}),], is_leaf=True, yang_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:ip-address', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """address must be of a type compatible with oc-inet:ip-address""",
          'defined-type': "oc-inet:ip-address",
          'generated-type': """YANGDynClass(base=[RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\\.([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])){3}'}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:))'}),], is_leaf=True, yang_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:ip-address', is_config=False)""",
        })
    self.__address = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_address(self):
    # Reset the leaf to a fresh, value-less wrapper.
    self.__address = YANGDynClass(base=[RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\\.([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])){3}'}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:))'}),], is_leaf=True, yang_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:ip-address', is_config=False)
  def _get_port(self):
    """
    Getter method for port, mapped from YANG variable /sampling/sflow/collectors/collector/state/port (oc-inet:port-number)
    YANG Description: UDP port number for the sFlow collector.
    """
    # Returns the YANGDynClass wrapper; defaults to 6343 when unset.
    return self.__port
  def _set_port(self, v, load=False):
    """
    Setter method for port, mapped from YANG variable /sampling/sflow/collectors/collector/state/port (oc-inet:port-number)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_port is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_port() directly.
    YANG Description: UDP port number for the sFlow collector.
    """
    # Unwrap a previously-wrapped value back to its underlying type first.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16)(6343), is_leaf=True, yang_name="port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:port-number', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """port must be of a type compatible with oc-inet:port-number""",
          'defined-type': "oc-inet:port-number",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16)(6343), is_leaf=True, yang_name="port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:port-number', is_config=False)""",
        })
    self.__port = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_port(self):
    # Reset the leaf; the wrapper re-applies the model default of 6343.
    self.__port = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), default=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16)(6343), is_leaf=True, yang_name="port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:port-number', is_config=False)
  def _get_source_address(self):
    """
    Getter method for source_address, mapped from YANG variable /sampling/sflow/collectors/collector/state/source_address (oc-inet:ip-address)
    YANG Description: Sets the source IPv4/IPv6 address for sFlow datagrams sent
    to sFlow collectors.
    """
    # Returns the YANGDynClass wrapper, not a plain string.
    return self.__source_address
  def _set_source_address(self, v, load=False):
    """
    Setter method for source_address, mapped from YANG variable /sampling/sflow/collectors/collector/state/source_address (oc-inet:ip-address)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_source_address is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_source_address() directly.
    YANG Description: Sets the source IPv4/IPv6 address for sFlow datagrams sent
    to sFlow collectors.
    """
    # Unwrap a previously-wrapped value back to its underlying type first.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=[RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\\.([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])){3}'}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:))'}),], is_leaf=True, yang_name="source-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:ip-address', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """source_address must be of a type compatible with oc-inet:ip-address""",
          'defined-type': "oc-inet:ip-address",
          'generated-type': """YANGDynClass(base=[RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\\.([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])){3}'}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:))'}),], is_leaf=True, yang_name="source-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:ip-address', is_config=False)""",
        })
    self.__source_address = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_source_address(self):
    # Reset the leaf to a fresh, value-less wrapper.
    self.__source_address = YANGDynClass(base=[RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\\.([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])){3}'}),RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '(([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:))'}),], is_leaf=True, yang_name="source-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-inet:ip-address', is_config=False)
  def _get_network_instance(self):
    """
    Getter method for network_instance, mapped from YANG variable /sampling/sflow/collectors/collector/state/network_instance (oc-netinst:network-instance-ref)

    YANG Description: Reference to the network instance used to reach the
    sFlow collector. If unspecified, the collector destination
    is reachable in the default network instance.
    """
    return self.__network_instance
  def _set_network_instance(self, v, load=False):
    """
    Setter method for network_instance, mapped from YANG variable /sampling/sflow/collectors/collector/state/network_instance (oc-netinst:network-instance-ref)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_network_instance is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_network_instance() directly.

    YANG Description: Reference to the network instance used to reach the
    sFlow collector. If unspecified, the collector destination
    is reachable in the default network instance.
    """
    # Let unified-type wrappers coerce the supplied value first, if present.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Wrap the value in the generated leaf type; raises on type mismatch.
      t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="network-instance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-netinst:network-instance-ref', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """network_instance must be of a type compatible with oc-netinst:network-instance-ref""",
          'defined-type': "oc-netinst:network-instance-ref",
          'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="network-instance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-netinst:network-instance-ref', is_config=False)""",
        })
    self.__network_instance = t
    # Notify the parent hierarchy of the change, when a _set hook exists.
    if hasattr(self, '_set'):
      self._set()
  def _unset_network_instance(self):
    # Reset network-instance to a fresh, unset YANGDynClass leaf.
    self.__network_instance = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="network-instance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-netinst:network-instance-ref', is_config=False)
  def _get_packets_sent(self):
    """
    Getter method for packets_sent, mapped from YANG variable /sampling/sflow/collectors/collector/state/packets_sent (oc-yang:counter64)

    YANG Description: The total number of packets sampled and sent to the
    collector.
    """
    return self.__packets_sent
  def _set_packets_sent(self, v, load=False):
    """
    Setter method for packets_sent, mapped from YANG variable /sampling/sflow/collectors/collector/state/packets_sent (oc-yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_packets_sent is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_packets_sent() directly.

    YANG Description: The total number of packets sampled and sent to the
    collector.
    """
    # Let unified-type wrappers coerce the supplied value first, if present.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # counter64: unsigned 64-bit range enforced by RestrictedClassType.
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="packets-sent", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-yang:counter64', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """packets_sent must be of a type compatible with oc-yang:counter64""",
          'defined-type': "oc-yang:counter64",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="packets-sent", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-yang:counter64', is_config=False)""",
        })
    self.__packets_sent = t
    # Notify the parent hierarchy of the change, when a _set hook exists.
    if hasattr(self, '_set'):
      self._set()
  def _unset_packets_sent(self):
    # Reset packets-sent to a fresh, unset YANGDynClass leaf.
    self.__packets_sent = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="packets-sent", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-yang:counter64', is_config=False)
address = __builtin__.property(_get_address)
port = __builtin__.property(_get_port)
source_address = __builtin__.property(_get_source_address)
network_instance = __builtin__.property(_get_network_instance)
packets_sent = __builtin__.property(_get_packets_sent)
_pyangbind_elements = OrderedDict([('address', address), ('port', port), ('source_address', source_address), ('network_instance', network_instance), ('packets_sent', packets_sent), ])
class yc_collector_openconfig_sampling__sampling_sflow_collectors_collector(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module openconfig-sampling - based on the path /sampling/sflow/collectors/collector. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.

  YANG Description: List of sFlow collectors to send sampling data. Packet
  samples are sent to all collectors specified.
  """
  # 'address' and 'port' are the list keys (is_keyval=True): once an entry is
  # inside an instantiated list they may not be reassigned directly.
  __slots__ = ('_path_helper', '_extmethods', '__address','__port','__config','__state',)

  _yang_name = 'collector'
  _pybind_generated_by = 'container'

  def __init__(self, *args, **kwargs):
    # Instantiate each child element with its generated default wrapper.
    self._path_helper = False
    self._extmethods = False
    self.__address = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='leafref', is_config=True)
    self.__port = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='leafref', is_config=True)
    self.__config = YANGDynClass(base=yc_config_openconfig_sampling__sampling_sflow_collectors_collector_config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='container', is_config=True)
    self.__state = YANGDynClass(base=yc_state_openconfig_sampling__sampling_sflow_collectors_collector_state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='container', is_config=True)

    load = kwargs.pop("load", None)
    # Copy-constructor path: a single positional argument must carry all of
    # this container's elements; changed elements are re-set onto self.
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)

  def _path(self):
    # Path is relative to the parent when attached, absolute otherwise.
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return ['sampling', 'sflow', 'collectors', 'collector']

  def _get_address(self):
    """
    Getter method for address, mapped from YANG variable /sampling/sflow/collectors/collector/address (leafref)

    YANG Description: Reference to address list key.
    """
    return self.__address

  def _set_address(self, v, load=False):
    """
    Setter method for address, mapped from YANG variable /sampling/sflow/collectors/collector/address (leafref)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_address is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_address() directly.

    YANG Description: Reference to address list key.
    """
    # List keys are immutable once this entry lives inside a list, unless the
    # value is being loaded (load=True) rather than set by a caller.
    parent = getattr(self, "_parent", None)
    if parent is not None and load is False:
      raise AttributeError("Cannot set keys directly when" +
                             " within an instantiated list")

    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='leafref', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """address must be of a type compatible with leafref""",
          'defined-type': "leafref",
          'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='leafref', is_config=True)""",
        })
    self.__address = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_address(self):
    # Reset the address key leaf to a fresh, unset wrapper.
    self.__address = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='leafref', is_config=True)

  def _get_port(self):
    """
    Getter method for port, mapped from YANG variable /sampling/sflow/collectors/collector/port (leafref)

    YANG Description: Reference to port list key.
    """
    return self.__port

  def _set_port(self, v, load=False):
    """
    Setter method for port, mapped from YANG variable /sampling/sflow/collectors/collector/port (leafref)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_port is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_port() directly.

    YANG Description: Reference to port list key.
    """
    # List keys are immutable once this entry lives inside a list, unless the
    # value is being loaded (load=True) rather than set by a caller.
    parent = getattr(self, "_parent", None)
    if parent is not None and load is False:
      raise AttributeError("Cannot set keys directly when" +
                             " within an instantiated list")

    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='leafref', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """port must be of a type compatible with leafref""",
          'defined-type': "leafref",
          'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='leafref', is_config=True)""",
        })
    self.__port = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_port(self):
    # Reset the port key leaf to a fresh, unset wrapper.
    self.__port = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='leafref', is_config=True)

  def _get_config(self):
    """
    Getter method for config, mapped from YANG variable /sampling/sflow/collectors/collector/config (container)

    YANG Description: Configuration data for sFlow collectors.
    """
    return self.__config

  def _set_config(self, v, load=False):
    """
    Setter method for config, mapped from YANG variable /sampling/sflow/collectors/collector/config (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_config is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_config() directly.

    YANG Description: Configuration data for sFlow collectors.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=yc_config_openconfig_sampling__sampling_sflow_collectors_collector_config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='container', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """config must be of a type compatible with container""",
          'defined-type': "container",
          'generated-type': """YANGDynClass(base=yc_config_openconfig_sampling__sampling_sflow_collectors_collector_config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='container', is_config=True)""",
        })
    self.__config = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_config(self):
    # Reset the config container to a fresh default instance.
    self.__config = YANGDynClass(base=yc_config_openconfig_sampling__sampling_sflow_collectors_collector_config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='container', is_config=True)

  def _get_state(self):
    """
    Getter method for state, mapped from YANG variable /sampling/sflow/collectors/collector/state (container)

    YANG Description: Operational state data for sFlow collectors.
    """
    return self.__state

  def _set_state(self, v, load=False):
    """
    Setter method for state, mapped from YANG variable /sampling/sflow/collectors/collector/state (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_state is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_state() directly.

    YANG Description: Operational state data for sFlow collectors.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=yc_state_openconfig_sampling__sampling_sflow_collectors_collector_state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='container', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """state must be of a type compatible with container""",
          'defined-type': "container",
          'generated-type': """YANGDynClass(base=yc_state_openconfig_sampling__sampling_sflow_collectors_collector_state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='container', is_config=True)""",
        })
    self.__state = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_state(self):
    # Reset the state container to a fresh default instance.
    self.__state = YANGDynClass(base=yc_state_openconfig_sampling__sampling_sflow_collectors_collector_state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='container', is_config=True)

  # Config-true container: each element is exposed as a read/write property.
  address = __builtin__.property(_get_address, _set_address)
  port = __builtin__.property(_get_port, _set_port)
  config = __builtin__.property(_get_config, _set_config)
  state = __builtin__.property(_get_state, _set_state)

  # Ordered registry of the container's child elements, used by PybindBase.
  _pyangbind_elements = OrderedDict([('address', address), ('port', port), ('config', config), ('state', state), ])
class yc_collectors_openconfig_sampling__sampling_sflow_collectors(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module openconfig-sampling - based on the path /sampling/sflow/collectors. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.

  YANG Description: Enclosing container for list of sFlow collectors.
  """
  __slots__ = ('_path_helper', '_extmethods', '__collector',)

  _yang_name = 'collectors'
  _pybind_generated_by = 'container'

  def __init__(self, *args, **kwargs):
    self._path_helper = False
    self._extmethods = False
    # 'collector' is a keyed YANG list; entries are keyed by 'address port'.
    self.__collector = YANGDynClass(base=YANGListType("address port",yc_collector_openconfig_sampling__sampling_sflow_collectors_collector, yang_name="collector", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='address port', extensions=None), is_container='list', yang_name="collector", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='list', is_config=True)

    load = kwargs.pop("load", None)
    # Copy-constructor path: a single positional argument must carry all of
    # this container's elements; changed elements are re-set onto self.
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)

  def _path(self):
    # Path is relative to the parent when attached, absolute otherwise.
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return ['sampling', 'sflow', 'collectors']

  def _get_collector(self):
    """
    Getter method for collector, mapped from YANG variable /sampling/sflow/collectors/collector (list)

    YANG Description: List of sFlow collectors to send sampling data. Packet
    samples are sent to all collectors specified.
    """
    return self.__collector

  def _set_collector(self, v, load=False):
    """
    Setter method for collector, mapped from YANG variable /sampling/sflow/collectors/collector (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_collector is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_collector() directly.

    YANG Description: List of sFlow collectors to send sampling data. Packet
    samples are sent to all collectors specified.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGListType("address port",yc_collector_openconfig_sampling__sampling_sflow_collectors_collector, yang_name="collector", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='address port', extensions=None), is_container='list', yang_name="collector", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='list', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """collector must be of a type compatible with list""",
          'defined-type': "list",
          'generated-type': """YANGDynClass(base=YANGListType("address port",yc_collector_openconfig_sampling__sampling_sflow_collectors_collector, yang_name="collector", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='address port', extensions=None), is_container='list', yang_name="collector", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='list', is_config=True)""",
        })
    self.__collector = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_collector(self):
    # Reset the collector list to a fresh, empty keyed list.
    self.__collector = YANGDynClass(base=YANGListType("address port",yc_collector_openconfig_sampling__sampling_sflow_collectors_collector, yang_name="collector", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='address port', extensions=None), is_container='list', yang_name="collector", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='list', is_config=True)

  collector = __builtin__.property(_get_collector, _set_collector)

  # Ordered registry of the container's child elements, used by PybindBase.
  _pyangbind_elements = OrderedDict([('collector', collector), ])
class yc_config_openconfig_sampling__sampling_sflow_interfaces_interface_config(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module openconfig-sampling - based on the path /sampling/sflow/interfaces/interface/config. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.

  YANG Description: Configuration data for sFlow data on interfaces.
  """
  # __slots__ mirrors _pyangbind_elements plus the pyangbind bookkeeping attrs.
  __slots__ = ('_path_helper', '_extmethods', '__name','__enabled','__polling_interval','__ingress_sampling_rate','__egress_sampling_rate',)

  _yang_name = 'config'
  _pybind_generated_by = 'container'
  def __init__(self, *args, **kwargs):
    # Instantiate each child leaf with its generated default wrapper.
    self._path_helper = False
    self._extmethods = False
    self.__name = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-if:base-interface-ref', is_config=True)
    self.__enabled = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='boolean', is_config=True)
    self.__polling_interval = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="polling-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint16', is_config=True)
    self.__ingress_sampling_rate = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ingress-sampling-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint32', is_config=True)
    self.__egress_sampling_rate = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="egress-sampling-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint32', is_config=True)

    load = kwargs.pop("load", None)
    # Copy-constructor path: a single positional argument must carry all of
    # this container's elements; changed elements are re-set onto self.
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return ['sampling', 'sflow', 'interfaces', 'interface', 'config']
  def _get_name(self):
    """
    Getter method for name, mapped from YANG variable /sampling/sflow/interfaces/interface/config/name (oc-if:base-interface-ref)

    YANG Description: Reference to the interface for sFlow configuration and
    state.
    """
    return self.__name
  def _set_name(self, v, load=False):
    """
    Setter method for name, mapped from YANG variable /sampling/sflow/interfaces/interface/config/name (oc-if:base-interface-ref)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_name is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_name() directly.

    YANG Description: Reference to the interface for sFlow configuration and
    state.
    """
    # Let unified-type wrappers coerce the supplied value first, if present.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-if:base-interface-ref', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """name must be of a type compatible with oc-if:base-interface-ref""",
          'defined-type': "oc-if:base-interface-ref",
          'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-if:base-interface-ref', is_config=True)""",
        })
    self.__name = t
    # Notify the parent hierarchy of the change, when a _set hook exists.
    if hasattr(self, '_set'):
      self._set()
  def _unset_name(self):
    # Reset name to a fresh, unset YANGDynClass leaf.
    self.__name = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-if:base-interface-ref', is_config=True)
  def _get_enabled(self):
    """
    Getter method for enabled, mapped from YANG variable /sampling/sflow/interfaces/interface/config/enabled (boolean)

    YANG Description: Enables or disables sFlow on the interface.  If sFlow is
    globally disabled, this leaf is ignored.  If sFlow
    is globally enabled, this leaf may be used to disable it
    for a specific interface.
    """
    return self.__enabled
  def _set_enabled(self, v, load=False):
    """
    Setter method for enabled, mapped from YANG variable /sampling/sflow/interfaces/interface/config/enabled (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_enabled is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_enabled() directly.

    YANG Description: Enables or disables sFlow on the interface.  If sFlow is
    globally disabled, this leaf is ignored.  If sFlow
    is globally enabled, this leaf may be used to disable it
    for a specific interface.
    """
    # Let unified-type wrappers coerce the supplied value first, if present.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='boolean', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """enabled must be of a type compatible with boolean""",
          'defined-type': "boolean",
          'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='boolean', is_config=True)""",
        })
    self.__enabled = t
    # Notify the parent hierarchy of the change, when a _set hook exists.
    if hasattr(self, '_set'):
      self._set()
  def _unset_enabled(self):
    # Reset enabled to a fresh, unset YANGDynClass leaf.
    self.__enabled = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='boolean', is_config=True)
  def _get_polling_interval(self):
    """
    Getter method for polling_interval, mapped from YANG variable /sampling/sflow/interfaces/interface/config/polling_interval (uint16)

    YANG Description: Sets the traffic sampling polling interval.
    """
    return self.__polling_interval
  def _set_polling_interval(self, v, load=False):
    """
    Setter method for polling_interval, mapped from YANG variable /sampling/sflow/interfaces/interface/config/polling_interval (uint16)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_polling_interval is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_polling_interval() directly.

    YANG Description: Sets the traffic sampling polling interval.
    """
    # Let unified-type wrappers coerce the supplied value first, if present.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # uint16: range 0..65535 enforced by RestrictedClassType.
      t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="polling-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint16', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """polling_interval must be of a type compatible with uint16""",
          'defined-type': "uint16",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="polling-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint16', is_config=True)""",
        })
    self.__polling_interval = t
    # Notify the parent hierarchy of the change, when a _set hook exists.
    if hasattr(self, '_set'):
      self._set()
  def _unset_polling_interval(self):
    # Reset polling-interval to a fresh, unset YANGDynClass leaf.
    self.__polling_interval = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="polling-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint16', is_config=True)
  def _get_ingress_sampling_rate(self):
    """
    Getter method for ingress_sampling_rate, mapped from YANG variable /sampling/sflow/interfaces/interface/config/ingress_sampling_rate (uint32)

    YANG Description: Sets the ingress packet sampling rate.  The rate is expressed
    as an integer N, where the intended sampling rate is 1/N
    packets.  An implementation may implement the sampling rate as
    a statistical average, rather than a strict periodic sampling.

    The allowable sampling rate range is generally a property of
    the system, e.g., determined by the capability of the
    hardware.
    """
    return self.__ingress_sampling_rate
  def _set_ingress_sampling_rate(self, v, load=False):
    """
    Setter method for ingress_sampling_rate, mapped from YANG variable /sampling/sflow/interfaces/interface/config/ingress_sampling_rate (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_ingress_sampling_rate is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_ingress_sampling_rate() directly.
    YANG Description: Sets the ingress packet sampling rate. The rate is expressed
    as an integer N, where the intended sampling rate is 1/N
    packets. An implementation may implement the sampling rate as
    a statistical average, rather than a strict periodic sampling.
    The allowable sampling rate range is generally a property of
    the system, e.g., determined by the capability of the
    hardware.
    """
    # Values carrying a _utype hook (union typedefs) are normalised first.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Re-wrap v as a range-checked uint32 leaf; raises TypeError/ValueError
      # for values outside 0..4294967295 or of an incompatible type.
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ingress-sampling-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint32', is_config=True)
    except (TypeError, ValueError):
      # Surface a structured error describing the expected YANG type.
      raise ValueError({
        'error-string': """ingress_sampling_rate must be of a type compatible with uint32""",
        'defined-type': "uint32",
        'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ingress-sampling-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint32', is_config=True)""",
      })
    self.__ingress_sampling_rate = t
    # Fire the registered change callback, if any.
    if hasattr(self, '_set'):
      self._set()
  def _unset_ingress_sampling_rate(self):
    # Reset the leaf to a freshly constructed (unset) uint32 wrapper,
    # discarding any previously configured value.
    self.__ingress_sampling_rate = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ingress-sampling-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint32', is_config=True)
  def _get_egress_sampling_rate(self):
    """
    Getter method for egress_sampling_rate, mapped from YANG variable /sampling/sflow/interfaces/interface/config/egress_sampling_rate (uint32)
    YANG Description: Sets the egress packet sampling rate. The rate is expressed
    as an integer N, where the intended sampling rate is 1/N
    packets. An implementation may implement the sampling rate as
    a statistical average, rather than a strict periodic sampling.
    The allowable sampling rate range is generally a property of
    the system, e.g., determined by the capability of the
    hardware.
    """
    # Returns the YANGDynClass-wrapped leaf, not a bare Python integer.
    return self.__egress_sampling_rate
  def _set_egress_sampling_rate(self, v, load=False):
    """
    Setter method for egress_sampling_rate, mapped from YANG variable /sampling/sflow/interfaces/interface/config/egress_sampling_rate (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_egress_sampling_rate is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_egress_sampling_rate() directly.
    YANG Description: Sets the egress packet sampling rate. The rate is expressed
    as an integer N, where the intended sampling rate is 1/N
    packets. An implementation may implement the sampling rate as
    a statistical average, rather than a strict periodic sampling.
    The allowable sampling rate range is generally a property of
    the system, e.g., determined by the capability of the
    hardware.
    """
    # Values carrying a _utype hook (union typedefs) are normalised first.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Re-wrap v as a range-checked uint32 leaf; raises on incompatible input.
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="egress-sampling-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint32', is_config=True)
    except (TypeError, ValueError):
      # Surface a structured error describing the expected YANG type.
      raise ValueError({
        'error-string': """egress_sampling_rate must be of a type compatible with uint32""",
        'defined-type': "uint32",
        'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="egress-sampling-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint32', is_config=True)""",
      })
    self.__egress_sampling_rate = t
    # Fire the registered change callback, if any.
    if hasattr(self, '_set'):
      self._set()
  def _unset_egress_sampling_rate(self):
    # Reset the leaf to a freshly constructed (unset) uint32 wrapper,
    # discarding any previously configured value.
    self.__egress_sampling_rate = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="egress-sampling-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint32', is_config=True)
name = __builtin__.property(_get_name, _set_name)
enabled = __builtin__.property(_get_enabled, _set_enabled)
polling_interval = __builtin__.property(_get_polling_interval, _set_polling_interval)
ingress_sampling_rate = __builtin__.property(_get_ingress_sampling_rate, _set_ingress_sampling_rate)
egress_sampling_rate = __builtin__.property(_get_egress_sampling_rate, _set_egress_sampling_rate)
_pyangbind_elements = OrderedDict([('name', name), ('enabled', enabled), ('polling_interval', polling_interval), ('ingress_sampling_rate', ingress_sampling_rate), ('egress_sampling_rate', egress_sampling_rate), ])
class yc_state_openconfig_sampling__sampling_sflow_interfaces_interface_state(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module openconfig-sampling - based on the path /sampling/sflow/interfaces/interface/state. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.
  YANG Description: Operational state data for sFlow data on interfaces.
  """
  # Every leaf in this container is operational state (is_config=False), so
  # the generated properties at the bottom of the class are read-only.
  # __slots__ avoids a per-instance __dict__; the double-underscore entries
  # are name-mangled to match the attributes assigned in the methods below.
  __slots__ = ('_path_helper', '_extmethods', '__name','__enabled','__polling_interval','__ingress_sampling_rate','__egress_sampling_rate','__packets_sampled',)
  _yang_name = 'state'
  _pybind_generated_by = 'container'
  def __init__(self, *args, **kwargs):
    self._path_helper = False
    self._extmethods = False
    # Default-construct every child leaf in the unset state.
    self.__name = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-if:base-interface-ref', is_config=False)
    self.__enabled = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='boolean', is_config=False)
    self.__polling_interval = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="polling-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint16', is_config=False)
    self.__ingress_sampling_rate = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ingress-sampling-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint32', is_config=False)
    self.__egress_sampling_rate = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="egress-sampling-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint32', is_config=False)
    self.__packets_sampled = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="packets-sampled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-yang:counter64', is_config=False)
    load = kwargs.pop("load", None)
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      # Copy-constructor path: the single argument must expose every element
      # listed in _pyangbind_elements; only changed elements are copied over.
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)
  def _path(self):
    # Schema path of this container; delegates to the parent when attached
    # to a tree, otherwise returns the absolute default path.
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return ['sampling', 'sflow', 'interfaces', 'interface', 'state']
  def _get_name(self):
    """
    Getter method for name, mapped from YANG variable /sampling/sflow/interfaces/interface/state/name (oc-if:base-interface-ref)
    YANG Description: Reference to the interface for sFlow configuration and
    state.
    """
    return self.__name
  def _set_name(self, v, load=False):
    """
    Setter method for name, mapped from YANG variable /sampling/sflow/interfaces/interface/state/name (oc-if:base-interface-ref)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_name is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_name() directly.
    YANG Description: Reference to the interface for sFlow configuration and
    state.
    """
    # Values carrying a _utype hook (union typedefs) are normalised first.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-if:base-interface-ref', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
        'error-string': """name must be of a type compatible with oc-if:base-interface-ref""",
        'defined-type': "oc-if:base-interface-ref",
        'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-if:base-interface-ref', is_config=False)""",
      })
    self.__name = t
    # Fire the registered change callback, if any.
    if hasattr(self, '_set'):
      self._set()
  def _unset_name(self):
    # Reset the leaf to a freshly constructed (unset) wrapper.
    self.__name = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-if:base-interface-ref', is_config=False)
  def _get_enabled(self):
    """
    Getter method for enabled, mapped from YANG variable /sampling/sflow/interfaces/interface/state/enabled (boolean)
    YANG Description: Enables or disables sFlow on the interface. If sFlow is
    globally disabled, this leaf is ignored. If sFlow
    is globally enabled, this leaf may be used to disable it
    for a specific interface.
    """
    return self.__enabled
  def _set_enabled(self, v, load=False):
    """
    Setter method for enabled, mapped from YANG variable /sampling/sflow/interfaces/interface/state/enabled (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_enabled is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_enabled() directly.
    YANG Description: Enables or disables sFlow on the interface. If sFlow is
    globally disabled, this leaf is ignored. If sFlow
    is globally enabled, this leaf may be used to disable it
    for a specific interface.
    """
    # Values carrying a _utype hook (union typedefs) are normalised first.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='boolean', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
        'error-string': """enabled must be of a type compatible with boolean""",
        'defined-type': "boolean",
        'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='boolean', is_config=False)""",
      })
    self.__enabled = t
    # Fire the registered change callback, if any.
    if hasattr(self, '_set'):
      self._set()
  def _unset_enabled(self):
    # Reset the leaf to a freshly constructed (unset) wrapper.
    self.__enabled = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='boolean', is_config=False)
  def _get_polling_interval(self):
    """
    Getter method for polling_interval, mapped from YANG variable /sampling/sflow/interfaces/interface/state/polling_interval (uint16)
    YANG Description: Sets the traffic sampling polling interval.
    """
    return self.__polling_interval
  def _set_polling_interval(self, v, load=False):
    """
    Setter method for polling_interval, mapped from YANG variable /sampling/sflow/interfaces/interface/state/polling_interval (uint16)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_polling_interval is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_polling_interval() directly.
    YANG Description: Sets the traffic sampling polling interval.
    """
    # Values carrying a _utype hook (union typedefs) are normalised first.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="polling-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint16', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
        'error-string': """polling_interval must be of a type compatible with uint16""",
        'defined-type': "uint16",
        'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="polling-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint16', is_config=False)""",
      })
    self.__polling_interval = t
    # Fire the registered change callback, if any.
    if hasattr(self, '_set'):
      self._set()
  def _unset_polling_interval(self):
    # Reset the leaf to a freshly constructed (unset) wrapper.
    self.__polling_interval = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="polling-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint16', is_config=False)
  def _get_ingress_sampling_rate(self):
    """
    Getter method for ingress_sampling_rate, mapped from YANG variable /sampling/sflow/interfaces/interface/state/ingress_sampling_rate (uint32)
    YANG Description: Sets the ingress packet sampling rate. The rate is expressed
    as an integer N, where the intended sampling rate is 1/N
    packets. An implementation may implement the sampling rate as
    a statistical average, rather than a strict periodic sampling.
    The allowable sampling rate range is generally a property of
    the system, e.g., determined by the capability of the
    hardware.
    """
    return self.__ingress_sampling_rate
  def _set_ingress_sampling_rate(self, v, load=False):
    """
    Setter method for ingress_sampling_rate, mapped from YANG variable /sampling/sflow/interfaces/interface/state/ingress_sampling_rate (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_ingress_sampling_rate is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_ingress_sampling_rate() directly.
    YANG Description: Sets the ingress packet sampling rate. The rate is expressed
    as an integer N, where the intended sampling rate is 1/N
    packets. An implementation may implement the sampling rate as
    a statistical average, rather than a strict periodic sampling.
    The allowable sampling rate range is generally a property of
    the system, e.g., determined by the capability of the
    hardware.
    """
    # Values carrying a _utype hook (union typedefs) are normalised first.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ingress-sampling-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
        'error-string': """ingress_sampling_rate must be of a type compatible with uint32""",
        'defined-type': "uint32",
        'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ingress-sampling-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint32', is_config=False)""",
      })
    self.__ingress_sampling_rate = t
    # Fire the registered change callback, if any.
    if hasattr(self, '_set'):
      self._set()
  def _unset_ingress_sampling_rate(self):
    # Reset the leaf to a freshly constructed (unset) wrapper.
    self.__ingress_sampling_rate = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="ingress-sampling-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint32', is_config=False)
  def _get_egress_sampling_rate(self):
    """
    Getter method for egress_sampling_rate, mapped from YANG variable /sampling/sflow/interfaces/interface/state/egress_sampling_rate (uint32)
    YANG Description: Sets the egress packet sampling rate. The rate is expressed
    as an integer N, where the intended sampling rate is 1/N
    packets. An implementation may implement the sampling rate as
    a statistical average, rather than a strict periodic sampling.
    The allowable sampling rate range is generally a property of
    the system, e.g., determined by the capability of the
    hardware.
    """
    return self.__egress_sampling_rate
  def _set_egress_sampling_rate(self, v, load=False):
    """
    Setter method for egress_sampling_rate, mapped from YANG variable /sampling/sflow/interfaces/interface/state/egress_sampling_rate (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_egress_sampling_rate is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_egress_sampling_rate() directly.
    YANG Description: Sets the egress packet sampling rate. The rate is expressed
    as an integer N, where the intended sampling rate is 1/N
    packets. An implementation may implement the sampling rate as
    a statistical average, rather than a strict periodic sampling.
    The allowable sampling rate range is generally a property of
    the system, e.g., determined by the capability of the
    hardware.
    """
    # Values carrying a _utype hook (union typedefs) are normalised first.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="egress-sampling-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
        'error-string': """egress_sampling_rate must be of a type compatible with uint32""",
        'defined-type': "uint32",
        'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="egress-sampling-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint32', is_config=False)""",
      })
    self.__egress_sampling_rate = t
    # Fire the registered change callback, if any.
    if hasattr(self, '_set'):
      self._set()
  def _unset_egress_sampling_rate(self):
    # Reset the leaf to a freshly constructed (unset) wrapper.
    self.__egress_sampling_rate = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="egress-sampling-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='uint32', is_config=False)
  def _get_packets_sampled(self):
    """
    Getter method for packets_sampled, mapped from YANG variable /sampling/sflow/interfaces/interface/state/packets_sampled (oc-yang:counter64)
    YANG Description: Total number of packets sampled from the interface.
    """
    return self.__packets_sampled
  def _set_packets_sampled(self, v, load=False):
    """
    Setter method for packets_sampled, mapped from YANG variable /sampling/sflow/interfaces/interface/state/packets_sampled (oc-yang:counter64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_packets_sampled is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_packets_sampled() directly.
    YANG Description: Total number of packets sampled from the interface.
    """
    # Values carrying a _utype hook (union typedefs) are normalised first.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="packets-sampled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-yang:counter64', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
        'error-string': """packets_sampled must be of a type compatible with oc-yang:counter64""",
        'defined-type': "oc-yang:counter64",
        'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="packets-sampled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-yang:counter64', is_config=False)""",
      })
    self.__packets_sampled = t
    # Fire the registered change callback, if any.
    if hasattr(self, '_set'):
      self._set()
  def _unset_packets_sampled(self):
    # Reset the counter leaf to a freshly constructed (unset) wrapper.
    self.__packets_sampled = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="packets-sampled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='oc-yang:counter64', is_config=False)
  # Read-only properties: state leaves expose only their getters.
  name = __builtin__.property(_get_name)
  enabled = __builtin__.property(_get_enabled)
  polling_interval = __builtin__.property(_get_polling_interval)
  ingress_sampling_rate = __builtin__.property(_get_ingress_sampling_rate)
  egress_sampling_rate = __builtin__.property(_get_egress_sampling_rate)
  packets_sampled = __builtin__.property(_get_packets_sampled)
  # Ordered registry of child elements; PybindBase iterates this for
  # copy-construction and serialisation.
  _pyangbind_elements = OrderedDict([('name', name), ('enabled', enabled), ('polling_interval', polling_interval), ('ingress_sampling_rate', ingress_sampling_rate), ('egress_sampling_rate', egress_sampling_rate), ('packets_sampled', packets_sampled), ])
class yc_interface_openconfig_sampling__sampling_sflow_interfaces_interface(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-sampling - based on the path /sampling/sflow/interfaces/interface. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: List of interfaces with sFlow data.
"""
__slots__ = ('_path_helper', '_extmethods', '__name','__config','__state',)
_yang_name = 'interface'
_pybind_generated_by = 'container'
  def __init__(self, *args, **kwargs):
    self._path_helper = False
    self._extmethods = False
    # Default-construct the list-key leaf and the config/state child containers.
    self.__name = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='leafref', is_config=True)
    self.__config = YANGDynClass(base=yc_config_openconfig_sampling__sampling_sflow_interfaces_interface_config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='container', is_config=True)
    self.__state = YANGDynClass(base=yc_state_openconfig_sampling__sampling_sflow_interfaces_interface_state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='container', is_config=True)
    load = kwargs.pop("load", None)
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      # Copy-constructor path: the single argument must expose every element
      # listed in _pyangbind_elements; only changed elements are copied over.
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return ['sampling', 'sflow', 'interfaces', 'interface']
  def _get_name(self):
    """
    Getter method for name, mapped from YANG variable /sampling/sflow/interfaces/interface/name (leafref)
    YANG Description: Reference to list key.
    """
    # Returns the YANGDynClass-wrapped key leaf set in __init__/_set_name.
    return self.__name
  def _set_name(self, v, load=False):
    """
    Setter method for name, mapped from YANG variable /sampling/sflow/interfaces/interface/name (leafref)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_name is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_name() directly.
    YANG Description: Reference to list key.
    """
    # 'name' is the list key: once this entry lives inside an instantiated
    # list (it has a _parent), the key may only change via a load operation.
    parent = getattr(self, "_parent", None)
    if parent is not None and load is False:
      raise AttributeError("Cannot set keys directly when" +
                             " within an instantiated list")
    # Values carrying a _utype hook (union typedefs) are normalised first.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='leafref', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
        'error-string': """name must be of a type compatible with leafref""",
        'defined-type': "leafref",
        'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='leafref', is_config=True)""",
      })
    self.__name = t
    # Fire the registered change callback, if any.
    if hasattr(self, '_set'):
      self._set()
  def _unset_name(self):
    # Reset the key leaf to a freshly constructed (unset) wrapper.
    self.__name = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='leafref', is_config=True)
  def _get_config(self):
    """
    Getter method for config, mapped from YANG variable /sampling/sflow/interfaces/interface/config (container)
    YANG Description: Configuration data for sFlow data on interfaces.
    """
    # Returns the child config container (a YANGDynClass wrapper).
    return self.__config
  def _set_config(self, v, load=False):
    """
    Setter method for config, mapped from YANG variable /sampling/sflow/interfaces/interface/config (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_config is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_config() directly.
    YANG Description: Configuration data for sFlow data on interfaces.
    """
    # Values carrying a _utype hook (union typedefs) are normalised first.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Re-wrap v as the config container class; raises on incompatible input.
      t = YANGDynClass(v,base=yc_config_openconfig_sampling__sampling_sflow_interfaces_interface_config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='container', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
        'error-string': """config must be of a type compatible with container""",
        'defined-type': "container",
        'generated-type': """YANGDynClass(base=yc_config_openconfig_sampling__sampling_sflow_interfaces_interface_config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='container', is_config=True)""",
      })
    self.__config = t
    # Fire the registered change callback, if any.
    if hasattr(self, '_set'):
      self._set()
  def _unset_config(self):
    # Replace the child container with a freshly constructed (empty) one.
    self.__config = YANGDynClass(base=yc_config_openconfig_sampling__sampling_sflow_interfaces_interface_config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='container', is_config=True)
def _get_state(self):
    """
    Getter method for state, mapped from YANG variable
    /sampling/sflow/interfaces/interface/state (container).

    YANG Description: Operational state data for sFlow data on interfaces.
    """
    return self.__state
def _set_state(self, v, load=False):
    """
    Setter method for state, mapped from YANG variable
    /sampling/sflow/interfaces/interface/state (container).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_state is considered as a private method. Backends looking to
    populate this variable should do so via calling thisObj._set_state()
    directly.

    YANG Description: Operational state data for sFlow data on interfaces.
    """
    # NOTE: auto-generated by pyangbind; regenerate rather than hand-editing.
    # Unwrap values that carry their underlying YANG type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce v into the generated container class, attaching YANG metadata.
        t = YANGDynClass(v,base=yc_state_openconfig_sampling__sampling_sflow_interfaces_interface_state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Surface the expected generated type to the caller for debugging.
        raise ValueError({
            'error-string': """state must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=yc_state_openconfig_sampling__sampling_sflow_interfaces_interface_state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='container', is_config=True)""",
        })

    self.__state = t
    # Notify the parent object (if any) that a child value changed.
    if hasattr(self, '_set'):
        self._set()
def _unset_state(self):
    # Reset state to a fresh, empty generated container (the pyangbind default).
    self.__state = YANGDynClass(base=yc_state_openconfig_sampling__sampling_sflow_interfaces_interface_state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='container', is_config=True)

# Public pyangbind accessors for this list entry's child elements.
name = __builtin__.property(_get_name, _set_name)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)

# Ordered registry of child elements, consumed by the copy-constructor logic.
_pyangbind_elements = OrderedDict([('name', name), ('config', config), ('state', state), ])
class yc_interfaces_openconfig_sampling__sampling_sflow_interfaces(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-sampling - based on the path
    /sampling/sflow/interfaces. Each member element of the container is
    represented as a class variable - with a specific YANG type.

    YANG Description: Enclosing container for list of sFlow interfaces.
    """
    # NOTE: generated code; regenerate with pyangbind rather than hand-editing.
    __slots__ = ('_path_helper', '_extmethods', '__interface',)

    _yang_name = 'interfaces'
    _pybind_generated_by = 'container'

    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # The keyed 'interface' list child, wrapped with its YANG metadata.
        self.__interface = YANGDynClass(base=YANGListType("name",yc_interface_openconfig_sampling__sampling_sflow_interfaces_interface, yang_name="interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions=None), is_container='list', yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='list', is_config=True)

        load = kwargs.pop("load", None)
        # Copy-constructor support: a single positional argument must expose
        # every registered child element, which is then copied across.
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                # Only copy elements that were explicitly changed on the source.
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Absolute YANG path of this container; delegates to the parent if attached.
        if hasattr(self, "_parent"):
            return self._parent._path()+[self._yang_name]
        else:
            return ['sampling', 'sflow', 'interfaces']

    def _get_interface(self):
        """
        Getter method for interface, mapped from YANG variable
        /sampling/sflow/interfaces/interface (list).

        YANG Description: List of interfaces with sFlow data.
        """
        return self.__interface

    def _set_interface(self, v, load=False):
        """
        Setter method for interface, mapped from YANG variable
        /sampling/sflow/interfaces/interface (list).

        If this variable is read-only (config: false) in the source YANG file,
        then _set_interface is considered as a private method. Backends looking
        to populate this variable should do so via calling
        thisObj._set_interface() directly.

        YANG Description: List of interfaces with sFlow data.
        """
        # Unwrap values that carry their underlying YANG type.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # Coerce v into the generated keyed-list type, attaching YANG metadata.
            t = YANGDynClass(v,base=YANGListType("name",yc_interface_openconfig_sampling__sampling_sflow_interfaces_interface, yang_name="interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions=None), is_container='list', yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='list', is_config=True)
        except (TypeError, ValueError):
            # Surface the expected generated type to the caller for debugging.
            raise ValueError({
                'error-string': """interface must be of a type compatible with list""",
                'defined-type': "list",
                'generated-type': """YANGDynClass(base=YANGListType("name",yc_interface_openconfig_sampling__sampling_sflow_interfaces_interface, yang_name="interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions=None), is_container='list', yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='list', is_config=True)""",
            })

        self.__interface = t
        # Notify the parent object (if any) that a child value changed.
        if hasattr(self, '_set'):
            self._set()

    def _unset_interface(self):
        # Reset interface to a fresh, empty keyed list (the pyangbind default).
        self.__interface = YANGDynClass(base=YANGListType("name",yc_interface_openconfig_sampling__sampling_sflow_interfaces_interface, yang_name="interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions=None), is_container='list', yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='list', is_config=True)

    # Public pyangbind accessor for the 'interface' list.
    interface = __builtin__.property(_get_interface, _set_interface)

    # Ordered registry of child elements, consumed by the copy-constructor logic.
    _pyangbind_elements = OrderedDict([('interface', interface), ])
class yc_sflow_openconfig_sampling__sampling_sflow(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-sampling - based on the path /sampling/sflow.
    Each member element of the container is represented as a class variable -
    with a specific YANG type.

    YANG Description: Top-level container for sFlow data.
    """
    # NOTE: generated code; regenerate with pyangbind rather than hand-editing.
    __slots__ = ('_path_helper', '_extmethods', '__config','__state','__collectors','__interfaces',)

    _yang_name = 'sflow'
    _pybind_generated_by = 'container'

    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # Child containers, each wrapped with its YANG metadata.
        self.__config = YANGDynClass(base=yc_config_openconfig_sampling__sampling_sflow_config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='container', is_config=True)
        self.__state = YANGDynClass(base=yc_state_openconfig_sampling__sampling_sflow_state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='container', is_config=True)
        self.__collectors = YANGDynClass(base=yc_collectors_openconfig_sampling__sampling_sflow_collectors, is_container='container', yang_name="collectors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='container', is_config=True)
        self.__interfaces = YANGDynClass(base=yc_interfaces_openconfig_sampling__sampling_sflow_interfaces, is_container='container', yang_name="interfaces", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='container', is_config=True)

        load = kwargs.pop("load", None)
        # Copy-constructor support: a single positional argument must expose
        # every registered child element, which is then copied across.
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                # Only copy elements that were explicitly changed on the source.
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Absolute YANG path of this container; delegates to the parent if attached.
        if hasattr(self, "_parent"):
            return self._parent._path()+[self._yang_name]
        else:
            return ['sampling', 'sflow']

    def _get_config(self):
        """
        Getter method for config, mapped from YANG variable
        /sampling/sflow/config (container).

        YANG Description: Configuration data for global sFlow.
        """
        return self.__config

    def _set_config(self, v, load=False):
        """
        Setter method for config, mapped from YANG variable
        /sampling/sflow/config (container).

        If this variable is read-only (config: false) in the source YANG file,
        then _set_config is considered as a private method. Backends looking to
        populate this variable should do so via calling thisObj._set_config()
        directly.

        YANG Description: Configuration data for global sFlow.
        """
        # Unwrap values that carry their underlying YANG type.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # Coerce v into the generated container class, attaching YANG metadata.
            t = YANGDynClass(v,base=yc_config_openconfig_sampling__sampling_sflow_config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='container', is_config=True)
        except (TypeError, ValueError):
            # Surface the expected generated type to the caller for debugging.
            raise ValueError({
                'error-string': """config must be of a type compatible with container""",
                'defined-type': "container",
                'generated-type': """YANGDynClass(base=yc_config_openconfig_sampling__sampling_sflow_config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='container', is_config=True)""",
            })

        self.__config = t
        # Notify the parent object (if any) that a child value changed.
        if hasattr(self, '_set'):
            self._set()

    def _unset_config(self):
        # Reset config to a fresh, empty generated container (the pyangbind default).
        self.__config = YANGDynClass(base=yc_config_openconfig_sampling__sampling_sflow_config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='container', is_config=True)

    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable
        /sampling/sflow/state (container).

        YANG Description: Operational state data for global sFlow.
        """
        return self.__state

    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable
        /sampling/sflow/state (container).

        If this variable is read-only (config: false) in the source YANG file,
        then _set_state is considered as a private method. Backends looking to
        populate this variable should do so via calling thisObj._set_state()
        directly.

        YANG Description: Operational state data for global sFlow.
        """
        # Unwrap values that carry their underlying YANG type.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # Coerce v into the generated container class, attaching YANG metadata.
            t = YANGDynClass(v,base=yc_state_openconfig_sampling__sampling_sflow_state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='container', is_config=True)
        except (TypeError, ValueError):
            # Surface the expected generated type to the caller for debugging.
            raise ValueError({
                'error-string': """state must be of a type compatible with container""",
                'defined-type': "container",
                'generated-type': """YANGDynClass(base=yc_state_openconfig_sampling__sampling_sflow_state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='container', is_config=True)""",
            })

        self.__state = t
        # Notify the parent object (if any) that a child value changed.
        if hasattr(self, '_set'):
            self._set()

    def _unset_state(self):
        # Reset state to a fresh, empty generated container (the pyangbind default).
        self.__state = YANGDynClass(base=yc_state_openconfig_sampling__sampling_sflow_state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='container', is_config=True)

    def _get_collectors(self):
        """
        Getter method for collectors, mapped from YANG variable
        /sampling/sflow/collectors (container).

        YANG Description: Enclosing container for list of sFlow collectors.
        """
        return self.__collectors

    def _set_collectors(self, v, load=False):
        """
        Setter method for collectors, mapped from YANG variable
        /sampling/sflow/collectors (container).

        If this variable is read-only (config: false) in the source YANG file,
        then _set_collectors is considered as a private method. Backends
        looking to populate this variable should do so via calling
        thisObj._set_collectors() directly.

        YANG Description: Enclosing container for list of sFlow collectors.
        """
        # Unwrap values that carry their underlying YANG type.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # Coerce v into the generated container class, attaching YANG metadata.
            t = YANGDynClass(v,base=yc_collectors_openconfig_sampling__sampling_sflow_collectors, is_container='container', yang_name="collectors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='container', is_config=True)
        except (TypeError, ValueError):
            # Surface the expected generated type to the caller for debugging.
            raise ValueError({
                'error-string': """collectors must be of a type compatible with container""",
                'defined-type': "container",
                'generated-type': """YANGDynClass(base=yc_collectors_openconfig_sampling__sampling_sflow_collectors, is_container='container', yang_name="collectors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='container', is_config=True)""",
            })

        self.__collectors = t
        # Notify the parent object (if any) that a child value changed.
        if hasattr(self, '_set'):
            self._set()

    def _unset_collectors(self):
        # Reset collectors to a fresh, empty generated container (the pyangbind default).
        self.__collectors = YANGDynClass(base=yc_collectors_openconfig_sampling__sampling_sflow_collectors, is_container='container', yang_name="collectors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='container', is_config=True)

    def _get_interfaces(self):
        """
        Getter method for interfaces, mapped from YANG variable
        /sampling/sflow/interfaces (container).

        YANG Description: Enclosing container for list of sFlow interfaces.
        """
        return self.__interfaces

    def _set_interfaces(self, v, load=False):
        """
        Setter method for interfaces, mapped from YANG variable
        /sampling/sflow/interfaces (container).

        If this variable is read-only (config: false) in the source YANG file,
        then _set_interfaces is considered as a private method. Backends
        looking to populate this variable should do so via calling
        thisObj._set_interfaces() directly.

        YANG Description: Enclosing container for list of sFlow interfaces.
        """
        # Unwrap values that carry their underlying YANG type.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # Coerce v into the generated container class, attaching YANG metadata.
            t = YANGDynClass(v,base=yc_interfaces_openconfig_sampling__sampling_sflow_interfaces, is_container='container', yang_name="interfaces", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='container', is_config=True)
        except (TypeError, ValueError):
            # Surface the expected generated type to the caller for debugging.
            raise ValueError({
                'error-string': """interfaces must be of a type compatible with container""",
                'defined-type': "container",
                'generated-type': """YANGDynClass(base=yc_interfaces_openconfig_sampling__sampling_sflow_interfaces, is_container='container', yang_name="interfaces", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='container', is_config=True)""",
            })

        self.__interfaces = t
        # Notify the parent object (if any) that a child value changed.
        if hasattr(self, '_set'):
            self._set()

    def _unset_interfaces(self):
        # Reset interfaces to a fresh, empty generated container (the pyangbind default).
        self.__interfaces = YANGDynClass(base=yc_interfaces_openconfig_sampling__sampling_sflow_interfaces, is_container='container', yang_name="interfaces", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='container', is_config=True)

    # Public pyangbind accessors for this container's child elements.
    config = __builtin__.property(_get_config, _set_config)
    state = __builtin__.property(_get_state, _set_state)
    collectors = __builtin__.property(_get_collectors, _set_collectors)
    interfaces = __builtin__.property(_get_interfaces, _set_interfaces)

    # Ordered registry of child elements, consumed by the copy-constructor logic.
    _pyangbind_elements = OrderedDict([('config', config), ('state', state), ('collectors', collectors), ('interfaces', interfaces), ])
class yc_sampling_openconfig_sampling__sampling(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-sampling - based on the path /sampling. Each
    member element of the container is represented as a class variable - with
    a specific YANG type.

    YANG Description: Top-level container for sampling-related configuration
    and operational state data.
    """
    # NOTE: generated code; regenerate with pyangbind rather than hand-editing.
    __slots__ = ('_path_helper', '_extmethods', '__sflow',)

    _yang_name = 'sampling'
    _pybind_generated_by = 'container'

    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # The 'sflow' child container, wrapped with its YANG metadata.
        self.__sflow = YANGDynClass(base=yc_sflow_openconfig_sampling__sampling_sflow, is_container='container', yang_name="sflow", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='container', is_config=True)

        load = kwargs.pop("load", None)
        # Copy-constructor support: a single positional argument must expose
        # every registered child element, which is then copied across.
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                # Only copy elements that were explicitly changed on the source.
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Absolute YANG path of this container; delegates to the parent if attached.
        if hasattr(self, "_parent"):
            return self._parent._path()+[self._yang_name]
        else:
            return ['sampling']

    def _get_sflow(self):
        """
        Getter method for sflow, mapped from YANG variable /sampling/sflow
        (container).

        YANG Description: Top-level container for sFlow data.
        """
        return self.__sflow

    def _set_sflow(self, v, load=False):
        """
        Setter method for sflow, mapped from YANG variable /sampling/sflow
        (container).

        If this variable is read-only (config: false) in the source YANG file,
        then _set_sflow is considered as a private method. Backends looking to
        populate this variable should do so via calling thisObj._set_sflow()
        directly.

        YANG Description: Top-level container for sFlow data.
        """
        # Unwrap values that carry their underlying YANG type.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # Coerce v into the generated container class, attaching YANG metadata.
            t = YANGDynClass(v,base=yc_sflow_openconfig_sampling__sampling_sflow, is_container='container', yang_name="sflow", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='container', is_config=True)
        except (TypeError, ValueError):
            # Surface the expected generated type to the caller for debugging.
            raise ValueError({
                'error-string': """sflow must be of a type compatible with container""",
                'defined-type': "container",
                'generated-type': """YANGDynClass(base=yc_sflow_openconfig_sampling__sampling_sflow, is_container='container', yang_name="sflow", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='container', is_config=True)""",
            })

        self.__sflow = t
        # Notify the parent object (if any) that a child value changed.
        if hasattr(self, '_set'):
            self._set()

    def _unset_sflow(self):
        # Reset sflow to a fresh, empty generated container (the pyangbind default).
        self.__sflow = YANGDynClass(base=yc_sflow_openconfig_sampling__sampling_sflow, is_container='container', yang_name="sflow", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling/sflow', defining_module='openconfig-sampling-sflow', yang_type='container', is_config=True)

    # Public pyangbind accessor for the 'sflow' container.
    sflow = __builtin__.property(_get_sflow, _set_sflow)

    # Ordered registry of child elements, consumed by the copy-constructor logic.
    _pyangbind_elements = OrderedDict([('sflow', sflow), ])
class openconfig_sampling(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-sampling - based on the path
    /openconfig-sampling. Each member element of the container is represented
    as a class variable - with a specific YANG type.

    YANG Description: This module defines top-level configuration and
    operational state data related to traffic sampling.

    For modularity purposes, the top-level sampling container provides
    a natural attachment point for implementations such as sFlow, IPFix,
    NETFLOW.
    """
    # NOTE: generated code; regenerate with pyangbind rather than hand-editing.
    __slots__ = ('_path_helper', '_extmethods', '__sampling',)

    _yang_name = 'openconfig-sampling'
    _pybind_generated_by = 'container'

    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # The 'sampling' top-level container, wrapped with its YANG metadata.
        # Note: this element lives in the base openconfig-sampling namespace,
        # unlike the sFlow children above.
        self.__sampling = YANGDynClass(base=yc_sampling_openconfig_sampling__sampling, is_container='container', yang_name="sampling", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling', defining_module='openconfig-sampling', yang_type='container', is_config=True)

        load = kwargs.pop("load", None)
        # Copy-constructor support: a single positional argument must expose
        # every registered child element, which is then copied across.
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                # Only copy elements that were explicitly changed on the source.
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Absolute YANG path; this is the module root, so the base path is empty.
        if hasattr(self, "_parent"):
            return self._parent._path()+[self._yang_name]
        else:
            return []

    def _get_sampling(self):
        """
        Getter method for sampling, mapped from YANG variable /sampling
        (container).

        YANG Description: Top-level container for sampling-related
        configuration and operational state data.
        """
        return self.__sampling

    def _set_sampling(self, v, load=False):
        """
        Setter method for sampling, mapped from YANG variable /sampling
        (container).

        If this variable is read-only (config: false) in the source YANG file,
        then _set_sampling is considered as a private method. Backends looking
        to populate this variable should do so via calling
        thisObj._set_sampling() directly.

        YANG Description: Top-level container for sampling-related
        configuration and operational state data.
        """
        # Unwrap values that carry their underlying YANG type.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # Coerce v into the generated container class, attaching YANG metadata.
            t = YANGDynClass(v,base=yc_sampling_openconfig_sampling__sampling, is_container='container', yang_name="sampling", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling', defining_module='openconfig-sampling', yang_type='container', is_config=True)
        except (TypeError, ValueError):
            # Surface the expected generated type to the caller for debugging.
            raise ValueError({
                'error-string': """sampling must be of a type compatible with container""",
                'defined-type': "container",
                'generated-type': """YANGDynClass(base=yc_sampling_openconfig_sampling__sampling, is_container='container', yang_name="sampling", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling', defining_module='openconfig-sampling', yang_type='container', is_config=True)""",
            })

        self.__sampling = t
        # Notify the parent object (if any) that a child value changed.
        if hasattr(self, '_set'):
            self._set()

    def _unset_sampling(self):
        # Reset sampling to a fresh, empty generated container (the pyangbind default).
        self.__sampling = YANGDynClass(base=yc_sampling_openconfig_sampling__sampling, is_container='container', yang_name="sampling", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/sampling', defining_module='openconfig-sampling', yang_type='container', is_config=True)

    # Public pyangbind accessor for the 'sampling' container.
    sampling = __builtin__.property(_get_sampling, _set_sampling)

    # Ordered registry of child elements, consumed by the copy-constructor logic.
    _pyangbind_elements = OrderedDict([('sampling', sampling), ])
class openconfig_sampling_sflow(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-sampling-sflow - based on the path
    /openconfig-sampling-sflow. Each member element of the container is
    represented as a class variable - with a specific YANG type.

    YANG Description: This module defines configuration and operational state
    data related to data plane traffic sampling based on sFlow.

    RFC 3176 - InMon Corporation's sFlow: A Method for
    Monitoring Traffic in Switched and Routed Networks
    """
    # The sflow module only augments openconfig-sampling, so this binding
    # declares no top-level elements of its own.
    _pyangbind_elements = {}
|
{
"content_hash": "daf67d40817defc93667cb7c1ca0e2e7",
"timestamp": "",
"source": "github",
"line_count": 2879,
"max_line_length": 981,
"avg_line_length": 65.84682181312957,
"alnum_prop": 0.6926144545900523,
"repo_name": "google/gnxi",
"id": "4e7c740a0894655604aa287e23622edc994abc86",
"size": "189597",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oc_config_validate/oc_config_validate/models/sampling.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1730"
},
{
"name": "Dockerfile",
"bytes": "1957"
},
{
"name": "Go",
"bytes": "1194247"
},
{
"name": "HTML",
"bytes": "7132"
},
{
"name": "JavaScript",
"bytes": "1887"
},
{
"name": "Python",
"bytes": "83970889"
},
{
"name": "Shell",
"bytes": "11353"
},
{
"name": "TypeScript",
"bytes": "32597"
}
],
"symlink_target": ""
}
|
"""
report test results in JUnit-XML format,
for use with Jenkins and build integration servers.
Based on initial code from Ross Lawley.
Output conforms to https://github.com/jenkinsci/xunit-plugin/blob/master/
src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd
"""
import functools
import os
import re
import sys
import time
import py
import pytest
from _pytest import nodes
from _pytest.config import filename_arg
class Junit(py.xml.Namespace):
    # py.xml.Namespace turns attribute access into XML tag builders, so
    # Junit.testcase(...), Junit.failure(...), Junit.skipped(...), etc.
    # construct the junit-xml elements emitted by this module.
    pass
# We need to get the subset of the invalid unicode ranges according to
# XML 1.0 which are valid in this python build. Hence we calculate
# this dynamically instead of hardcoding it. The spec range of valid
# chars is: Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD]
# | [#x10000-#x10FFFF]
_legal_chars = (0x09, 0x0A, 0x0D)
_legal_ranges = ((0x20, 0x7E), (0x80, 0xD7FF), (0xE000, 0xFFFD), (0x10000, 0x10FFFF))
_legal_xml_re = [
"{}-{}".format(chr(low), chr(high))
for (low, high) in _legal_ranges
if low < sys.maxunicode
]
_legal_xml_re = [chr(x) for x in _legal_chars] + _legal_xml_re
illegal_xml_re = re.compile("[^%s]" % "".join(_legal_xml_re))
del _legal_chars
del _legal_ranges
del _legal_xml_re
_py_ext_re = re.compile(r"\.py$")
def bin_xml_escape(arg):
    """Escape *arg* for XML, replacing XML-1.0-illegal characters.

    Illegal characters are rewritten as visible ``#xNN``/``#xNNNN`` markers
    so they survive in the report instead of producing invalid XML.
    """
    def _mark(match):
        codepoint = ord(match.group())
        template = "#x%02X" if codepoint <= 0xFF else "#x%04X"
        return template % codepoint

    return py.xml.raw(illegal_xml_re.sub(_mark, py.xml.escape(arg)))
def merge_family(left, right):
    """Extend each list value of *left* with the list values of *right*.

    Mutates *left* in place.  Raises TypeError if a value in *left* is not
    a list.
    """
    merged = {}
    for key, base_attrs in left.items():
        for extra_attrs in right.values():
            if not isinstance(base_attrs, list):
                raise TypeError(type(base_attrs))
            merged[key] = base_attrs + extra_attrs
    left.update(merged)


families = {}
# Attributes every schema family allows on a <testcase> element.
families["_base"] = {"testcase": ["classname", "name"]}
# Extra attributes only the legacy schema permits.
families["_base_legacy"] = {"testcase": ["file", "line", "url"]}

# xUnit 1.x inherits the legacy attributes.
families["xunit1"] = families["_base"].copy()
merge_family(families["xunit1"], families["_base_legacy"])

# xUnit 2.x is strict: base attributes only (deliberately the same object).
families["xunit2"] = families["_base"]
class _NodeReporter:
def __init__(self, nodeid, xml):
    """Collect junit-xml data for a single test node.

    *nodeid* is the pytest node id; *xml* is the session-wide LogXML object.
    """
    self.id = nodeid
    self.xml = xml
    self.add_stats = xml.add_stats   # shortcut to the session stats counter
    self.family = xml.family         # schema family ("xunit1"/"xunit2"/...)
    self.duration = 0                # seconds, filled in from the call report
    self.properties = []             # (name, escaped value) pairs for <properties>
    self.nodes = []                  # child nodes (failure/skipped/system-out...)
    self.testcase = None             # set once the <testcase> is recorded
    self.attrs = {}                  # attributes for the <testcase> element
def append(self, node):
    # Count the node's type name (e.g. "failure", "skipped") in the session
    # stats, then keep the node for serialization in to_xml().
    self.xml.add_stats(type(node).__name__)
    self.nodes.append(node)
def add_property(self, name, value):
    # Record a user property; it is emitted later under <properties>.
    self.properties.append((str(name), bin_xml_escape(value)))
def add_attribute(self, name, value):
    # Record a user attribute; it becomes an attribute of the <testcase> element.
    self.attrs[str(name)] = bin_xml_escape(value)
def make_properties_node(self):
    """Return a Junit <properties> node for recorded properties, or "" if none."""
    if not self.properties:
        return ""
    return Junit.properties(
        [Junit.property(name=name, value=value) for name, value in self.properties]
    )
def record_testreport(self, testreport):
    """Fill the <testcase> attributes from a pytest TestReport.

    User-added attributes (add_attribute) take precedence over the
    generated ones; for strict families the result is filtered down to
    the attributes the schema permits.
    """
    assert not self.testcase
    names = mangle_test_address(testreport.nodeid)
    user_attrs = self.attrs  # saved so user-defined attributes win below
    class_parts = names[:-1]
    if self.xml.prefix:
        class_parts.insert(0, self.xml.prefix)
    attrs = {
        "classname": ".".join(class_parts),
        "name": bin_xml_escape(names[-1]),
        "file": testreport.location[0],
    }
    if testreport.location[1] is not None:
        attrs["line"] = testreport.location[1]
    if hasattr(testreport, "url"):
        attrs["url"] = testreport.url
    self.attrs = attrs
    self.attrs.update(user_attrs)  # restore any user-defined attributes

    # The legacy xunit1 family permits everything, including custom attributes.
    if self.family == "xunit1":
        return

    # Strict families: keep only the attributes the schema allows
    # (this also drops custom attributes, which are not valid here).
    allowed = families[self.family]["testcase"]
    self.attrs = {key: value for key, value in self.attrs.items() if key in allowed}
def to_xml(self):
    """Serialize this reporter into a Junit <testcase> element."""
    testcase = Junit.testcase(time="%.3f" % self.duration, **self.attrs)
    for child in [self.make_properties_node(), *self.nodes]:
        testcase.append(child)
    return testcase
def _add_simple(self, kind, message, data=None):
    """Append a node of *kind* (a Junit tag factory) wrapping escaped *data*."""
    self.append(kind(bin_xml_escape(data), message=message))
def write_captured_output(self, report):
if not self.xml.log_passing_tests and report.passed:
return
content_out = report.capstdout
content_log = report.caplog
content_err = report.capstderr
if content_log or content_out:
if content_log and self.xml.logging == "system-out":
if content_out:
# syncing stdout and the log-output is not done yet. It's
# probably not worth the effort. Therefore, first the captured
# stdout is shown and then the captured logs.
content = "\n".join(
[
" Captured Stdout ".center(80, "-"),
content_out,
"",
" Captured Log ".center(80, "-"),
content_log,
]
)
else:
content = content_log
else:
content = content_out
if content:
tag = getattr(Junit, "system-out")
self.append(tag(bin_xml_escape(content)))
if content_log or content_err:
if content_log and self.xml.logging == "system-err":
if content_err:
content = "\n".join(
[
" Captured Stderr ".center(80, "-"),
content_err,
"",
" Captured Log ".center(80, "-"),
content_log,
]
)
else:
content = content_log
else:
content = content_err
if content:
tag = getattr(Junit, "system-err")
self.append(tag(bin_xml_escape(content)))
def append_pass(self, report):
self.add_stats("passed")
def append_failure(self, report):
# msg = str(report.longrepr.reprtraceback.extraline)
if hasattr(report, "wasxfail"):
self._add_simple(Junit.skipped, "xfail-marked test passes unexpectedly")
else:
if hasattr(report.longrepr, "reprcrash"):
message = report.longrepr.reprcrash.message
elif isinstance(report.longrepr, str):
message = report.longrepr
else:
message = str(report.longrepr)
message = bin_xml_escape(message)
fail = Junit.failure(message=message)
fail.append(bin_xml_escape(report.longrepr))
self.append(fail)
def append_collect_error(self, report):
# msg = str(report.longrepr.reprtraceback.extraline)
self.append(
Junit.error(bin_xml_escape(report.longrepr), message="collection failure")
)
def append_collect_skipped(self, report):
self._add_simple(Junit.skipped, "collection skipped", report.longrepr)
def append_error(self, report):
if report.when == "teardown":
msg = "test teardown failure"
else:
msg = "test setup failure"
self._add_simple(Junit.error, msg, report.longrepr)
def append_skipped(self, report):
if hasattr(report, "wasxfail"):
xfailreason = report.wasxfail
if xfailreason.startswith("reason: "):
xfailreason = xfailreason[8:]
self.append(
Junit.skipped(
"", type="pytest.xfail", message=bin_xml_escape(xfailreason)
)
)
else:
filename, lineno, skipreason = report.longrepr
if skipreason.startswith("Skipped: "):
skipreason = skipreason[9:]
details = "{}:{}: {}".format(filename, lineno, skipreason)
self.append(
Junit.skipped(
bin_xml_escape(details),
type="pytest.skip",
message=bin_xml_escape(skipreason),
)
)
self.write_captured_output(report)
def finalize(self):
data = self.to_xml().unicode(indent=0)
self.__dict__.clear()
self.to_xml = lambda: py.xml.raw(data)
def _warn_incompatibility_with_xunit2(request, fixture_name):
    """Warn that *fixture_name* only works with the 'legacy'/'xunit1'
    junit_family settings; no-op when no xml report is configured or the
    family is compatible."""
    from _pytest.warning_types import PytestWarning

    xml = getattr(request.config, "_xml", None)
    if xml is None or xml.family in ("xunit1", "legacy"):
        return
    message = "{fixture_name} is incompatible with junit_family '{family}' (use 'legacy' or 'xunit1')".format(
        fixture_name=fixture_name, family=xml.family
    )
    request.node.warn(PytestWarning(message))
@pytest.fixture
def record_property(request):
    """Fixture returning a callable ``(name, value)`` that attaches an
    extra user property to the calling test.
    User properties become part of the test report and are available to the
    configured reporters, like JUnit XML; the value is xml-encoded
    automatically.
    Example::
        def test_function(record_property):
            record_property("example_key", 1)
    """
    _warn_incompatibility_with_xunit2(request, "record_property")

    def _append(name, value):
        request.node.user_properties.append((name, value))

    return _append
@pytest.fixture
def record_xml_attribute(request):
    """Fixture returning a callable ``(name, value)`` that adds an extra
    xml attribute to the <testcase> tag of the calling test; the value is
    xml-encoded automatically."""
    from _pytest.warning_types import PytestExperimentalApiWarning

    request.node.warn(
        PytestExperimentalApiWarning("record_xml_attribute is an experimental feature")
    )
    _warn_incompatibility_with_xunit2(request, "record_xml_attribute")
    xml = getattr(request.config, "_xml", None)
    if xml is None:
        # No xml report configured: hand back a no-op.
        return lambda name, value: None
    return xml.node_reporter(request.node.nodeid).add_attribute
def _check_record_param_type(param, v):
"""Used by record_testsuite_property to check that the given parameter name is of the proper
type"""
__tracebackhide__ = True
if not isinstance(v, str):
msg = "{param} parameter needs to be a string, but {g} given"
raise TypeError(msg.format(param=param, g=type(v).__name__))
@pytest.fixture(scope="session")
def record_testsuite_property(request):
    """
    Session-scoped fixture returning a callable ``(name, value)`` that
    records a new ``<property>`` tag as child of the root ``<testsuite>``.
    Suitable for writing global information regarding the entire test suite,
    and compatible with the ``xunit2`` JUnit family. Example:
    .. code-block:: python
        def test_foo(record_testsuite_property):
            record_testsuite_property("ARCH", "PPC")
            record_testsuite_property("STORAGE_TYPE", "CEPH")
    ``name`` must be a string, ``value`` will be converted to a string and properly xml-escaped.
    """
    __tracebackhide__ = True

    def record_func(name, value):
        """noop function in case --junitxml was not passed in the command-line"""
        __tracebackhide__ = True
        _check_record_param_type("name", name)

    xml = getattr(request.config, "_xml", None)
    return record_func if xml is None else xml.add_global_property
def pytest_addoption(parser):
    """Register the junitxml command-line options and ini settings."""
    group = parser.getgroup("terminal reporting")
    group.addoption(
        "--junitxml",
        "--junit-xml",
        action="store",
        dest="xmlpath",
        metavar="path",
        # filename_arg rejects paths that are unusable as a log file.
        type=functools.partial(filename_arg, optname="--junitxml"),
        default=None,
        help="create junit-xml style report file at given path.",
    )
    group.addoption(
        "--junitprefix",
        "--junit-prefix",
        action="store",
        metavar="str",
        default=None,
        help="prepend prefix to classnames in junit-xml output",
    )
    parser.addini(
        "junit_suite_name", "Test suite name for JUnit report", default="pytest"
    )
    parser.addini(
        "junit_logging",
        "Write captured log messages to JUnit report: "
        "one of no|system-out|system-err",
        default="no",
    ) # choices=['no', 'stdout', 'stderr'])
    parser.addini(
        "junit_log_passing_tests",
        "Capture log information for passing tests to JUnit report: ",
        type="bool",
        default=True,
    )
    parser.addini(
        "junit_duration_report",
        "Duration time to report: one of total|call",
        default="total",
    ) # choices=['total', 'call'])
    parser.addini(
        "junit_family",
        "Emit XML for schema: one of legacy|xunit1|xunit2",
        default="xunit1",
    )
def pytest_configure(config):
    """Instantiate and register the LogXML plugin when --junitxml is given.

    Skipped on xdist slave nodes (which carry a ``slaveinput`` attribute) so
    only the master opens the xml log file.
    """
    xmlpath = config.option.xmlpath
    # prevent opening xmllog on slave nodes (xdist)
    if not xmlpath or hasattr(config, "slaveinput"):
        return
    config._xml = LogXML(
        xmlpath,
        config.option.junitprefix,
        config.getini("junit_suite_name"),
        config.getini("junit_logging"),
        config.getini("junit_duration_report"),
        config.getini("junit_family"),
        config.getini("junit_log_passing_tests"),
    )
    config.pluginmanager.register(config._xml)
def pytest_unconfigure(config):
    """Unregister and drop the LogXML plugin, if one was installed."""
    xml = getattr(config, "_xml", None)
    if not xml:
        return
    del config._xml
    config.pluginmanager.unregister(xml)
def mangle_test_address(address):
    """Split a test nodeid into JUnit name parts.

    The file path becomes a dotted module path (extension stripped), any
    ``()`` entries are dropped, and a parametrization suffix like ``[x]``
    is re-attached to the final (test) name.
    """
    path, bracket, params = address.partition("[")
    parts = path.split("::")
    if "()" in parts:
        parts.remove("()")
    # convert file path to dotted path
    parts[0] = _py_ext_re.sub("", parts[0].replace(nodes.SEP, "."))
    # put any params back
    parts[-1] += bracket + params
    return parts
class LogXML:
    """pytest plugin object that accumulates per-test reporters and writes
    the final JUnit-style XML file at session end."""
    def __init__(
        self,
        logfile,
        prefix,
        suite_name="pytest",
        logging="no",
        report_duration="total",
        family="xunit1",
        log_passing_tests=True,
    ):
        logfile = os.path.expanduser(os.path.expandvars(logfile))
        self.logfile = os.path.normpath(os.path.abspath(logfile))
        self.prefix = prefix
        self.suite_name = suite_name
        self.logging = logging
        self.log_passing_tests = log_passing_tests
        self.report_duration = report_duration
        self.family = family
        self.stats = dict.fromkeys(["error", "passed", "failure", "skipped"], 0)
        self.node_reporters = {}  # nodeid -> _NodeReporter
        self.node_reporters_ordered = []
        self.global_properties = []
        # List of reports that failed on call but teardown is pending.
        self.open_reports = []
        self.cnt_double_fail_tests = 0
        # Replaces convenience family with real family
        if self.family == "legacy":
            self.family = "xunit1"
    def finalize(self, report):
        """Finalize (render and free) the reporter for *report*'s node."""
        nodeid = getattr(report, "nodeid", report)
        # local hack to handle xdist report order
        slavenode = getattr(report, "node", None)
        reporter = self.node_reporters.pop((nodeid, slavenode))
        if reporter is not None:
            reporter.finalize()
    def node_reporter(self, report):
        """Return the _NodeReporter for *report* (a report object or a bare
        nodeid string), creating and registering one if needed."""
        nodeid = getattr(report, "nodeid", report)
        # local hack to handle xdist report order
        slavenode = getattr(report, "node", None)
        key = nodeid, slavenode
        if key in self.node_reporters:
            # TODO: breaks for --dist=each
            return self.node_reporters[key]
        reporter = _NodeReporter(nodeid, self)
        self.node_reporters[key] = reporter
        self.node_reporters_ordered.append(reporter)
        return reporter
    def add_stats(self, key):
        # Only the four known outcome keys are counted; others are ignored.
        if key in self.stats:
            self.stats[key] += 1
    def _opentestcase(self, report):
        """Get the reporter for *report* and record the testcase attrs."""
        reporter = self.node_reporter(report)
        reporter.record_testreport(report)
        return reporter
    def pytest_runtest_logreport(self, report):
        """handle a setup/call/teardown report, generating the appropriate
        xml tags as necessary.
        note: due to plugins like xdist, this hook may be called in interlaced
        order with reports from other nodes. for example:
        usual call order:
            -> setup node1
            -> call node1
            -> teardown node1
            -> setup node2
            -> call node2
            -> teardown node2
        possible call order in xdist:
            -> setup node1
            -> call node1
            -> setup node2
            -> call node2
            -> teardown node2
            -> teardown node1
        """
        close_report = None
        if report.passed:
            if report.when == "call":  # ignore setup/teardown
                reporter = self._opentestcase(report)
                reporter.append_pass(report)
        elif report.failed:
            if report.when == "teardown":
                # The following vars are needed when xdist plugin is used
                report_wid = getattr(report, "worker_id", None)
                report_ii = getattr(report, "item_index", None)
                close_report = next(
                    (
                        rep
                        for rep in self.open_reports
                        if (
                            rep.nodeid == report.nodeid
                            and getattr(rep, "item_index", None) == report_ii
                            and getattr(rep, "worker_id", None) == report_wid
                        )
                    ),
                    None,
                )
                if close_report:
                    # We need to open new testcase in case we have failure in
                    # call and error in teardown in order to follow junit
                    # schema
                    self.finalize(close_report)
                    self.cnt_double_fail_tests += 1
            reporter = self._opentestcase(report)
            if report.when == "call":
                reporter.append_failure(report)
                self.open_reports.append(report)
            else:
                reporter.append_error(report)
        elif report.skipped:
            reporter = self._opentestcase(report)
            reporter.append_skipped(report)
        self.update_testcase_duration(report)
        if report.when == "teardown":
            reporter = self._opentestcase(report)
            reporter.write_captured_output(report)
            # Attach user properties recorded via the record_property fixture.
            for propname, propvalue in report.user_properties:
                reporter.add_property(propname, propvalue)
            self.finalize(report)
            report_wid = getattr(report, "worker_id", None)
            report_ii = getattr(report, "item_index", None)
            close_report = next(
                (
                    rep
                    for rep in self.open_reports
                    if (
                        rep.nodeid == report.nodeid
                        and getattr(rep, "item_index", None) == report_ii
                        and getattr(rep, "worker_id", None) == report_wid
                    )
                ),
                None,
            )
            if close_report:
                self.open_reports.remove(close_report)
    def update_testcase_duration(self, report):
        """accumulates total duration for nodeid from given report and updates
        the Junit.testcase with the new total if already created.
        """
        if self.report_duration == "total" or report.when == self.report_duration:
            reporter = self.node_reporter(report)
            reporter.duration += getattr(report, "duration", 0.0)
    def pytest_collectreport(self, report):
        # Only failed/skipped collections produce a testcase entry.
        if not report.passed:
            reporter = self._opentestcase(report)
            if report.failed:
                reporter.append_collect_error(report)
            else:
                reporter.append_collect_skipped(report)
    def pytest_internalerror(self, excrepr):
        """Record an internal pytest error as a synthetic testcase."""
        reporter = self.node_reporter("internal")
        reporter.attrs.update(classname="pytest", name="internal")
        reporter._add_simple(Junit.error, "internal error", excrepr)
    def pytest_sessionstart(self):
        self.suite_start_time = time.time()
    def pytest_sessionfinish(self):
        """Write the complete <testsuite> document to self.logfile."""
        dirname = os.path.dirname(os.path.abspath(self.logfile))
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        logfile = open(self.logfile, "w", encoding="utf-8")
        suite_stop_time = time.time()
        suite_time_delta = suite_stop_time - self.suite_start_time
        # Tests that failed in call AND errored in teardown were counted
        # twice; subtract them so the total matches the number of testcases.
        numtests = (
            self.stats["passed"]
            + self.stats["failure"]
            + self.stats["skipped"]
            + self.stats["error"]
            - self.cnt_double_fail_tests
        )
        logfile.write('<?xml version="1.0" encoding="utf-8"?>')
        logfile.write(
            Junit.testsuite(
                self._get_global_properties_node(),
                [x.to_xml() for x in self.node_reporters_ordered],
                name=self.suite_name,
                errors=self.stats["error"],
                failures=self.stats["failure"],
                skipped=self.stats["skipped"],
                tests=numtests,
                time="%.3f" % suite_time_delta,
            ).unicode(indent=0)
        )
        logfile.close()
    def pytest_terminal_summary(self, terminalreporter):
        terminalreporter.write_sep("-", "generated xml file: %s" % (self.logfile))
    def add_global_property(self, name, value):
        """Record a suite-level <property>; *name* must be a string."""
        __tracebackhide__ = True
        _check_record_param_type("name", name)
        self.global_properties.append((name, bin_xml_escape(value)))
    def _get_global_properties_node(self):
        """Return a Junit node containing custom properties, if any.
        """
        if self.global_properties:
            return Junit.properties(
                [
                    Junit.property(name=name, value=value)
                    for name, value in self.global_properties
                ]
            )
        return ""
|
{
"content_hash": "8355d28255efbeca7095571d56dd2d28",
"timestamp": "",
"source": "github",
"line_count": 692,
"max_line_length": 112,
"avg_line_length": 33.74277456647399,
"alnum_prop": 0.5644539614561028,
"repo_name": "ekwoodrich/python-dvrip",
"id": "ea33e606c30407ec3cfbdb3a0db9386d757a4a82",
"size": "23350",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "env/lib/python3.5/site-packages/_pytest/junitxml.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5706"
}
],
"symlink_target": ""
}
|
"""
Copyright (c) 2017-2022 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
Writes a Dockerfile using information from container.yaml - the Dockerfile
results in an image with the actual filesystem tree we care about at
/var/tmp/flatpak-build. The Dockerfile will later be updated by the
flatpak_update_dockerfile plugin to have specifics from the composed module.
"""
from pathlib import Path
from typing import List
from atomic_reactor.dirs import BuildDir
from osbs.repo_utils import ModuleSpec
from atomic_reactor.constants import RELATIVE_REPOS_PATH, YUM_REPOS_DIR
from atomic_reactor.plugin import Plugin
from atomic_reactor.utils.flatpak_util import FlatpakUtil
from atomic_reactor.utils.rpm import rpm_qf_args
from atomic_reactor.util import is_flatpak_build
# /var/tmp/flatpak-build is the final image we'll turn into a Flaptak
# In order for 'dnf module enable' to work correctly, we need an
# /etc/os-release in the install root with the correct PLATFORM_ID
# for our base package set. To make that work, we install system-release
# into a *different* install root and copy /etc/os-release over.
#
# We also have to redo the addition of yum repos from the "pre_inject_yum_repo"
# plugin after first removing any yum repos in the base image - we want
# /only/ the yum repos from atomic_reactor, and nothing else.
DOCKERFILE_TEMPLATE = '''FROM {base_image}
LABEL name="{name}"
LABEL com.redhat.component="{component}"
LABEL version="{stream}"
LABEL release="@RELEASE@"
RUN rm -f {yum_repos_dir}*
ADD {relative_repos_path}* {yum_repos_dir}
ADD {includepkgs} /tmp/
RUN cat /tmp/atomic-reactor-includepkgs >> /etc/dnf/dnf.conf && \\
INSTALLDIR=/var/tmp/flatpak-build && \\
DNF='\\
dnf -y --nogpgcheck \\
' && \\
$DNF --installroot=$INSTALLDIR-init install system-release && \\
mkdir -p $INSTALLDIR/etc/ && \\
cp $INSTALLDIR-init/etc/os-release $INSTALLDIR/etc/os-release && \\
$DNF --installroot=$INSTALLDIR module enable @ENABLE_MODULES@ && \\
$DNF --installroot=$INSTALLDIR install @INSTALL_PACKAGES@
RUN rpm --root=/var/tmp/flatpak-build {rpm_qf_args} > /var/tmp/flatpak-build.rpm_qf
COPY {cleanupscript} /var/tmp/flatpak-build/tmp/
RUN chroot /var/tmp/flatpak-build/ /bin/sh /tmp/cleanup.sh
'''
FLATPAK_INCLUDEPKGS_FILENAME = 'atomic-reactor-includepkgs'
FLATPAK_CLEANUPSCRIPT_FILENAME = 'cleanup.sh'
class FlatpakCreateDockerfilePlugin(Plugin):
    """Write the initial flatpak Dockerfile from container.yaml data.

    The generated Dockerfile still contains @...@ placeholders that the
    flatpak_update_dockerfile plugin fills in from the composed module.
    """
    key = "flatpak_create_dockerfile"
    is_allowed_to_fail = False
    def __init__(self, workflow):
        """
        constructor
        :param workflow: DockerBuildWorkflow instance
        """
        # call parent constructor
        super(FlatpakCreateDockerfilePlugin, self).__init__(workflow)
        self.default_base_image = self.workflow.conf.flatpak_base_image
    def run(self):
        """
        Generate the Dockerfile for all platforms and point the workflow at it.
        No-op for non-flatpak builds.
        """
        if not is_flatpak_build(self.workflow):
            self.log.info('not flatpak build, skipping plugin')
            return
        flatpak_util = FlatpakUtil(workflow_config=self.workflow.conf,
                                   source_config=self.workflow.source.config,
                                   composes=None)
        source_spec = flatpak_util.get_flatpak_source_spec()
        module_info = ModuleSpec.from_str(source_spec)
        # Load additional information from the flatpak section
        flatpak_yaml = self.workflow.source.config.flatpak
        base_image = flatpak_yaml.get('base_image', self.default_base_image)
        name = flatpak_yaml.get('name', module_info.name)
        component = flatpak_yaml.get('component', module_info.name)
        # Create the dockerfile
        def _create_dockerfile(build_dir: BuildDir) -> List[Path]:
            # NOTE(review): stream '-' is mapped to '_' for the version label —
            # presumably because '-' is reserved in the version field; confirm.
            content = DOCKERFILE_TEMPLATE.format(name=name,
                                                 component=component,
                                                 cleanupscript=FLATPAK_CLEANUPSCRIPT_FILENAME,
                                                 includepkgs=FLATPAK_INCLUDEPKGS_FILENAME,
                                                 stream=module_info.stream.replace('-', '_'),
                                                 base_image=base_image,
                                                 relative_repos_path=RELATIVE_REPOS_PATH,
                                                 rpm_qf_args=rpm_qf_args(),
                                                 yum_repos_dir=YUM_REPOS_DIR)
            build_dir.dockerfile_path.write_text(content, "utf-8")
            return [build_dir.dockerfile_path]
        created_files = self.workflow.build_dir.for_all_platforms_copy(_create_dockerfile)
        dockerfile_path = created_files[0]
        self.workflow.reset_dockerfile_images(str(dockerfile_path))
|
{
"content_hash": "1cebb914699aaaad76361a1f6f7b86cd",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 94,
"avg_line_length": 39.33064516129032,
"alnum_prop": 0.6456838220217347,
"repo_name": "projectatomic/atomic-reactor",
"id": "b9d3d380f354020f204c39ed06bea7fa9a809766",
"size": "4877",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "atomic_reactor/plugins/flatpak_create_dockerfile.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "1988"
},
{
"name": "Python",
"bytes": "1981631"
},
{
"name": "Shell",
"bytes": "8544"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url, include

# `patterns()` was deprecated in Django 1.8 and removed in Django 1.10;
# urlpatterns must be a plain list of url() instances.
urlpatterns = [
    url(r'^accounts/', include('django.contrib.auth.urls')),
]
|
{
"content_hash": "13ae9bf9951c8c913fd7b8a72da6ef45",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 60,
"avg_line_length": 24.666666666666668,
"alnum_prop": 0.6824324324324325,
"repo_name": "webkom/django-auth-abakus",
"id": "7ae483931c2be7fd784c5d19221c9ac99b5bae8f",
"size": "171",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "434"
},
{
"name": "Python",
"bytes": "14959"
}
],
"symlink_target": ""
}
|
class Searcher(object):
    """Boolean AND/OR search over an inverted index.

    The index object must provide ``field_keys`` (field name -> field
    position) and ``load_plist(term_id)`` returning a posting list whose
    ``fields`` attribute is a list of parallel per-field value lists.

    Portability fix: the original used the Python-2-only ``xrange``;
    ``range`` behaves identically for iteration and works on Python 2 and 3.
    """

    def __init__(self, index, ret_field):
        self.index = index
        # Field whose values find() returns when no override is given.
        self.ret_field = ret_field

    def find(self, query, ret_field=None):
        """Return the set of ``ret_field`` values matching *every* term.

        ``query`` is a list of ``(term_id, constraints)`` pairs; each
        constraint is ``(field_name, predicate)`` and must hold for a
        posting to be a candidate.  Returns the intersection of the
        per-term candidate sets, or an empty *list* as soon as any term
        yields no candidates (original early-exit behaviour preserved).
        """
        intersect_lists = []
        ret_field = ret_field if ret_field is not None else self.ret_field
        ret_field_id = self.index.field_keys[ret_field]
        for term_id, constraints in query:
            plist = self.index.load_plist(term_id)
            if len(constraints) == 0:
                # Unconstrained term: every posting is a candidate.
                intersect_lists.append(plist.fields[ret_field_id])
                continue
            candidates = []
            for i in range(0, len(plist.fields[0])):
                candidate_is_ok = True
                for c_name, constraint in constraints:
                    c_field_id = self.index.field_keys[c_name]
                    if not constraint(plist.fields[c_field_id][i]):
                        candidate_is_ok = False
                        break
                candidate = plist.fields[ret_field_id][i]
                if candidate_is_ok:
                    candidates.append(candidate)
            if len(candidates) == 0:
                return []
            intersect_lists.append(candidates)
        last_candidates = set(intersect_lists[0])
        for i in range(1, len(intersect_lists)):
            new_candidates = set()
            for j in range(len(intersect_lists[i])):
                if intersect_lists[i][j] in last_candidates:
                    new_candidates.add(intersect_lists[i][j])
            last_candidates = new_candidates
        return last_candidates

    def find_or(self, query):
        """OR-query: map each matching value to the list of term_ids
        (in query order) whose single-term find() matched it."""
        results = dict()
        for term_id, constraints in query:
            found_documents = self.find([(term_id, constraints)])
            for doc_id in found_documents:
                if doc_id in results:
                    results[doc_id].append(term_id)
                else:
                    results[doc_id] = [term_id]
        return results
|
{
"content_hash": "d1d284960bbeb47d4255c95b67bf5329",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 74,
"avg_line_length": 33.6271186440678,
"alnum_prop": 0.5246975806451613,
"repo_name": "zaycev/metaphor-search",
"id": "92cdb14bea0da28ba2a0958332ec2428d5ae3765",
"size": "2050",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sear/sear/searcher.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "181383"
},
{
"name": "Shell",
"bytes": "7764"
}
],
"symlink_target": ""
}
|
import os
from config import Config
from flask import Flask
from flask_ask import Ask, request, session, question, statement
from werkzeug.contrib.fixers import ProxyFix
from unidecode import unidecode
import logging
from sl import SL
try:
from urllib.parse import quote_plus
except:
from urllib import quote_plus
# Application-wide singletons: config, Flask app, flask-ask skill adapter.
config = Config()
app = Flask(__name__)
ask = Ask(app, "/")
logging.getLogger('flask_ask').setLevel(logging.DEBUG)
# SL API client; SL_API_KEY must be set (KeyError at import time otherwise).
sl = SL(os.environ['SL_API_KEY'])
# Optional TTS host used to render (Swedish) text as <audio> SSML; may be None.
tts_host = os.environ.get('TTS_HOST')
def get_site_id(transporatation):
    """Return the configured SL site id for a transportation mode.

    Reads the SL_<MODE>_SITE_ID environment variable; returns None when
    unset.  (Parameter spelling kept for interface compatibility.)
    """
    env_key = 'SL_%s_SITE_ID' % transporatation.upper()
    return os.environ.get(env_key)
@ask.launch
def launch():
    """Entry intent: ask the user to choose a transportation mode."""
    prompt = 'Bus or Metro?'
    return question(prompt).reprompt(prompt).simple_card('SL', prompt)
@ask.intent('SLRealTimeCityIntent')
def real_time_city(transportation):
    """Handle the city-bound real-time intent; only metro is supported."""
    sl.reset_filter()
    if transportation not in ('metro', 'subway'):
        speech_text = "I only support metro with this quetion"
        return statement(speech_text).simple_card('SL', speech_text)
    sl.metro = True
    sl.journey_direction = 1
    sl.site_id = get_site_id('metro')
    return _generate_answer(transportation)
@ask.intent('SLRealTimeIntent')
def real_time(transportation):
    """Handle the real-time departures intent for bus or metro."""
    sl.reset_filter()
    if transportation in ('metro', 'subway'):
        sl.metro = True
        sl.site_id = get_site_id('metro')
    elif transportation == 'bus':
        sl.bus = True
        sl.site_id = get_site_id('bus')
    else:
        # Unknown mode: re-prompt the user instead of answering.
        prompt = "Sorry I didn't catch what you asked for there, which transporatation did you want to go with. Bus or Metro?"
        return question(prompt).reprompt(prompt).simple_card('SL', prompt)
    return _generate_answer(transportation)
@ask.intent('SLDeviationIntent')
def deviation(transportation):
    """Handle the deviations intent: report current SL service deviations."""
    sl.reset_filter()
    if transportation in ('metro', 'subway'):
        sl.metro = True
        sl.site_id = get_site_id('metro')
    elif transportation == 'bus':
        sl.bus = True
        sl.site_id = get_site_id('bus')
    else:
        reply = "Sorry I didn't catch which transportation you wanted there."
        return statement(reply).simple_card('SL', reply)
    _, deviations = sl.simple_list()
    speech_text, card_text = _generate_deviation(deviations)
    return statement('<speak>' + speech_text + '</speak>').simple_card('SL', card_text)
def _generate_deviation(deviations):
speech_reply = []
card_reply = []
if deviations and tts_host:
for d in deviations:
deviation = quote_plus(d['Deviation']['Text'].encode('utf-8'))
speech_reply.append('<s>%s <audio src="%s%s"/></s>' % (d['StopInfo']['TransportMode'].capitalize(),
tts_host, deviation))
card_reply.append('%s - %s' % (d['StopInfo']['TransportMode'].capitalize(), d['Deviation']['Text']))
elif deviations and not tts_host:
speech_reply.append('<s>There are some deviations right now.</s>')
for d in deviations:
deviation = quote_plus(d['Deviation']['Text'].encode('utf-8'))
card_reply.append('%s - %s' % (d['StopInfo']['TransportMode'].capitalize(), d['Deviation']['Text']))
else:
speech_reply.append(u'<s>There are no known deviations right now</s>')
card_reply = speech_reply
speech_text = ''.join(speech_reply)
card_text = '\n'.join(card_reply)
return speech_text, card_text
def _generate_answer(transportation):
    """Build the spoken/card response listing upcoming departures.

    Prepends any current deviations, then speaks up to three departures
    from ``sl.simple_list()``.  Returns a flask-ask ``statement``.
    """
    result, deviations = sl.simple_list()
    speech_reply = []
    card_reply = []
    if deviations:
        st, ct = _generate_deviation(deviations)
        speech_reply.append(st)
        card_reply.append(ct)
    if not result:
        # No departures: answer with deviations (if any) plus a notice.
        speech_reply.append(u'<s>I can not find any departures with the %s</s>' % transportation)
        card_reply = speech_reply
        speech_text = ''.join(speech_reply)
        speech_text = '<speak>' + speech_text + '</speak>'
        card_text = '\n'.join(card_reply)
        return statement(speech_text).simple_card('SL', card_text)
    for r in result:
        r['transportation'] = transportation
        if tts_host:
            # Render the destination name through the external TTS host.
            destination = quote_plus(r['destination'].encode('utf-8'))
            r['speech_destination'] = '<audio src="%s%s"/>' % (tts_host, destination)
        else:
            r['speech_destination'] = r['destination']
        if transportation == 'bus':
            r['speech_destination'] = '%(line_number)s to ' % r + r['speech_destination']
        else:
            r['speech_destination'] = r['speech_destination']
        # Speak at most three departures; the deviation entry is not counted.
        cnt = len(speech_reply)
        if deviations:
            cnt -= 1
        if cnt < 3:
            if cnt == 0:
                speech_reply.append(u'<s>The next %(transportation)s %(speech_destination)s will depart %(time_left)s</s>' % r)
            if cnt == 1:
                speech_reply.append(u'<s>Followed by %(speech_destination)s %(time_left)s</s>' % r)
            if cnt > 1:
                speech_reply.append(u'<s>%(speech_destination)s %(time_left)s</s>' % r)
        card_reply.append(u'%(transport_type)s %(line_number)s to %(destination)s will depart %(time_left)s.' % r)
    speech_text = ''.join(speech_reply)
    speech_text = '<speak>' + speech_text + '</speak>'
    card_text = '\n'.join(card_reply)
    return statement(unidecode(speech_text)).simple_card('Next %s' % transportation, card_text)
@ask.intent('AMAZON.HelpIntent')
def help():
    """Respond to the built-in help intent with usage guidance."""
    prompt = 'You can ask me when the bus or subway goes. For example, When does the next bus go?'
    return question(prompt).reprompt(prompt).simple_card('SL', prompt)
@ask.session_ended
def session_ended():
    # Alexa expects an empty 200 response when the session ends.
    return "", 200
if __name__ == '__main__':
    # Be sure to set config.debug_mode to False in production
    port = int(os.environ.get("PORT", config.port))
    if port != config.port:
        # A PORT env override implies a deployed environment: disable debug.
        # NOTE(review): this sets config.debug, but app.run below reads
        # config.debug_mode — confirm the Config class links the two.
        config.debug = False
    app.wsgi_app = ProxyFix(app.wsgi_app)
    app.run(host='0.0.0.0', debug=config.debug_mode, port=port)
|
{
"content_hash": "262d927a11096206d5d967338f4fb13e",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 131,
"avg_line_length": 34.78977272727273,
"alnum_prop": 0.6178343949044586,
"repo_name": "clearminds/alexa-sl",
"id": "ebc2df8bc617725d4f1b5413fe9b00077a98ba80",
"size": "6147",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11406"
}
],
"symlink_target": ""
}
|
# Problem statement, kept as a module-level string for reference.
problem = """
We shall say that an n-digit number is pandigital if it makes use of all the digits 1 to n exactly once. For example, 2143 is a 4-digit pandigital and is also prime.
What is the largest n-digit pandigital prime that exists?
"""
from itertools import permutations, imap
from math import sqrt
# Sieve of Eratosthenes up to sqrt(987654321) — 987654321 is the largest
# possible 9-digit pandigital — so trial division by these primes can
# certify primality of any pandigital candidate.
primes = set()
composites = set([1])
upper_bound = int(sqrt(987654321))
for i in range(2, upper_bound+1):
    if i in composites:
        continue
    primes.add(i)
    for j in range(2*i, upper_bound+1, i):
        composites.add(j)
def is_prime(p):
    """Trial-division primality test backed by the module-level ``primes`` set.

    Correct for any p up to upper_bound**2, which covers every 9-digit
    pandigital candidate.
    """
    if p <= 1:
        return False
    if p in primes:
        return True
    # Fix: the original iterated the *set* `primes` directly and broke on
    # `d > p`; set iteration order is arbitrary, so the break could fire
    # before small divisors were ever tried, mis-reporting composites as
    # prime.  Iterate in sorted order and stop once d*d > p, past which no
    # remaining prime can be a factor.
    for d in sorted(primes):
        if d * d > p:
            break
        if p % d == 0:
            return False
    return True
def pandigital_primes():
    """Yield n-digit pandigital primes, largest first.

    Digit counts run 9 down to 2, and within each count the permutations of
    (n..1) are generated in descending numeric order, so the first yielded
    value is the overall largest pandigital prime.

    Fix: replaces the Python-2-only ``itertools.imap`` with a generator
    expression, which behaves identically (lazily) on Python 2 and 3.
    """
    for n in range(9, 1, -1):
        for p in (int(''.join(map(str, perm))) for perm in permutations(range(n, 0, -1))):
            if is_prime(p):
                yield p
# Python 2 print statement: emit the first (i.e. largest) pandigital prime.
print next(pandigital_primes())
|
{
"content_hash": "ee109bed771648b5f95a5b29765ebd1b",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 165,
"avg_line_length": 25.333333333333332,
"alnum_prop": 0.6052631578947368,
"repo_name": "lorenyu/project-euler",
"id": "a9de5dcf9eb46750d24eb254f6f8ae78dc60c08a",
"size": "988",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "problem-041.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "51438"
}
],
"symlink_target": ""
}
|
from pool import physics
class TestTriangleArea():
    """Tests for physics.triangle_area — presumably area from three side
    lengths (consistent with the 3-4-5 case below); confirm against pool.physics."""
    def test_triangle_area1(self):
        # Degenerate triangle (one side of length 0) has zero area.
        assert physics.triangle_area(1, 1, 0) == 0
    def test_triangle_area2(self):
        # Right triangle with legs 3 and 4: area = (3 * 4) / 2.
        assert physics.triangle_area(3, 4, 5) == 0.5 * 3 * 4
|
{
"content_hash": "43754c2895722ae4c6e7e646e568205d",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 60,
"avg_line_length": 29.375,
"alnum_prop": 0.6425531914893617,
"repo_name": "max-kov/pool",
"id": "9c476bbe7b20c28ca6580fed024b1989552705b9",
"size": "235",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_triangle_area.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "56164"
},
{
"name": "Shell",
"bytes": "871"
}
],
"symlink_target": ""
}
|
import argparse
import math
import tkinter as tk
import tkinter.messagebox as tk_messagebox
import PIL.ImageTk
import PIL.Image
import numpy as np
# (width, height) — presumably the on-screen preview size used by
# scale_image, which is defined outside this view; confirm.
IMAGE_SIZE = (600, 400)
class ImageManipulationWindow:
    def __init__(self, image_path):
        """Build the two-pane (original | modified) grayscale image editor."""
        self.root = tk.Tk()
        self.root.grid_columnconfigure(3, weight=1)
        self.root.grid_columnconfigure(7, weight=1)
        # Work in 8-bit grayscale ('L') so pixel values are plain ints.
        self.image = PIL.Image.open(image_path).convert('L')
        self.label_original = tk.Label(self.root)
        self.label_original.grid(row=0, column=0, columnspan=4)
        self.label_modified = tk.Label(self.root)
        self.label_modified.grid(row=0, column=4, columnspan=4)
        # Both panes start out showing the loaded image.
        self.set_original_image(self.image)
        self.set_modified_image(self.image)
        self.setup_pixelwise_operation_controls()
        self.setup_thresholding_operation_controls()
    def setup_pixelwise_operation_controls(self):
        """Create the label/entry/button rows (grid columns 0-2) for the
        pixelwise operations: add, multiply, exponentiate, logarithm,
        negate, contrast, and reset."""
        self.add_label = tk.Label(self.root, text='Add')
        self.add_label.grid(row=1, column=0, sticky='W')
        self.add_entry = tk.Entry(self.root)
        self.add_entry.grid(row=1, column=1, sticky='W')
        self.add_button = tk.Button(self.root, text='Perform',
                                    command=self.perform_addition)
        self.add_button.grid(row=1, column=2, sticky='W')
        self.multiply_label = tk.Label(self.root, text='Multiply')
        self.multiply_label.grid(row=2, column=0, sticky='W')
        self.multiply_entry = tk.Entry(self.root)
        self.multiply_entry.grid(row=2, column=1, sticky='W')
        self.multiply_button = tk.Button(self.root, text='Perform',
                                         command=self.perform_multiplication)
        self.multiply_button.grid(row=2, column=2, sticky='W')
        self.exponentiate_label = tk.Label(self.root, text='Exponentiate')
        self.exponentiate_label.grid(row=3, column=0, sticky='W')
        self.exponentiate_entry = tk.Entry(self.root)
        self.exponentiate_entry.grid(row=3, column=1, sticky='W')
        self.exponentiate_button = tk.Button(
            self.root, text='Perform', command=self.perform_exponentiation
        )
        self.exponentiate_button.grid(row=3, column=2, sticky='W')
        # The remaining operations take no argument, hence no Entry widget.
        self.logarithm_label = tk.Label(self.root, text='Logarithm')
        self.logarithm_label.grid(row=4, column=0, sticky='W')
        self.logarithm_button = tk.Button(
            self.root, text='Perform', command=self.perform_logarithm
        )
        self.logarithm_button.grid(row=4, column=1, sticky='W')
        self.negation_label = tk.Label(self.root, text='Negate')
        self.negation_label.grid(row=5, column=0, sticky='W')
        self.negation_button = tk.Button(
            self.root, text='Perform', command=self.perform_negation
        )
        self.negation_button.grid(row=5, column=1, sticky='W')
        self.contrast_label = tk.Label(self.root, text='Contrast')
        self.contrast_label.grid(row=6, column=0, sticky='W')
        self.contrast_button = tk.Button(
            self.root, text='Perform', command=self.perform_contrasting
        )
        self.contrast_button.grid(row=6, column=1, sticky='W')
        self.reset_button = tk.Button(
            self.root, text='Reset', command=self.perform_reset
        )
        self.reset_button.grid(row=7, column=0, sticky='W')
def setup_thresholding_operation_controls(self):
    """Create label/button pairs for the thresholding operations.

    These live in grid columns 4-5, rows 1-3.  Widgets keep their original
    attribute names (``bernsen_label``, ``bernsen_button``, ...).
    """
    operations = [
        ('bernsen', 'Bernsen thresholding', 1, self.perform_bernsen),
        ('niblack', 'Niblack thresholding', 2, self.perform_niblack),
        ('adaptive', 'Adaptive thresholding', 3, self.perform_adaptive),
    ]
    for attr, caption, row, handler in operations:
        label = tk.Label(self.root, text=caption)
        label.grid(row=row, column=4, sticky='W')
        setattr(self, attr + '_label', label)
        button = tk.Button(self.root, text='Perform', command=handler)
        button.grid(row=row, column=5, sticky='W')
        setattr(self, attr + '_button', button)
def set_original_image(self, image):
    """Render a downscaled copy of *image* in the "original" preview label."""
    scaled = self.scale_image(image)
    # Keep a reference on self: Tk does not hold PhotoImage objects alive.
    self._scaled_tk_image_original = PIL.ImageTk.PhotoImage(scaled)
    self.label_original.config(image=self._scaled_tk_image_original)
def set_modified_image(self, image):
    """Remember *image* as the current working image and display it scaled."""
    self.modified_image = image
    scaled = self.scale_image(image)
    # Keep a reference on self: Tk does not hold PhotoImage objects alive.
    self._scaled_tk_image_modified = PIL.ImageTk.PhotoImage(scaled)
    self.label_modified.config(image=self._scaled_tk_image_modified)
def perform_addition(self):
    """Add the value from the 'Add' entry to every pixel, clamped to [0, 255].

    Shows an error dialog and does nothing if the entry is not a number.
    """
    try:
        offset = float(self.add_entry.get())
    except ValueError:
        tk_messagebox.showerror('Error', 'Invalid argument')
        return
    shifted = [
        self.normalize_color_value(value + offset)
        for value in self.modified_image.getdata()
    ]
    self.show_modified_pixels(shifted)
def perform_multiplication(self):
    """Multiply every pixel by the 'Multiply' entry value, clamped to [0, 255].

    Shows an error dialog and does nothing if the entry is not a number.
    """
    try:
        factor = float(self.multiply_entry.get())
    except ValueError:
        tk_messagebox.showerror('Error', 'Invalid argument')
        return
    scaled = [
        self.normalize_color_value(value * factor)
        for value in self.modified_image.getdata()
    ]
    self.show_modified_pixels(scaled)
def perform_exponentiation(self):
    """Apply gamma-style power mapping 255 * (v / max) ** g to every pixel.

    The exponent g comes from the 'Exponentiate' entry; pixels are first
    normalised by the current image maximum so the brightest pixel maps to
    255.  Shows an error dialog if the entry is not a number.
    """
    try:
        exponent = float(self.exponentiate_entry.get())
    except ValueError:
        tk_messagebox.showerror('Error', 'Invalid argument')
        return
    max_value = self.modified_image.getextrema()[1]
    result = [
        self.normalize_color_value(255 * (value / max_value) ** exponent)
        for value in self.modified_image.getdata()
    ]
    self.show_modified_pixels(result)
def perform_logarithm(self):
    """Apply logarithmic stretching: v -> 255 * log(v + 1) / log(max + 1)."""
    max_value = self.modified_image.getextrema()[1]
    denominator = math.log(max_value + 1)
    stretched = [
        self.normalize_color_value(255 * (math.log(value + 1) / denominator))
        for value in self.modified_image.getdata()
    ]
    self.show_modified_pixels(stretched)
def perform_negation(self):
    """Invert the image: every pixel v becomes 255 - v."""
    inverted = [
        self.normalize_color_value(255 - value)
        for value in self.modified_image.getdata()
    ]
    self.show_modified_pixels(inverted)
def perform_contrasting(self):
    """Stretch the contrast linearly so the pixel range becomes [0, 255].

    Each pixel v is mapped to 255 * (v - min) / (max - min) and clamped.

    Fix: the original divided by (max - min) unconditionally, raising
    ZeroDivisionError for a uniform (single-intensity) image.  In that case
    every term is coeff * (v - min) == 0 regardless of the coefficient, so
    we use coeff = 0 explicitly and the output is all black.
    """
    min_value, max_value = self.modified_image.getextrema()
    if max_value == min_value:
        coeff = 0  # uniform image: avoid division by zero
    else:
        coeff = 255 / (max_value - min_value)
    original_pixels = list(self.modified_image.getdata())
    modified_pixels = [
        self.normalize_color_value(coeff * (original_value - min_value))
        for original_value in original_pixels
    ]
    self.show_modified_pixels(modified_pixels)
def perform_bernsen(self):
    """Binarize the original image with Bernsen's local thresholding.

    The image is tiled into r x r segments.  Low-contrast segments (range
    below min_contrast) are set wholesale to black or white depending on
    their mid-range value; other segments are thresholded pixel-by-pixel
    against their own mid-range.
    """
    r = 5                # segment side length in pixels
    min_contrast = 15    # below this (max - min) a segment counts as uniform
    pixel_array = np.array(self.image)
    # Tile into r x r segments.  np.split returns views, so the in-place
    # writes below mutate the segments directly before reassembly.
    vertical_split = np.split(pixel_array, range(r, self.image.height, r))
    segments = [
        np.split(rows, range(r, self.image.width, r), axis=1)
        for rows in vertical_split
    ]
    for segment_row in segments:
        for segment in segment_row:
            min_value = int(segment.min())
            max_value = int(segment.max())
            # Local threshold: mid-range of the segment.
            mid_value = (min_value + max_value) / 2
            if max_value - min_value <= min_contrast:
                # Low-contrast segment: force it entirely to one level.
                if mid_value < 128:
                    fill_value = 0
                else:
                    fill_value = 255
                segment.fill(fill_value)
            else:
                # Threshold each pixel against the segment's mid-range.
                for i in range(segment.shape[0]):
                    for j in range(segment.shape[1]):
                        segment[i, j] = (
                            0 if segment[i, j] < mid_value else 255
                        )
    # Reassemble the (mutated) segments into one full-size array.
    vertical_split = [
        np.concatenate(segment_row, axis=1)
        for segment_row in segments
    ]
    modified_pixels = np.concatenate(vertical_split)
    modified_image = PIL.Image.fromarray(modified_pixels)
    self.set_modified_image(modified_image)
def perform_niblack(self):
    """Binarize with Niblack's local method.

    For each pixel the threshold is T = mean + k * stddev, computed over an
    r x r window centred on the pixel (clipped at the image borders).

    Fix: removed the stray per-row debug ``print(i)`` that spammed stdout
    on every iteration.
    """
    r = 15    # window side length
    k = -0.2  # Niblack weighting of the local standard deviation

    def clipped_range(dimension_size, coord, window):
        # Slice of length <= window centred on coord, clipped to the image.
        radius = (window - 1) // 2
        min_coord = max(0, coord - radius)
        max_coord = min(dimension_size, coord + radius + 1)
        return slice(min_coord, max_coord)

    pixel_array = np.array(self.image)
    new_array = pixel_array.copy()
    height, width = pixel_array.shape[0], pixel_array.shape[1]
    for i in range(height):
        for j in range(width):
            vertical_slice = clipped_range(height, i, r)
            horizontal_slice = clipped_range(width, j, r)
            segment = pixel_array[vertical_slice, horizontal_slice]
            threshold = np.mean(segment) + k * np.std(segment)
            new_array[i, j] = 0 if pixel_array[i, j] < threshold else 255
    modified_image = PIL.Image.fromarray(new_array)
    self.set_modified_image(modified_image)
def perform_adaptive(self):
    """Binarize with an adaptive, growing-window method.

    For each pixel, a square window of "radius" current_k (clipped at the
    borders) is grown until the local minimum and maximum are asymmetric
    around the window mean; the threshold is then a weighted mix of the
    local minimum and mean, scaled by alpha.  A perfectly flat window is
    thresholded directly at alpha * mean.

    Fix: removed the stray per-row debug ``print(i)``.
    """
    k = 3          # initial window "radius"
    alpha = 2 / 3  # global scaling of the computed threshold

    def clipped_range(dimension_size, coord, radius):
        # Slice [coord - radius, coord + radius] clipped to the image.
        min_coord = max(0, coord - radius)
        max_coord = min(dimension_size, coord + radius + 1)
        return slice(min_coord, max_coord)

    pixel_array = np.array(self.image)
    new_array = pixel_array.copy()
    for i in range(pixel_array.shape[0]):
        for j in range(pixel_array.shape[1]):
            current_k = k
            threshold = None
            while True:
                vertical_slice = clipped_range(
                    pixel_array.shape[0], i, current_k
                )
                horizontal_slice = clipped_range(
                    pixel_array.shape[1], j, current_k
                )
                segment = pixel_array[vertical_slice, horizontal_slice]
                mean = np.mean(segment)
                min_value = segment.min()
                max_value = segment.max()
                df_max = abs(max_value - mean)
                df_min = abs(min_value - mean)
                if df_max == df_min:
                    if min_value != max_value:
                        # Symmetric but not flat: enlarge the window.
                        current_k += 1
                        continue
                    else:
                        # Flat window: threshold directly from the mean.
                        threshold = alpha * mean
                        break
                else:
                    # Bias the threshold toward the side farther from
                    # the mean.
                    if df_max > df_min:
                        threshold = alpha * (
                            2 / 3 * min_value +
                            1 / 3 * mean
                        )
                    else:
                        threshold = alpha * (
                            1 / 3 * min_value +
                            2 / 3 * mean
                        )
                    break
            new_array[i, j] = (
                0 if pixel_array[i, j] < threshold else 255
            )
    modified_image = PIL.Image.fromarray(new_array)
    self.set_modified_image(modified_image)
def perform_reset(self):
    """Discard all edits and show the untouched original image again."""
    self.set_modified_image(self.image)
def show_modified_pixels(self, modified_pixels):
    """Wrap a flat sequence of grayscale values into an image and show it.

    The sequence must have width * height entries, row-major, matching the
    original image size ('L' mode = 8-bit grayscale).
    """
    result = PIL.Image.new('L', (self.image.width, self.image.height))
    result.putdata(modified_pixels)
    self.set_modified_image(result)
@staticmethod
def normalize_color_value(value):
    """Round *value* and clamp the result into the valid byte range [0, 255]."""
    return min(255, max(0, round(value)))
@staticmethod
def scale_image(image):
    """Return a copy of *image* shrunk (via PIL thumbnail) to fit IMAGE_SIZE."""
    preview = image.copy()
    preview.thumbnail(IMAGE_SIZE)
    return preview
def main():
    """Parse the image path from the command line and run the window."""
    parser = argparse.ArgumentParser()
    parser.add_argument('image_path', type=str, help='Path to the image file')
    cli_args = parser.parse_args()
    window = ImageManipulationWindow(cli_args.image_path)
    window.root.mainloop()


if __name__ == '__main__':
    main()
|
{
"content_hash": "9384cf9c8743b19cd9f6f9207579f92a",
"timestamp": "",
"source": "github",
"line_count": 376,
"max_line_length": 78,
"avg_line_length": 35.398936170212764,
"alnum_prop": 0.549060856498873,
"repo_name": "CG2016/barkovsky_3",
"id": "5a44f5cb824ceeacf106292eb6658e78b89d0dc7",
"size": "13332",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lab6/manipulation.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "810"
},
{
"name": "HTML",
"bytes": "3707"
},
{
"name": "JavaScript",
"bytes": "66031"
},
{
"name": "Python",
"bytes": "45966"
}
],
"symlink_target": ""
}
|
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import NoSuchElementException
from credentials import edx_username, edx_password
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from login import login_to_edx
import json
import pprint
from datetime import datetime
import urllib2
def enroll_in(course_list):
    """Try to register for every course in *course_list* via Selenium.

    Each course dict must have a 'url' key.  Uses the module-level ``driver``
    created in ``__main__``.  Progress is reported with Python 2 print
    statements; a failure on one course does not stop the batch.
    """
    for course in course_list:
        try:
            driver.get(course['url'])
            frame = ""
            # Several registration iframes may exist; keep the visible one.
            for registration_frame in driver.find_elements_by_class_name('iframe-register'):
                if registration_frame.is_displayed():
                    frame = registration_frame
            driver.switch_to_frame(frame)
            try:
                registration_button = driver.find_element_by_class_name("action-register")
                # Verified courses show an extra "honor mode" choice after
                # clicking register.
                is_verified_course ='has-option-verified' in registration_button.get_attribute('class')
                WebDriverWait(driver,10).until(EC.visibility_of(registration_button))
                registration_button.click()
                #ActionChains(driver).move_to_element(registration_button).click(registration_button).perform()
                if is_verified_course:
                    element = WebDriverWait(driver, 10).until(
                        EC.presence_of_element_located((By.NAME, "honor_mode"))
                    )
                    driver.find_element_by_name("honor_mode").click()
                print "---> Registered for course: " + course['url']
            except NoSuchElementException:
                # Either "access courseware" or "you are registered" depending on whether the course is open
                print "Already registered for course: " + course['url']
        except Exception as e:
            # NOTE(review): broad catch keeps the batch going after any
            # single-course failure.
            print "!! Error enrolling"
            print str(type(e)) + ":" + str(e)
        # NOTE(review): ``avail`` is computed but never used -- vestigial
        # remains of the commented-out availability filter below.
        avail = ""
        if 'Availability' in course:
            avail = course['Availability']
        elif 'availability' in course:
            avail = course['availability']
        #if avail != 'Starting Soon' and avail != 'Upcoming':
if __name__ == "__main__":
    # Launch a browser, log in to edX, pull the full course catalogue and
    # enroll in every HarvardX course (Python 2 / urllib2 script).
    driver = webdriver.Firefox()
    login_to_edx(driver, edx_username, edx_password)
    # NOTE(review): ``pp`` and ``today`` are assigned but never used.
    pp = pprint.PrettyPrinter(indent=4)
    today = datetime.today().date()
    webjson = urllib2.urlopen("https://www.edx.org/search/api/all")
    course_index = json.load(webjson)
    # Keep only courses offered by HarvardX.
    course_index = [course for course in course_index if 'HarvardX' in course['schools']]
    enroll_in(course_index)
    driver.quit()
|
{
"content_hash": "bda006d44a1670b82cd323830dca7e18",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 101,
"avg_line_length": 37.015625,
"alnum_prop": 0.6884761502743774,
"repo_name": "Lyla-Fischer/get_edx_data",
"id": "665f7fbf4693de11bb484c3286377f268d354372",
"size": "2488",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "edx_register.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15040"
}
],
"symlink_target": ""
}
|
"""
The MIT License (MIT)
Copyright (c) 2016 xdei (https://github.com/xdei/Phobos)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import logging

# Configure root logging once at import time; the whole application shares
# this format and the DEBUG level.
_LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
_DATE_FORMAT = '%m/%d/%Y %I:%M:%S %p'

logging.basicConfig(
    format=_LOG_FORMAT,
    datefmt=_DATE_FORMAT,
    level=logging.DEBUG,
)

# Shared application logger.
logger = logging.getLogger('Phobos')
|
{
"content_hash": "db85c83577efa6f04a47b175951bd9d9",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 78,
"avg_line_length": 42.41935483870968,
"alnum_prop": 0.7688212927756654,
"repo_name": "xdei/Phobos",
"id": "4985c0eeb079eab77f53a896817b573e173c3b69",
"size": "1339",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/logger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23202"
}
],
"symlink_target": ""
}
|
class Model(object):
    """Minimal persistence-style base class with optional save/delete hooks.

    Subclasses declare their field names via ``columns()`` and may declare a
    ``hooks`` attribute: an iterable of hook classes.  Every hook's
    ``before_save`` / ``before_delete`` must return a truthy value for the
    operation to proceed; ``after_save`` runs on each hook after a
    successful ``put``.
    """

    def __init__(self, *args, **kwargs):
        # Accept only keyword values whose names already exist on the
        # instance/class; unknown keys are silently ignored.
        for name, value in kwargs.items():
            if name in dir(self):
                setattr(self, name, value)

    @classmethod
    def to_instance(cls, json):
        """Build an instance from a dict of field values."""
        return cls(**json)

    def to_json(self):
        """Serialize truthy fields (sorted by name) into a plain dict.

        NOTE(review): falsy values (0, '', False, None) are skipped — this
        preserves the original behaviour.
        """
        serialized = {}
        for field_name in sorted(self.columns()):
            field_value = getattr(self, field_name)
            if field_value:
                serialized[field_name] = field_value
        return serialized

    @classmethod
    def columns(cls):
        """Return the iterable of field names; subclasses must override."""
        raise NotImplementedError()

    @classmethod
    def find(cls, *args, **kwargs):
        """Query hook for subclasses; not implemented on the base class."""
        raise NotImplementedError()

    @classmethod
    def get(cls, id=None):
        """Lookup hook for subclasses; not implemented on the base class."""
        raise NotImplementedError()

    def update(self, dict_fields_to_update):
        # NOTE(review): the argument is currently ignored; update only
        # re-runs the save hooks, matching the original behaviour.
        return self.__save()

    def put(self):
        """Save the instance, then notify every hook via ``after_save``."""
        result = self.__save()
        if self._hasnt_hooks():
            return result
        if result:
            for hook_cls in self.hooks:
                # after_save results are intentionally not aggregated; the
                # flow continues regardless of what the hooks return.
                hook_cls().after_save(self)
        return result

    def __failed_hooks(self, method_name):
        """Return class names of hooks whose *method_name* returned falsy.

        All hooks are always consulted so each one's side effects run.
        """
        failures = []
        for hook_cls in self.hooks:
            instance = hook_cls()
            if not getattr(instance, method_name)(self):
                failures.append(instance.__class__.__name__)
        return failures

    def __save(self):
        """Run every hook's ``before_save``; raise if any check fails."""
        if self._hasnt_hooks():
            return True
        failed = self.__failed_hooks('before_save')
        if failed:
            raise Exception("The hook(s) %s.before_save didnt return True" % ", ".join(failed))
        return True

    def delete(self):
        """Run every hook's ``before_delete``; raise if any check fails."""
        if self._hasnt_hooks():
            return True
        failed = self.__failed_hooks('before_delete')
        if failed:
            raise Exception("The hook(s) %s.before_delete didnt return True" % ", ".join(failed))
        return True

    def _hasnt_hooks(self):
        """True when the (sub)class declares no ``hooks`` attribute."""
        return not hasattr(self, 'hooks')
|
{
"content_hash": "7d522c1989b9627469a5b5a1d75d2740",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 109,
"avg_line_length": 25.95,
"alnum_prop": 0.54373795761079,
"repo_name": "felipevolpone/ray",
"id": "3578ffdb0b535a83ae672ea66c95439e20c9fab2",
"size": "2596",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ray-core/ray/model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "77888"
},
{
"name": "Shell",
"bytes": "493"
}
],
"symlink_target": ""
}
|
"""
Recurrent layers.
TODO: write more documentation
"""
__docformat__ = 'restructedtext en'
__authors__ = ("Razvan Pascanu "
"KyungHyun Cho "
"Caglar Gulcehre ")
__contact__ = "Razvan Pascanu <r.pascanu@gmail>"
import numpy
import copy
import theano
import theano.tensor as TT
# Nicer interface of scan
from theano.sandbox.scan import scan
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from groundhog import utils
from groundhog.utils import sample_weights, \
sample_weights_classic,\
sample_weights_orth, \
init_bias, \
constant_shape, \
sample_zeros
from basic import Layer
class RecurrentMultiLayer(Layer):
    """
    Constructs a recurrent layer whose transition from h_tm1 to h_t is given
    by an MLP or logistic regression. In our ICLR submission this is a
    DT-RNN model.

    Fix: ``step_fprop`` referenced the nonexistent attribute ``nb_hss``
    when weight noise was enabled (the attribute created in ``_init_params``
    is ``nb_hhs``), which raised AttributeError at graph-construction time.
    """
    def __init__(self,
                 rng,
                 n_hids=[500,500],
                 activation = [TT.tanh, TT.tanh],
                 scale=.01,
                 sparsity = -1,
                 activ_noise=0.,
                 weight_noise=False,
                 dropout = 1.,
                 init_fn='sample_weights',
                 bias_fn='init_bias',
                 bias_scale = 0.,
                 grad_scale = 1.,
                 profile = 0,
                 name=None):
        """
        :type rng: numpy random generator
        :param rng: numpy random generator

        :type n_in: int
        :param n_in: number of inputs units

        :type n_hids: list of ints
        :param n_hids: Number of hidden units on each layer of the MLP

        :type activation: string/function or list of
        :param activation: Activation function for the embedding layers. If
            a list it needs to have a value for each layer. If not, the same
            activation will be applied to all layers

        :type scale: float or list of
        :param scale: depending on the initialization function, it can be
            the standard deviation of the Gaussian from which the weights
            are sampled or the largest singular value. If a single value it
            will be used for each layer, otherwise it has to have one value
            for each layer

        :type sparsity: int or list of
        :param sparsity: if a single value, it will be used for each layer,
            otherwise it has to be a list with as many values as layers. If
            negative, it means the weight matrix is dense. Otherwise it
            means this many randomly selected input units are connected to
            an output unit

        :type weight_noise: bool
        :param weight_noise: If true, the model is used with weight noise
            (and the right shared variable are constructed, to keep track of the
            noise)

        :type dropout: float
        :param dropout: the probability with which hidden units are dropped
            from the hidden layer. If set to 1, dropout is not used

        :type init_fn: string or function
        :param init_fn: function used to initialize the weights of the
            layer. We recommend using either `sample_weights_classic` or
            `sample_weights` defined in the utils

        :type bias_fn: string or function
        :param bias_fn: function used to initialize the biases. We recommend
            using `init_bias` defined in the utils

        :type bias_scale: float
        :param bias_scale: argument passed to `bias_fn`, depicting the scale
            of the initial bias

        :type grad_scale: float or theano scalar
        :param grad_scale: factor with which the gradients with respect to
            the parameters of this layer are scaled. It is used for
            differentiating between the different parameters of a model.

        :type name: string
        :param name: name of the layer (used to name parameters). NB: in
            this library names are very important because certain parts of the
            code relies on name to disambiguate between variables, therefore
            each layer should have a unique name.
        """
        self.grad_scale = grad_scale

        # Broadcast scalar hyper-parameters to one entry per layer.
        if type(n_hids) not in (list, tuple):
            n_hids = [n_hids]
        n_layers = len(n_hids)
        if type(scale) not in (list, tuple):
            scale = [scale] * n_layers
        if type(sparsity) not in (list, tuple):
            sparsity = [sparsity] * n_layers
        for idx, sp in enumerate(sparsity):
            if sp < 0: sparsity[idx] = n_hids[idx]
        if type(activation) not in (list, tuple):
            activation = [activation] * n_layers
        if type(bias_scale) not in (list, tuple):
            bias_scale = [bias_scale] * (n_layers-1)
        if type(bias_fn) not in (list, tuple):
            bias_fn = [bias_fn] * (n_layers-1)
        if type(init_fn) not in (list, tuple):
            init_fn = [init_fn] * n_layers

        # String hyper-parameters name helpers imported from groundhog.utils;
        # resolve them to the actual callables.
        for dx in xrange(n_layers):
            if dx < n_layers-1:
                if type(bias_fn[dx]) is str or type(bias_fn[dx]) is unicode:
                    bias_fn[dx] = eval(bias_fn[dx])
            if type(init_fn[dx]) is str or type(init_fn[dx]) is unicode:
                init_fn[dx] = eval(init_fn[dx])
            if type(activation[dx]) is str or type(activation[dx]) is unicode:
                activation[dx] = eval(activation[dx])
        self.scale = scale
        self.n_layers = n_layers
        self.sparsity = sparsity
        self.activation = activation
        self.n_hids = n_hids
        self.bias_scale = bias_scale
        self.bias_fn = bias_fn
        self.init_fn = init_fn
        self.weight_noise = weight_noise
        self.activ_noise = activ_noise
        self.profile = profile
        self.dropout = dropout
        assert rng is not None, "random number generator should not be empty!"
        super(RecurrentMultiLayer, self).__init__(n_hids[0],
                                                  n_hids[-1],
                                                  rng,
                                                  name)
        self.trng = RandomStreams(self.rng.randint(int(1e6)))
        self.params = []
        self._init_params()

    def _init_params(self):
        # One transition matrix per MLP layer; layer 0 consumes the previous
        # hidden state (size of the last layer), hence the wrap-around index.
        self.W_hhs = []
        self.b_hhs = []
        for dx in xrange(self.n_layers):
            W_hh = self.init_fn[dx](self.n_hids[(dx-1)%self.n_layers],
                                    self.n_hids[dx],
                                    self.sparsity[dx],
                                    self.scale[dx],
                                    rng=self.rng)
            self.W_hhs.append(theano.shared(value=W_hh, name="W%d_%s" %
                                            (dx,self.name)))
            if dx > 0:
                self.b_hhs.append(theano.shared(
                    self.bias_fn[dx-1](self.n_hids[dx],
                                       self.bias_scale[dx-1],
                                       self.rng),
                    name='b%d_%s' %(dx, self.name)))
        self.params = [x for x in self.W_hhs] + [x for x in self.b_hhs]
        self.params_grad_scale = [self.grad_scale for x in self.params]
        if self.weight_noise:
            # Zero-initialized shared variables that hold the sampled noise.
            self.nW_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_hhs]
            self.nb_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_hhs]
            self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nb_hhs]
            self.noise_params_shape_fn = [constant_shape(x.get_value().shape)
                                          for x in self.noise_params]

    def step_fprop(self,
                   state_below,
                   mask=None,
                   dpmask=None,
                   state_before=None,
                   init_state=None,
                   use_noise=True,
                   no_noise_bias=False):
        """
        Constructs the computational graph of a single step of the recurrent
        layer.

        :type state_below: theano variable
        :param state_below: the input to the layer

        :type mask: None or theano variable
        :param mask: mask describing the length of each sequence in a
            minibatch

        :type state_before: theano variable
        :param state_before: the previous value of the hidden state of the
            layer

        :type use_noise: bool
        :param use_noise: flag saying if weight noise should be used in
            computing the output of this layer

        :type no_noise_bias: bool
        :param no_noise_bias: flag saying if weight noise should be added to
            the bias as well
        """
        rval = []
        if self.weight_noise and use_noise and self.noise_params:
            W_hhs = [(x+y) for x, y in zip(self.W_hhs, self.nW_hhs)]
            if not no_noise_bias:
                # Fixed: was ``self.nb_hss`` (nonexistent attribute).
                b_hhs = [(x+y) for x, y in zip(self.b_hhs, self.nb_hhs)]
            else:
                b_hhs = self.b_hhs
        else:
            W_hhs = self.W_hhs
            b_hhs = self.b_hhs
        # First layer consumes the previous hidden state plus the input.
        preactiv = TT.dot(state_before, W_hhs[0]) + state_below
        h = self.activation[0](preactiv)
        if self.activ_noise and use_noise:
            h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
        if self.dropout < 1.:
            if use_noise:
                # dpmask packs one dropout mask per layer; dpidx tracks the
                # offset of the current layer's slice.
                if h.ndim == 2:
                    h = h * dpmask[:,:h.shape[1]]
                    dpidx = h.shape[1]
                else:
                    h = h * dpmask[:h.shape[0]]
                    dpidx = h.shape[0]
            else:
                h = h * self.dropout
        rval += [h]
        for dx in xrange(1, self.n_layers):
            preactiv = TT.dot(h, W_hhs[dx]) + b_hhs[dx-1]
            h = self.activation[dx](preactiv)
            if self.activ_noise and use_noise:
                h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
            if self.dropout < 1.:
                if use_noise:
                    if h.ndim == 2:
                        h = h * dpmask[:,dpidx:dpidx+h.shape[1]]
                        dpidx = dpidx + h.shape[1]
                    else:
                        h = h * dpmask[dpidx:dpidx+h.shape[0]]
                        dpidx = dpidx + h.shape[0]
                else:
                    h = h * self.dropout
            rval += [h]
        if mask is not None:
            if h.ndim ==2 and mask.ndim==1:
                mask = mask.dimshuffle(0,'x')
            # Where the sequence has ended, carry the old state forward.
            h = mask * h + (1-mask) * state_before
        rval[-1] = h
        return rval

    def fprop(self,
              state_below,
              mask=None,
              init_state=None,
              n_steps=None,
              batch_size=None,
              use_noise=True,
              truncate_gradient=-1,
              no_noise_bias = False):
        """
        Evaluates the forward through a recurrent layer

        :type state_below: theano variable
        :param state_below: the input of the recurrent layer

        :type mask: None or theano variable
        :param mask: mask describing the length of each sequence in a
            minibatch

        :type init_state: theano variable or None
        :param init_state: initial state for the hidden layer

        :type n_steps: None or int or theano scalar
        :param n_steps: Number of steps the recurrent netowrk does

        :type batch_size: int
        :param batch_size: the size of the minibatch over which scan runs

        :type use_noise: bool
        :param use_noise: flag saying if weight noise should be used in
            computing the output of this layer

        :type truncate_gradient: int
        :param truncate_gradient: If negative, no truncation is used,
            otherwise truncated BPTT is used, where you go backwards only this
            amount of steps

        :type no_noise_bias: bool
        :param no_noise_bias: flag saying if weight noise should be added to
            the bias as well
        """
        if theano.config.floatX=='float32':
            floatX = numpy.float32
        else:
            floatX = numpy.float64
        if n_steps is None:
            n_steps = state_below.shape[0]
            if batch_size and batch_size != 1:
                n_steps = n_steps / batch_size
        if batch_size is None and state_below.ndim == 3:
            batch_size = state_below.shape[1]
        if state_below.ndim == 2 and \
           (not isinstance(batch_size,int) or batch_size > 1):
            state_below = state_below.reshape((n_steps, batch_size, self.n_in))

        if not init_state:
            if not isinstance(batch_size, int) or batch_size != 1:
                init_state = TT.alloc(floatX(0), batch_size, self.n_hids[0])
            else:
                init_state = TT.alloc(floatX(0), self.n_hids[0])

        if mask:
            inps = [state_below, mask]
            fn = lambda x,y,z : self.step_fprop(x,y,None, z, use_noise=use_noise,
                                                no_noise_bias=no_noise_bias)
        else:
            inps = [state_below]
            fn = lambda tx, ty: self.step_fprop(tx, None, None, ty,
                                                use_noise=use_noise,
                                                no_noise_bias=no_noise_bias)

        if self.dropout < 1. and use_noise:
            # build dropout mask outside scan
            allhid = numpy.sum(self.n_hids)
            shape = state_below.shape
            if state_below.ndim == 3:
                alldpmask = self.trng.binomial(
                    (n_steps, batch_size, allhid),
                    n = 1, p = self.dropout, dtype=state_below.dtype)
            else:
                alldpmask = self.trng.binomial(
                    (n_steps, allhid),
                    n = 1, p = self.dropout, dtype=state_below.dtype)
            inps.append(alldpmask)
            if mask:
                fn = lambda x,y,z,u : self.step_fprop(x,y,z,u,use_noise=use_noise)
            else:
                fn = lambda tx, ty, tu: self.step_fprop(tx,None,ty,tu,
                                                        use_noise=use_noise)

        rval, updates = theano.scan(fn,
                                    sequences = inps,
                                    outputs_info = [None]*(self.n_layers-1) +
                                        [init_state],
                                    name='layer_%s'%self.name,
                                    profile=self.profile,
                                    truncate_gradient = truncate_gradient,
                                    n_steps = n_steps)
        if not isinstance(rval,(list, tuple)):
            rval = [rval]

        new_h = rval[-1]
        self.out = rval[-1]
        self.rval = rval
        self.updates = updates

        return self.out
class RecurrentMultiLayerInp(RecurrentMultiLayer):
    """
    Similar to the RecurrentMultiLayer, with the exception that the input is
    fed into the top layer of the MLP (rather than being an input to the
    MLP).

    Fix: ``step_fprop`` referenced the nonexistent attribute ``nW_hss``
    when weight noise was enabled (the attribute created in ``_init_params``
    is ``nW_hhs``), which raised AttributeError at graph-construction time.
    """
    def _init_params(self):
        # Unlike the parent, biases exist for all layers except the last,
        # because the input enters at the top layer instead of the bottom.
        self.W_hhs = []
        self.b_hhs = []
        for dx in xrange(self.n_layers):
            W_hh = self.init_fn[dx](self.n_hids[(dx-1)%self.n_layers],
                                    self.n_hids[dx],
                                    self.sparsity[dx],
                                    self.scale[dx],
                                    rng=self.rng)
            self.W_hhs.append(theano.shared(value=W_hh, name="W%d_%s" %
                                            (dx,self.name)))
            if dx < self.n_layers-1:
                self.b_hhs.append(theano.shared(
                    self.bias_fn[dx](self.n_hids[dx],
                                     self.bias_scale[dx],
                                     self.rng),
                    name='b%d_%s' %(dx, self.name)))
        self.params = [x for x in self.W_hhs] + [x for x in self.b_hhs]
        self.params_grad_scale = [self.grad_scale for x in self.params]
        self.restricted_params = [x for x in self.params]
        if self.weight_noise:
            self.nW_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_hhs]
            self.nb_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_hhs]
            self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nb_hhs]
            self.noise_params_shape_fn = [constant_shape(x.get_value().shape)
                                          for x in self.noise_params]

    def step_fprop(self,
                   state_below,
                   mask=None,
                   dpmask=None,
                   state_before=None,
                   no_noise_bias=False,
                   use_noise=True):
        """
        See parent class
        """
        rval = []
        if self.weight_noise and use_noise and self.noise_params:
            # Fixed: was ``self.nW_hss`` (nonexistent attribute).
            W_hhs = [(x+y) for x, y in zip(self.W_hhs,self.nW_hhs)]
            if not no_noise_bias:
                b_hhs = [(x+y) for x, y in zip(self.b_hhs,self.nb_hhs)]
            else:
                b_hhs = self.b_hhs
        else:
            W_hhs = self.W_hhs
            b_hhs = self.b_hhs
        # Bottom layer sees only the previous hidden state.
        h = self.activation[0](TT.dot(state_before,
                                      W_hhs[0])+b_hhs[0])
        if self.activ_noise and use_noise:
            h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
        if self.dropout < 1.:
            if use_noise:
                if h.ndim == 2:
                    h = h * dpmask[:,:h.shape[1]]
                    dpidx = h.shape[1]
                else:
                    h = h * dpmask[:h.shape[0]]
                    dpidx = h.shape[0]
            else:
                h = h * self.dropout
        rval += [h]
        for dx in xrange(1, self.n_layers-1):
            h = self.activation[dx](TT.dot(h,
                                           W_hhs[dx])+b_hhs[dx])
            if self.activ_noise and use_noise:
                h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
            if self.dropout < 1.:
                if use_noise:
                    if h.ndim == 2:
                        h = h * dpmask[:,dpidx:dpidx+h.shape[1]]
                        dpidx = dpidx + h.shape[1]
                    else:
                        h = h * dpmask[dpidx:dpidx+h.shape[0]]
                        dpidx = dpidx + h.shape[0]
                else:
                    h = h * self.dropout
            rval += [h]
        # Top layer: the external input enters here (no bias on this layer).
        h = self.activation[-1](TT.dot(h, W_hhs[-1]) + state_below)
        if self.activ_noise and use_noise:
            h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
        if self.dropout < 1.:
            if use_noise:
                if h.ndim == 2:
                    h = h * dpmask[:,dpidx:dpidx+h.shape[1]]
                    dpidx = dpidx + h.shape[1]
                else:
                    h = h * dpmask[dpidx:dpidx+h.shape[0]]
                    dpidx = dpidx + h.shape[0]
            else:
                h = h * self.dropout
        rval += [h]
        if mask is not None:
            if h.ndim ==2 and mask.ndim==1:
                mask = mask.dimshuffle(0,'x')
            # Where the sequence has ended, carry the old state forward.
            h = mask * h + (1-mask) * state_before
        rval[-1] = h
        return rval
class RecurrentMultiLayerShortPath(RecurrentMultiLayer):
    """
    A similar layer to RecurrentMultiLayer (the DT-RNN), with the difference
    that we have shortcut connections in the MLP representing the transition
    from previous hidden state to the next
    """
    def _init_params(self):
        # Transition matrices, biases, and shortcut matrices that connect
        # the previous top hidden state directly to each intermediate layer.
        self.W_hhs = []
        self.b_hhs = []
        self.W_shortp = []
        for dx in xrange(self.n_layers):
            # Layer dx maps n_hids[dx-1] -> n_hids[dx]; for dx == 0 the
            # index wraps around to the last layer (the recurrent state).
            W_hh = self.init_fn[dx](self.n_hids[(dx-1)%self.n_layers],
                                    self.n_hids[dx],
                                    self.sparsity[dx],
                                    self.scale[dx],
                                    rng=self.rng)
            self.W_hhs.append(theano.shared(value=W_hh, name="W%d_%s" %
                                            (dx,self.name)))
            if dx > 0:
                # Shortcut from the previous hidden state into this layer.
                W_shp = self.init_fn[dx](self.n_hids[self.n_layers-1],
                                         self.n_hids[dx],
                                         self.sparsity[dx],
                                         self.scale[dx],
                                         rng=self.rng)
                self.W_shortp.append(theano.shared(value=W_shp,
                                                   name='W_s%d_%s'%(dx,self.name)))
                self.b_hhs.append(theano.shared(
                    self.bias_fn[dx-1](self.n_hids[dx],
                                       self.bias_scale[dx-1],
                                       self.rng),
                    name='b%d_%s' %(dx, self.name)))
        self.params = [x for x in self.W_hhs] + [x for x in self.b_hhs] +\
                [x for x in self.W_shortp]
        self.params_grad_scale = [self.grad_scale for x in self.params]
        self.restricted_params = [x for x in self.params]
        if self.weight_noise:
            # Zero-initialized shared variables that hold the sampled noise.
            self.nW_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_hhs]
            self.nb_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_hhs]
            self.nW_shortp = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_shortp]

            self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nb_hhs] + [x for x in self.nW_shortp]
            self.noise_params_shape_fn = [constant_shape(x.get_value().shape) for x in self.noise_params]

    def step_fprop(self,
                   state_below,
                   mask=None,
                   dpmask=None,
                   state_before=None,
                   no_noise_bias=False,
                   use_noise=True):
        """
        See parent class
        """
        rval = []
        if self.weight_noise and use_noise and self.noise_params:
            # Add sampled weight noise to all parameter groups.
            W_hhs = [(x+y) for x, y in zip(self.W_hhs,self.nW_hhs)]
            if not no_noise_bias:
                b_hhs = [(x+y) for x, y in zip(self.b_hhs,self.nb_hhs)]
            else:
                b_hhs = self.b_hhs
            W_shp = [(x+y) for x, y in zip(self.W_shortp,self.nW_shortp)]
        else:
            W_hhs = self.W_hhs
            b_hhs = self.b_hhs
            W_shp = self.W_shortp
        # Bottom layer consumes the previous hidden state plus the input.
        h = self.activation[0](TT.dot(state_before,
                                      W_hhs[0])+state_below)
        if self.activ_noise and use_noise:
            h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
        if self.dropout < 1.:
            if use_noise:
                # dpmask packs one dropout mask per layer; dpidx tracks the
                # offset of the current layer's slice.
                if h.ndim == 2:
                    h = h * dpmask[:,:h.shape[1]]
                    dpidx = h.shape[1]
                else:
                    h = h * dpmask[:h.shape[0]]
                    dpidx = h.shape[0]
            else:
                h = h * self.dropout
        rval += [h]
        for dx in xrange(1, self.n_layers):
            # Each subsequent layer also receives the shortcut projection of
            # the previous hidden state.
            h = self.activation[dx](TT.dot(h,
                                           W_hhs[dx])+
                                    TT.dot(state_before,
                                           W_shp[dx-1])+b_hhs[dx-1])
            if self.activ_noise and use_noise:
                h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
            if self.dropout < 1.:
                if use_noise:
                    if h.ndim == 2:
                        h = h * dpmask[:,dpidx:dpidx+h.shape[1]]
                        dpidx = dpidx + h.shape[1]
                    else:
                        h = h * dpmask[dpidx:dpidx+h.shape[0]]
                        dpidx = dpidx + h.shape[0]
                else:
                    h = h * self.dropout
            rval += [h]
        if mask is not None:
            if h.ndim ==2 and mask.ndim==1:
                mask = mask.dimshuffle(0,'x')
            # Where the sequence has ended, carry the old state forward.
            h = mask * h + (1-mask) * state_before
        rval[-1] = h
        return rval
class RecurrentMultiLayerShortPathInp(RecurrentMultiLayer):
    """
    Similar to the RecurrentMultiLayerShortPath class, just that the input
    is fed into the last layer of the MLP (similar to
    RecurrentMultiLayerInp).

    Fix: after the mask handling, ``step_fprop`` appended the masked state
    a second time (``rval += [h]``) instead of replacing the last entry
    (``rval[-1] = h``) as every sibling class does; the extra element broke
    the correspondence with scan's ``outputs_info`` when a mask was given.
    """
    def _init_params(self):
        # Transition matrices, biases (all layers but the last), and
        # shortcut matrices from the previous top hidden state.
        self.W_hhs = []
        self.b_hhs = []
        self.W_shortp = []

        for dx in xrange(self.n_layers):
            W_hh = self.init_fn[dx](self.n_hids[(dx-1)%self.n_layers],
                                    self.n_hids[dx],
                                    self.sparsity[dx],
                                    self.scale[dx],
                                    rng=self.rng)
            self.W_hhs.append(theano.shared(value=W_hh, name="W%d_%s" %
                                            (dx,self.name)))
            if dx > 0:
                W_shp = self.init_fn[dx](self.n_hids[self.n_layers-1],
                                         self.n_hids[dx],
                                         self.sparsity[dx],
                                         self.scale[dx],
                                         rng=self.rng)
                self.W_shortp.append(theano.shared(value=W_shp,
                                                   name='W_s%d_%s'%(dx,self.name)))
            if dx < self.n_layers-1:
                self.b_hhs.append(theano.shared(
                    self.bias_fn[dx](self.n_hids[dx],
                                     self.bias_scale[dx],
                                     self.rng),
                    name='b%d_%s' %(dx, self.name)))
        self.params = [x for x in self.W_hhs] + [x for x in self.b_hhs] +\
                [x for x in self.W_shortp]
        self.restricted_params = [x for x in self.params]
        self.params_grad_scale = [self.grad_scale for x in self.params]
        if self.weight_noise:
            self.nW_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_hhs]
            self.nb_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_hhs]
            self.nW_shortp = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_shortp]

            self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nb_hhs] + [x for x in self.nW_shortp]
            self.noise_params_shape_fn = [constant_shape(x.get_value().shape) for x in self.noise_params]

    def step_fprop(self,
                   state_below,
                   mask=None,
                   dpmask=None,
                   state_before=None,
                   no_noise_bias=False,
                   use_noise=True):
        """
        See parent class
        """
        rval = []
        if self.weight_noise and use_noise and self.noise_params:
            W_hhs = [(x+y) for x, y in zip(self.W_hhs, self.nW_hhs)]
            if not no_noise_bias:
                b_hhs = [(x+y) for x, y in zip(self.b_hhs, self.nb_hhs)]
            else:
                b_hhs = self.b_hhs
            W_shp = [(x+y) for x, y in zip(self.W_shortp, self.nW_shortp)]
        else:
            W_hhs = self.W_hhs
            b_hhs = self.b_hhs
            W_shp = self.W_shortp
        # Bottom layer sees only the previous hidden state.
        h = self.activation[0](TT.dot(state_before,
                                      W_hhs[0])+b_hhs[0])
        if self.activ_noise and use_noise:
            h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
        if self.dropout < 1.:
            if use_noise:
                if h.ndim == 2:
                    h = h * dpmask[:,:h.shape[1]]
                    dpidx = h.shape[1]
                else:
                    h = h * dpmask[:h.shape[0]]
                    dpidx = h.shape[0]
            else:
                h = h * self.dropout
        rval += [h]
        for dx in xrange(1, self.n_layers-1):
            h = self.activation[dx](TT.dot(h,
                                           W_hhs[dx])+
                                    TT.dot(state_before,
                                           W_shp[dx-1])+b_hhs[dx])
            if self.activ_noise and use_noise:
                h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
            if self.dropout < 1.:
                if use_noise:
                    if h.ndim == 2:
                        h = h * dpmask[:,dpidx:dpidx+h.shape[1]]
                        dpidx = dpidx + h.shape[1]
                    else:
                        h = h * dpmask[dpidx:dpidx+h.shape[0]]
                        dpidx = dpidx + h.shape[0]
                else:
                    h = h * self.dropout
            rval += [h]
        # Top layer: shortcut plus the external input (no bias).
        h = self.activation[-1](TT.dot(h, W_hhs[-1]) +
                                TT.dot(state_before, W_shp[-1])+state_below)
        if self.activ_noise and use_noise:
            h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
        if self.dropout < 1.:
            if use_noise:
                # NOTE(review): unlike the middle layers, this slice restarts
                # at offset 0 rather than continuing from dpidx — confirm
                # whether reusing the first layer's mask region is intended.
                if h.ndim == 2:
                    h = h * dpmask[:,:h.shape[1]]
                    dpidx = h.shape[1]
                else:
                    h = h * dpmask[:h.shape[0]]
                    dpidx = h.shape[0]
            else:
                h = h * self.dropout
        rval += [h]
        if mask is not None:
            if h.ndim ==2 and mask.ndim==1:
                mask = mask.dimshuffle(0,'x')
            h = mask * h + (1-mask) * state_before
        # Fixed: was ``rval += [h]``, which appended a duplicate element.
        rval[-1] = h
        return rval
class RecurrentMultiLayerShortPathInpAll(RecurrentMultiLayer):
    """
    Similar to RecurrentMultiLayerShortPathInp class, just that the input is
    fed to all layers of the MLP depicting the deep transition between h_tm1
    to h_t.
    """
    def _init_params(self):
        # One hidden-to-hidden matrix per layer and one shortcut matrix
        # (from h_tm1) per layer > 0.  No biases: each layer receives its
        # own (already affine) slice of the projected input instead.
        self.W_hhs = []
        self.W_shortp = []
        for dx in xrange(self.n_layers):
            # (dx-1) % n_layers maps layer 0 to the size of the last
            # layer, so W0 connects h_tm1 to the first transition layer.
            W_hh = self.init_fn[dx](self.n_hids[(dx-1)%self.n_layers],
                                    self.n_hids[dx],
                                    self.sparsity[dx],
                                    self.scale[dx],
                                    rng=self.rng)
            self.W_hhs.append(theano.shared(value=W_hh, name="W%d_%s" %
                                            (dx,self.name)))
            if dx > 0:
                W_shp = self.init_fn[dx](self.n_hids[self.n_layers-1],
                                         self.n_hids[dx],
                                         self.sparsity[dx],
                                         self.scale[dx],
                                         rng=self.rng)
                self.W_shortp.append(theano.shared(value=W_shp,
                                                   name='W_s%d_%s'%(dx,self.name)))
        self.params = [x for x in self.W_hhs] +\
                [x for x in self.W_shortp]
        self.params_grad_scale = [self.grad_scale for x in self.params]
        self.restricted_params = [x for x in self.params]
        if self.weight_noise:
            # Zero-initialized additive-noise twins of each parameter.
            self.nW_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_hhs]
            self.nW_shortp = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_shortp]
            self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nW_shortp]
            self.noise_params_shape_fn = [constant_shape(x.get_value().shape) for x in self.noise_params]
    def step_fprop(self,
                   state_below,
                   mask=None,
                   dpmask=None,
                   state_before=None,
                   no_noise_bias=False,
                   use_noise=True):
        """
        One step of the deep transition h_tm1 -> h_t.  `state_below`
        concatenates one projected-input chunk per layer; every layer
        consumes its own chunk.  Returns the list of per-layer
        activations, the last of which is the new hidden state.
        See parent class for parameter docs.
        """
        rval = []
        if self.weight_noise and use_noise and self.noise_params:
            W_hhs = [(x+y) for x, y in zip(self.W_hhs,self.nW_hhs)]
            W_shp = [(x+y) for x, y in zip(self.W_shortp,self.nW_shortp)]
        else:
            W_hhs = self.W_hhs
            W_shp = self.W_shortp
        def slice_state_below(dx, sb = state_below):
            # Return the chunk of `sb` belonging to layer `dx`
            # (chunks are laid out contiguously, sized by n_hids).
            st = 0
            for p in xrange(dx):
                st += self.n_hids[p]
            ed = st + self.n_hids[dx]
            if sb.ndim == 1:
                return sb[st:ed]
            else:
                return sb[:,st:ed]
        # First layer: h_tm1 plus this layer's input slice.
        h = self.activation[0](TT.dot(state_before, W_hhs[0]) + slice_state_below(0))
        if self.activ_noise and use_noise:
            h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
        if self.dropout < 1.:
            if use_noise:
                # dpmask concatenates per-layer dropout masks; dpidx
                # tracks the offset of the current layer's slice.
                if h.ndim == 2:
                    h = h * dpmask[:,:h.shape[1]]
                    dpidx = h.shape[1]
                else:
                    h = h * dpmask[:h.shape[0]]
                    dpidx = h.shape[0]
            else:
                # At test time, rescale instead of dropping.
                h = h * self.dropout
        rval += [h]
        # Remaining layers: previous layer, shortcut from h_tm1, and the
        # layer's own input slice.
        for dx in xrange(1, self.n_layers):
            h = self.activation[dx](TT.dot(h, W_hhs[dx]) +
                                    TT.dot(state_before, W_shp[dx-1]) +
                                    slice_state_below(dx))
            if self.activ_noise and use_noise:
                h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
            if self.dropout < 1.:
                if use_noise:
                    if h.ndim == 2:
                        h = h * dpmask[:,dpidx:dpidx+h.shape[1]]
                        dpidx = dpidx + h.shape[1]
                    else:
                        h = h * dpmask[dpidx:dpidx+h.shape[0]]
                        dpidx = dpidx + h.shape[0]
                else:
                    h = h * self.dropout
            rval += [h]
        if mask is not None:
            if h.ndim ==2 and mask.ndim==1:
                mask = mask.dimshuffle(0,'x')
            # Where mask is 0 (past end of sequence) keep the old state,
            # and replace the last entry with the masked state.
            h = mask * h + (1-mask) * state_before
            rval[-1] = h
        return rval
class RecurrentLayer(Layer):
    """
    Standard recurrent layer with optional update ("gater") and reset
    gates.  See arXiv version of our paper.
    """
    def __init__(self, rng,
                 n_hids=500,
                 scale=.01,
                 sparsity = -1,
                 activation = TT.tanh,
                 activ_noise=0.,
                 weight_noise=False,
                 bias_fn='init_bias',
                 bias_scale = 0.,
                 dropout = 1.,
                 init_fn='sample_weights',
                 kind_reg = None,
                 grad_scale = 1.,
                 profile = 0,
                 gating = False,
                 reseting = False,
                 gater_activation = TT.nnet.sigmoid,
                 reseter_activation = TT.nnet.sigmoid,
                 name=None):
        """
        :type rng: numpy random generator
        :param rng: numpy random generator

        :type n_hids: int
        :param n_hids: Number of hidden units (the layer is square,
            n_in == n_out == n_hids)

        :type activation: string or function
        :param activation: Activation function of the hidden state.
            Strings are resolved with ``eval`` and must name a function
            already in scope.

        :type scale: float
        :param scale: depending on the initialization function, it can be
            the standard deviation of the Gaussian from which the weights
            are sampled or the largest singular value

        :type sparsity: int
        :param sparsity: if negative, the weight matrix is dense;
            otherwise this many randomly selected input units are
            connected to an output unit

        :type weight_noise: bool
        :param weight_noise: If true, the model is used with weight noise
            (and the right shared variables are constructed, to keep track
            of the noise)

        :type dropout: float
        :param dropout: the probability with which hidden units are
            dropped from the hidden layer. If set to 1, dropout is not
            used

        :type init_fn: string or function
        :param init_fn: function used to initialize the weights of the
            layer. We recommend using either `sample_weights_classic` or
            `sample_weights` defined in the utils

        :type bias_fn: string or function
        :param bias_fn: function used to initialize the biases. We
            recommend using `init_bias` defined in the utils

        :type bias_scale: float
        :param bias_scale: argument passed to `bias_fn`, depicting the
            scale of the initial bias

        :type grad_scale: float or theano scalar
        :param grad_scale: factor with which the gradients with respect to
            the parameters of this layer are scaled. It is used for
            differentiating between the different parameters of a model.

        :type gating: bool
        :param gating: If true, an update gate is used

        :type reseting: bool
        :param reseting: If true, a reset gate is used

        :type gater_activation: string or function
        :param gater_activation: The activation function of the update
            gate

        :type reseter_activation: string or function
        :param reseter_activation: The activation function of the reset
            gate

        :type name: string
        :param name: name of the layer (used to name parameters). NB: in
            this library names are very important because certain parts of
            the code relies on name to disambiguate between variables,
            therefore each layer should have a unique name.
        """
        self.grad_scale = grad_scale

        # String arguments name functions that must already be in scope;
        # they are resolved with eval (trusted, internal configuration --
        # never pass untrusted strings here).
        if type(init_fn) is str or type(init_fn) is unicode:
            init_fn = eval(init_fn)
        if type(bias_fn) is str or type(bias_fn) is unicode:
            bias_fn = eval(bias_fn)
        if type(activation) is str or type(activation) is unicode:
            activation = eval(activation)
        if type(gater_activation) is str or type(gater_activation) is unicode:
            gater_activation = eval(gater_activation)
        if type(reseter_activation) is str or type(reseter_activation) is unicode:
            reseter_activation = eval(reseter_activation)

        self.scale = scale
        self.sparsity = sparsity
        self.activation = activation
        self.n_hids = n_hids
        self.bias_scale = bias_scale
        self.bias_fn = bias_fn
        self.init_fn = init_fn
        self.weight_noise = weight_noise
        self.activ_noise = activ_noise
        self.profile = profile
        self.dropout = dropout
        self.gating = gating
        self.reseting = reseting
        self.gater_activation = gater_activation
        self.reseter_activation = reseter_activation
        assert rng is not None, "random number generator should not be empty!"
        super(RecurrentLayer, self).__init__(self.n_hids,
                self.n_hids, rng, name)
        self.trng = RandomStreams(self.rng.randint(int(1e6)))
        self.params = []
        self._init_params()

    def _init_params(self):
        """Allocate the recurrent weight matrix and, when enabled, the
        update-gate (G) and reset-gate (R) matrices plus their additive
        noise twins."""
        self.W_hh = theano.shared(
                self.init_fn(self.n_hids,
                self.n_hids,
                self.sparsity,
                self.scale,
                rng=self.rng),
                name="W_%s"%self.name)
        self.params = [self.W_hh]
        if self.gating:
            self.G_hh = theano.shared(
                    self.init_fn(self.n_hids,
                    self.n_hids,
                    self.sparsity,
                    self.scale,
                    rng=self.rng),
                    name="G_%s"%self.name)
            self.params.append(self.G_hh)
        if self.reseting:
            self.R_hh = theano.shared(
                    self.init_fn(self.n_hids,
                    self.n_hids,
                    self.sparsity,
                    self.scale,
                    rng=self.rng),
                    name="R_%s"%self.name)
            self.params.append(self.R_hh)
        self.params_grad_scale = [self.grad_scale for x in self.params]
        self.restricted_params = [x for x in self.params]
        if self.weight_noise:
            # BUGFIX: only create noise variables for matrices that exist.
            # The previous code unconditionally referenced self.G_hh
            # (AttributeError with the default gating=False) and never
            # created self.nR_hh, which step_fprop uses when
            # reseting=True with weight noise.
            self.nW_hh = theano.shared(self.W_hh.get_value()*0, name='noise_'+self.W_hh.name)
            self.noise_params = [self.nW_hh]
            if self.gating:
                self.nG_hh = theano.shared(self.G_hh.get_value()*0, name='noise_'+self.G_hh.name)
                self.noise_params.append(self.nG_hh)
            if self.reseting:
                self.nR_hh = theano.shared(self.R_hh.get_value()*0, name='noise_'+self.R_hh.name)
                self.noise_params.append(self.nR_hh)
            self.noise_params_shape_fn = [constant_shape(x.get_value().shape)
                            for x in self.noise_params]

    def step_fprop(self,
                   state_below,
                   mask = None,
                   state_before = None,
                   gater_below = None,
                   reseter_below = None,
                   use_noise=True,
                   no_noise_bias = False):
        """
        Constructs the computational graph of this layer.

        :type state_below: theano variable
        :param state_below: the input to the layer

        :type mask: None or theano variable
        :param mask: mask describing the length of each sequence in a
            minibatch

        :type state_before: theano variable
        :param state_before: the previous value of the hidden state of the
            layer

        :type gater_below: theano variable
        :param gater_below: the input to the update gate

        :type reseter_below: theano variable
        :param reseter_below: the input to the reset gate

        :type use_noise: bool
        :param use_noise: flag saying if weight noise should be used in
            computing the output of this layer

        :type no_noise_bias: bool
        :param no_noise_bias: flag saying if weight noise should be added
            to the bias as well
        """
        rval = []
        if self.weight_noise and use_noise and self.noise_params:
            W_hh = self.W_hh + self.nW_hh
            if self.gating:
                G_hh = self.G_hh + self.nG_hh
            if self.reseting:
                R_hh = self.R_hh + self.nR_hh
        else:
            W_hh = self.W_hh
            if self.gating:
                G_hh = self.G_hh
            if self.reseting:
                R_hh = self.R_hh
        # Reset gate:
        # optionally reset the hidden state.
        if self.reseting and reseter_below:
            reseter = self.reseter_activation(TT.dot(state_before, R_hh) +
                    reseter_below)
            reseted_state_before = reseter * state_before
        else:
            reseted_state_before = state_before
        # Feed the input to obtain potential new state.
        preactiv = TT.dot(reseted_state_before, W_hh) + state_below
        h = self.activation(preactiv)
        # Update gate:
        # optionally reject the potential new state and use the new one.
        if self.gating and gater_below:
            gater = self.gater_activation(TT.dot(state_before, G_hh) +
                    gater_below)
            h = gater * h + (1-gater) * state_before
        if self.activ_noise and use_noise:
            h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
        if mask is not None:
            if h.ndim ==2 and mask.ndim==1:
                mask = mask.dimshuffle(0,'x')
            # Where mask is 0 (past end of sequence) keep the old state.
            h = mask * h + (1-mask) * state_before
        return h

    def fprop(self,
              state_below,
              mask=None,
              init_state=None,
              gater_below=None,
              reseter_below=None,
              nsteps=None,
              batch_size=None,
              use_noise=True,
              truncate_gradient=-1,
              no_noise_bias = False
             ):
        """
        Unroll `step_fprop` over time with theano.scan.  `state_below`
        (and the optional gate inputs) may arrive flattened to 2D; they
        are reshaped to (nsteps, batch_size, n_in) first.  Returns the
        sequence of hidden states and stores it on self.out.
        """
        if theano.config.floatX=='float32':
            floatX = numpy.float32
        else:
            floatX = numpy.float64
        if nsteps is None:
            nsteps = state_below.shape[0]
            if batch_size and batch_size != 1:
                nsteps = nsteps / batch_size
        if batch_size is None and state_below.ndim == 3:
            batch_size = state_below.shape[1]
        if state_below.ndim == 2 and \
           (not isinstance(batch_size,int) or batch_size > 1):
            state_below = state_below.reshape((nsteps, batch_size, self.n_in))
            if gater_below:
                gater_below = gater_below.reshape((nsteps, batch_size, self.n_in))
            if reseter_below:
                reseter_below = reseter_below.reshape((nsteps, batch_size, self.n_in))
        if not init_state:
            if not isinstance(batch_size, int) or batch_size != 1:
                init_state = TT.alloc(floatX(0), batch_size, self.n_hids)
            else:
                init_state = TT.alloc(floatX(0), self.n_hids)
        # FIXME: Find a way to clean this up.
        # scan passes sequences first, then outputs_info; the lambdas
        # below re-order those positional arguments into step_fprop's
        # keyword interface for each combination of optional inputs.
        if self.reseting and reseter_below:
            if self.gating and gater_below:
                if mask:
                    inps = [state_below, mask, gater_below, reseter_below]
                    fn = lambda x,y,g,r,z : self.step_fprop(x,y,z, gater_below=g, reseter_below=r, use_noise=use_noise,
                                                       no_noise_bias=no_noise_bias)
                else:
                    inps = [state_below, gater_below, reseter_below]
                    fn = lambda tx, tg,tr, ty: self.step_fprop(tx, None, ty, gater_below=tg,
                                                        reseter_below=tr,
                                                        use_noise=use_noise,
                                                        no_noise_bias=no_noise_bias)
            else:
                if mask:
                    inps = [state_below, mask, reseter_below]
                    fn = lambda x,y,r,z : self.step_fprop(x,y,z, use_noise=use_noise,
                                                        reseter_below=r,
                                                        no_noise_bias=no_noise_bias)
                else:
                    inps = [state_below, reseter_below]
                    fn = lambda tx,tr,ty: self.step_fprop(tx, None, ty,
                                                        reseter_below=tr,
                                                        use_noise=use_noise,
                                                        no_noise_bias=no_noise_bias)
        else:
            if self.gating and gater_below:
                if mask:
                    inps = [state_below, mask, gater_below]
                    fn = lambda x,y,g,z : self.step_fprop(x,y,z, gater_below=g, use_noise=use_noise,
                                                       no_noise_bias=no_noise_bias)
                else:
                    inps = [state_below, gater_below]
                    fn = lambda tx, tg, ty: self.step_fprop(tx, None, ty, gater_below=tg,
                                                        use_noise=use_noise,
                                                        no_noise_bias=no_noise_bias)
            else:
                if mask:
                    inps = [state_below, mask]
                    fn = lambda x,y,z : self.step_fprop(x,y,z, use_noise=use_noise,
                                                        no_noise_bias=no_noise_bias)
                else:
                    inps = [state_below]
                    fn = lambda tx, ty: self.step_fprop(tx, None, ty,
                                                        use_noise=use_noise,
                                                        no_noise_bias=no_noise_bias)
        rval, updates = theano.scan(fn,
                        sequences = inps,
                        outputs_info = [init_state],
                        name='layer_%s'%self.name,
                        profile=self.profile,
                        truncate_gradient = truncate_gradient,
                        n_steps = nsteps)
        self.out = rval
        self.rval = rval
        self.updates = updates
        return self.out
class LSTMLayer(Layer):
    """
    Standard LSTM Layer.

    The layer keeps the hidden state h and the cell state c concatenated
    in one vector (h first, then c), so `state_before` / `init_state`
    have 2*n_hids units.  `state_below` concatenates the projected input
    for cell / input gate / output gate / forget gate (4*n_hids units).
    """
    def __init__(self, rng,
                 n_hids=500,
                 scale=.01,
                 sparsity = -1,
                 activation = TT.tanh,
                 activ_noise=0.,
                 weight_noise=False,
                 bias_fn='init_bias',
                 bias_scale = 0.,
                 dropout = 1.,
                 init_fn='sample_weights',
                 kind_reg = None,
                 grad_scale = 1.,
                 profile = 0,
                 name=None,
                 **kwargs):
        """
        :type rng: numpy random generator
        :param rng: numpy random generator

        :type n_hids: int
        :param n_hids: Number of hidden units

        :type activation: string or function
        :param activation: Activation function of the cell update and of
            the output (a string is resolved with ``eval``)

        :type scale: float
        :param scale: depending on the initialization function, it can be
            the standard deviation of the Gaussian from which the weights
            are sampled or the largest singular value

        :type sparsity: int
        :param sparsity: if negative, the weight matrix is dense;
            otherwise this many randomly selected input units are
            connected to an output unit

        :type weight_noise: bool
        :param weight_noise: If true, the model is used with weight noise
            (and the right shared variables are constructed, to keep track
            of the noise)

        :type dropout: float
        :param dropout: the probability with which hidden units are
            dropped from the hidden layer. If set to 1, dropout is not
            used

        :type init_fn: string or function
        :param init_fn: function used to initialize the weights of the
            layer. We recommend using either `sample_weights_classic` or
            `sample_weights` defined in the utils

        :type bias_fn: string or function
        :param bias_fn: function used to initialize the biases. We
            recommend using `init_bias` defined in the utils

        :type bias_scale: float
        :param bias_scale: argument passed to `bias_fn`, depicting the
            scale of the initial bias

        :type grad_scale: float or theano scalar
        :param grad_scale: factor with which the gradients with respect to
            the parameters of this layer are scaled. It is used for
            differentiating between the different parameters of a model.

        :type name: string
        :param name: name of the layer (used to name parameters). NB: in
            this library names are very important because certain parts of
            the code relies on name to disambiguate between variables,
            therefore each layer should have a unique name.
        """
        self.grad_scale = grad_scale

        # Resolve string-named functions (trusted, internal config).
        if type(init_fn) is str or type(init_fn) is unicode:
            init_fn = eval(init_fn)
        if type(bias_fn) is str or type(bias_fn) is unicode:
            bias_fn = eval(bias_fn)
        if type(activation) is str or type(activation) is unicode:
            activation = eval(activation)

        self.scale = scale
        self.sparsity = sparsity
        self.activation = activation
        self.n_hids = n_hids
        self.bias_scale = bias_scale
        self.bias_fn = bias_fn
        self.init_fn = init_fn
        self.weight_noise = weight_noise
        self.activ_noise = activ_noise
        self.profile = profile
        self.dropout = dropout
        assert rng is not None, "random number generator should not be empty!"
        super(LSTMLayer, self).__init__(self.n_hids,
                self.n_hids, rng, name)
        self.trng = RandomStreams(self.rng.randint(int(1e6)))
        self.params = []
        self._init_params()

    def _mk_weight(self, name):
        # One square (n_hids x n_hids) recurrent weight matrix drawn with
        # the configured initializer.
        return theano.shared(
            self.init_fn(self.n_hids,
                         self.n_hids,
                         self.sparsity,
                         self.scale,
                         rng=self.rng),
            name=name)

    def _init_params(self):
        """Allocate the seven recurrent/peephole weight matrices
        (h->input gate, c->input gate, h->forget gate, c->forget gate,
        h->cell, h->output gate, c->output gate) and, when weight noise
        is enabled, their additive noise twins."""
        # BUGFIX: W_hc, W_ho and W_co were all (mis)named "Wcf_%s",
        # colliding with W_cf -- parameter names must be unique (see the
        # class docstring note on names).
        self.W_hi = self._mk_weight("Whi_%s"%self.name)
        self.W_ci = self._mk_weight("Wci_%s"%self.name)
        self.W_hf = self._mk_weight("Whf_%s"%self.name)
        self.W_cf = self._mk_weight("Wcf_%s"%self.name)
        self.W_hc = self._mk_weight("Whc_%s"%self.name)
        self.W_ho = self._mk_weight("Who_%s"%self.name)
        self.W_co = self._mk_weight("Wco_%s"%self.name)
        self.params = [self.W_hi, self.W_ci, self.W_hf, self.W_cf,
                       self.W_hc, self.W_ho, self.W_co]
        self.params_grad_scale = [self.grad_scale for x in self.params]
        self.restricted_params = [x for x in self.params]
        if self.weight_noise:
            # BUGFIX: step_fprop reads self.nW_hi ... self.nW_co, but the
            # old code only built an anonymous list of shared variables,
            # so weight noise crashed with AttributeError.
            self.nW_hi = theano.shared(self.W_hi.get_value()*0, name='noise_'+self.W_hi.name)
            self.nW_ci = theano.shared(self.W_ci.get_value()*0, name='noise_'+self.W_ci.name)
            self.nW_hf = theano.shared(self.W_hf.get_value()*0, name='noise_'+self.W_hf.name)
            self.nW_cf = theano.shared(self.W_cf.get_value()*0, name='noise_'+self.W_cf.name)
            self.nW_hc = theano.shared(self.W_hc.get_value()*0, name='noise_'+self.W_hc.name)
            self.nW_ho = theano.shared(self.W_ho.get_value()*0, name='noise_'+self.W_ho.name)
            self.nW_co = theano.shared(self.W_co.get_value()*0, name='noise_'+self.W_co.name)
            self.noise_params = [self.nW_hi, self.nW_ci, self.nW_hf, self.nW_cf,
                                 self.nW_hc, self.nW_ho, self.nW_co]
            self.noise_params_shape_fn = [constant_shape(x.get_value().shape)
                            for x in self.noise_params]

    def _get_slice_below(self, state_below, to='cell'):
        """Return the chunk of the projected input that feeds the cell or
        one of the gates (layout: cell | input | output | forget)."""
        if to == 'cell':
            offset = 0
        elif to == 'input':
            offset = 1 * self.n_hids
        elif to == 'output':
            offset = 2 * self.n_hids
        elif to == 'forget':
            offset = 3 * self.n_hids
        else:
            raise Warning('Unknown gate/cell types')
        if state_below.ndim == 3:
            return state_below[:,:,offset:offset+self.n_hids]
        if state_below.ndim == 2:
            return state_below[:,offset:offset+self.n_hids]
        return state_below[offset:offset+self.n_hids]

    def _get_slice_before(self, state_before, fr='cell'):
        """Return the hidden (first n_hids units) or cell (last n_hids
        units) part of the concatenated recurrent state."""
        if fr == 'cell':
            offset = self.n_hids
        elif fr == 'hidden':
            offset = 0
        else:
            raise Warning('Unknown cell/gate types')
        if state_before.ndim == 2:
            return state_before[:,offset:offset+self.n_hids]
        return state_before[offset:offset+self.n_hids]

    def step_fprop(self,
                   state_below,
                   mask = None,
                   state_before = None,
                   use_noise=True,
                   no_noise_bias = False,
                   **kwargs):
        """
        Constructs the computational graph of this layer.

        :type state_below: theano variable
        :param state_below: the input to the layer

        :type mask: None or theano variable
        :param mask: mask describing the length of each sequence in a
            minibatch

        :type state_before: theano variable
        :param state_before: the previous value of the hidden state of the
            layer (hidden and cell parts concatenated)

        :type use_noise: bool
        :param use_noise: flag saying if weight noise should be used in
            computing the output of this layer

        :type no_noise_bias: bool
        :param no_noise_bias: flag saying if weight noise should be added
            to the bias as well
        """
        rval = []
        if self.weight_noise and use_noise and self.noise_params:
            W_hi = self.W_hi + self.nW_hi
            W_ci = self.W_ci + self.nW_ci
            W_hf = self.W_hf + self.nW_hf
            W_cf = self.W_cf + self.nW_cf
            W_hc = self.W_hc + self.nW_hc
            W_ho = self.W_ho + self.nW_ho
            W_co = self.W_co + self.nW_co
        else:
            W_hi = self.W_hi
            W_ci = self.W_ci
            W_hf = self.W_hf
            W_cf = self.W_cf
            W_hc = self.W_hc
            W_ho = self.W_ho
            W_co = self.W_co
        # input gate
        ig = TT.nnet.sigmoid(self._get_slice_below(state_below,'input') +
                TT.dot(self._get_slice_before(state_before,'hidden'), W_hi) +
                TT.dot(self._get_slice_before(state_before,'cell'), W_ci))
        # forget gate
        fg = TT.nnet.sigmoid(self._get_slice_below(state_below,'forget') +
                TT.dot(self._get_slice_before(state_before,'hidden'), W_hf) +
                TT.dot(self._get_slice_before(state_before,'cell'), W_cf))
        # cell
        cc = fg * self._get_slice_before(state_before,'cell') +  \
             ig * self.activation(self._get_slice_below(state_below,'cell') +
                TT.dot(self._get_slice_before(state_before,'hidden'), W_hc))
        # output gate (peephole uses the *new* cell state cc)
        og = TT.nnet.sigmoid(self._get_slice_below(state_below,'output') +
                TT.dot(self._get_slice_before(state_before,'hidden'), W_ho) +
                TT.dot(cc, W_co))
        # hidden state
        hh = og * self.activation(cc)
        # Re-concatenate hidden and cell parts into one state vector.
        if hh.ndim == 2:
            h = TT.concatenate([hh, cc], axis=1)
        else:
            h = TT.concatenate([hh, cc], axis=0)
        if self.activ_noise and use_noise:
            h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
        if mask is not None:
            if h.ndim ==2 and mask.ndim==1:
                mask = mask.dimshuffle(0,'x')
            # Where mask is 0 (past end of sequence) keep the old state.
            h = mask * h + (1-mask) * state_before
        return h

    def fprop(self,
              state_below,
              mask=None,
              init_state=None,
              nsteps=None,
              batch_size=None,
              use_noise=True,
              truncate_gradient=-1,
              no_noise_bias = False,
              **kwargs
             ):
        """
        Unroll `step_fprop` over time with theano.scan.  The initial
        state (when not given) is all zeros of size 2*n_hids, covering
        both the hidden and the cell parts.  Returns the sequence of
        concatenated [h, c] states and stores it on self.out.
        """
        if theano.config.floatX=='float32':
            floatX = numpy.float32
        else:
            floatX = numpy.float64
        if nsteps is None:
            nsteps = state_below.shape[0]
            if batch_size and batch_size != 1:
                nsteps = nsteps / batch_size
        if batch_size is None and state_below.ndim == 3:
            batch_size = state_below.shape[1]
        if state_below.ndim == 2 and \
           (not isinstance(batch_size,int) or batch_size > 1):
            state_below = state_below.reshape((nsteps, batch_size, state_below.shape[-1]))
        if not init_state:
            if not isinstance(batch_size, int) or batch_size != 1:
                init_state = TT.alloc(floatX(0), batch_size, self.n_hids * 2)
            else:
                init_state = TT.alloc(floatX(0), self.n_hids * 2)
        # scan passes sequences first, then outputs_info; the lambdas
        # re-order those positional arguments for step_fprop.
        if mask:
            inps = [state_below, mask]
            fn = lambda x,y,z : self.step_fprop(x,y,z, use_noise=use_noise,
                                               no_noise_bias=no_noise_bias)
        else:
            inps = [state_below]
            fn = lambda tx, ty: self.step_fprop(tx, None, ty,
                                                use_noise=use_noise,
                                                no_noise_bias=no_noise_bias)
        rval, updates = theano.scan(fn,
                        sequences = inps,
                        outputs_info = [init_state],
                        name='layer_%s'%self.name,
                        profile=self.profile,
                        truncate_gradient = truncate_gradient,
                        n_steps = nsteps)
        self.out = rval
        self.rval = rval
        self.updates = updates
        return self.out
class DoubleRecurrentLayer(Layer):
    def __init__(self, rng,
                 n_hids=500,
                 scale=.01,
                 sparsity = -1,
                 activation = TT.tanh,
                 activ_noise=0.,
                 weight_noise=False,
                 bias_fn='init_bias',
                 bias_scale = 0.,
                 dropout = 1.,
                 init_fn='sample_weights',
                 kind_reg = None,
                 grad_scale = 1.,
                 profile = 0,
                 gater_activation = TT.nnet.sigmoid,
                 reseter_activation = TT.nnet.sigmoid,
                 gating=True,
                 reseting=True,
                 name=None):
        """
        :type rng: numpy random generator
        :param rng: numpy random generator

        :type n_hids: int
        :param n_hids: Number of hidden units

        :type activation: string or function
        :param activation: Activation function for the hidden state
            (a string is resolved with ``eval``)

        :type scale: float
        :param scale: depending on the initialization function, it can be
            the standard deviation of the Gaussian from which the weights
            are sampled or the largest singular value

        :type sparsity: int
        :param sparsity: if negative, the weight matrix is dense;
            otherwise this many randomly selected input units are
            connected to an output unit

        :type weight_noise: bool
        :param weight_noise: If true, the model is used with weight noise
            (and the right shared variable are constructed, to keep track
            of the noise)

        :type dropout: float
        :param dropout: the probability with which hidden units are
            dropped from the hidden layer. If set to 1, dropout is not
            used

        :type init_fn: string or function
        :param init_fn: function used to initialize the weights of the
            layer. We recommend using either `sample_weights_classic` or
            `sample_weights` defined in the utils

        :type bias_fn: string or function
        :param bias_fn: function used to initialize the biases. We
            recommend using `init_bias` defined in the utils

        :type bias_scale: float
        :param bias_scale: argument passed to `bias_fn`, depicting the
            scale of the initial bias

        :type grad_scale: float or theano scalar
        :param grad_scale: factor with which the gradients with respect to
            the parameters of this layer are scaled. It is used for
            differentiating between the different parameters of a model.

        :type gater_activation: string or function
        :param gater_activation: The activation function of the update
            gate

        :type reseter_activation: string or function
        :param reseter_activation: The activation function of the reset
            gate

        :type name: string
        :param name: name of the layer (used to name parameters). NB: in
            this library names are very important because certain parts of
            the code relies on name to disambiguate between variables,
            therefore each layer should have a unique name.

        NOTE(review): unlike RecurrentLayer, the `gating` and `reseting`
        arguments are accepted here but never stored on self -- the body
        below makes no use of them; both gates are always created by
        _init_params.  Confirm whether that is intentional.
        """
        self.grad_scale = grad_scale
        # Resolve string-named functions (trusted, internal config);
        # strings must name functions already in scope.
        if type(init_fn) is str or type(init_fn) is unicode:
            init_fn = eval(init_fn)
        if type(bias_fn) is str or type(bias_fn) is unicode:
            bias_fn = eval(bias_fn)
        if type(activation) is str or type(activation) is unicode:
            activation = eval(activation)
        if type(gater_activation) is str or type(gater_activation) is unicode:
            gater_activation = eval(gater_activation)
        if type(reseter_activation) is str or type(reseter_activation) is unicode:
            reseter_activation = eval(reseter_activation)
        self.scale = scale
        self.sparsity = sparsity
        self.activation = activation
        self.n_hids = n_hids
        self.bias_scale = bias_scale
        self.bias_fn = bias_fn
        self.init_fn = init_fn
        self.weight_noise = weight_noise
        self.activ_noise = activ_noise
        self.profile = profile
        self.dropout = dropout
        self.gater_activation = gater_activation
        self.reseter_activation = reseter_activation
        assert rng is not None, "random number generator should not be empty!"
        super(DoubleRecurrentLayer, self).__init__(self.n_hids,
                self.n_hids, rng, name)
        self.trng = RandomStreams(self.rng.randint(int(1e6)))
        self.params = []
        self._init_params()
def _init_params(self):
self.W_hh = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="W_%s"%self.name)
self.params = [self.W_hh]
self.G_hh = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="G_%s"%self.name)
self.params.append(self.G_hh)
self.R_hh = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="R_%s"%self.name)
self.params.append(self.R_hh)
self.Wrev_hh = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="Wrev_%s"%self.name)
self.params += [self.Wrev_hh]
self.Grev_hh = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="Grev_%s"%self.name)
self.params.append(self.Grev_hh)
self.Rrev_hh = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="Rrev_%s"%self.name)
self.params.append(self.Rrev_hh)
self.Wgg_fwd = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="Wgg_fwd_%s"%self.name)
self.params += [self.Wgg_fwd]
self.Wgg_rev = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="Wgg_rev_%s"%self.name)
self.params += [self.Wgg_rev]
self.W2_hh = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="W2_%s"%self.name)
self.params.append(self.W2_hh)
self.U2_hh = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="U2_%s"%self.name)
self.params.append(self.U2_hh)
self.V2_hh = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="V2_%s"%self.name)
self.params.append(self.V2_hh)
self.G2_hh = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="G2_%s"%self.name)
self.params.append(self.G2_hh)
self.R2_hh = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="R2_%s"%self.name)
self.params.append(self.R2_hh)
self.S_h = theano.shared(
eval('numpy.'+theano.config.floatX)(0.01 * numpy.random.rand(self.n_hids)),
#sample_weights_classic(self.n_hids, 1, -1, 0.01, rng=self.rng),
name='S_%s'%self.name)
self.params.append(self.S_h)
self.W_att = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="W_att_%s"%self.name)
self.params.append(self.W_att)
self.U_att = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="U_att_%s"%self.name)
self.params.append(self.U_att)
self.V_att = theano.shared(
eval('numpy.'+theano.config.floatX)(0.01 * numpy.random.rand(self.n_hids)),
name='V_att_%s'%self.name)
self.params.append(self.V_att)
self.params_grad_scale = [self.grad_scale for x in self.params]
self.restricted_params = [x for x in self.params]
if self.weight_noise:
self.nW_hh = theano.shared(self.W_hh.get_value()*0, name='noise_'+self.W_hh.name)
self.nG_hh = theano.shared(self.G_hh.get_value()*0, name='noise_'+self.G_hh.name)
self.nR_hh = theano.shared(self.G_hh.get_value()*0, name='noise_'+self.G_hh.name)
self.noise_params = [self.nW_hh,self.nG_hh,self.nR_hh]
self.nW2_hh = theano.shared(self.W2_hh.get_value()*0, name='noise_'+self.W2_hh.name)
self.nG2_hh = theano.shared(self.G2_hh.get_value()*0, name='noise_'+self.G2_hh.name)
self.nR2_hh = theano.shared(self.G2_hh.get_value()*0, name='noise_'+self.G2_hh.name)
self.noise_params += [self.nW2_hh,self.nG2_hh,self.nR2_hh]
self.noise_params_shape_fn = [constant_shape(x.get_value().shape)
for x in self.noise_params]
    def fprop(self,
              state_below,
              mask=None,
              init_state=None,
              gater_below=None,
              reseter_below=None,
              nsteps=None,
              batch_size=None,
              use_noise=True,
              truncate_gradient=-1,
              no_noise_bias = False
              ):
        # Forward pass. Structure visible in this method:
        #   1. a gated (GRU-style) recurrence scanned forward over time,
        #   2. the same recurrence with *rev weights scanned over the
        #      time-reversed inputs, flipped back afterwards,
        #   3. both state sequences merged via Wgg_fwd/Wgg_rev,
        #   4. a second gated layer that soft-attends over the merged
        #      sequence each step and emits a per-step stopping probability,
        #   5. output = second-layer states weighted by the cumulative
        #      keep-probabilities (betas).
        # NOTE(review): tensor layouts ((time, batch, dim) vs (time, dim))
        # depend on the caller; only the ndim checks below are guaranteed.
        # Concrete float constructor matching theano's configured dtype.
        if theano.config.floatX=='float32':
            floatX = numpy.float32
        else:
            floatX = numpy.float64
        if nsteps is None:
            nsteps = state_below.shape[0]
            # NOTE(review): `/` truncates for Python 2 ints; presumably
            # state_below arrives flattened as (nsteps*batch, dim) here —
            # confirm against callers.
            if batch_size and batch_size != 1:
                nsteps = nsteps / batch_size
        if batch_size is None and state_below.ndim == 3:
            batch_size = state_below.shape[1]
        # Un-flatten 2D inputs into (time, batch, features) when a real
        # batch dimension is implied (symbolic or >1 batch size).
        if state_below.ndim == 2 and \
           (not isinstance(batch_size,int) or batch_size > 1):
            state_below = state_below.reshape((nsteps, batch_size, self.n_in))
            if gater_below:
                gater_below = gater_below.reshape((nsteps, batch_size, self.n_in))
            if reseter_below:
                reseter_below = reseter_below.reshape((nsteps, batch_size, self.n_in))
        if not init_state:
            # Zero-initialize the recurrent state (one row per batch element
            # when batched).
            if not isinstance(batch_size, int) or batch_size != 1:
                init_state = TT.alloc(floatX(0), batch_size, self.n_hids)
            else:
                init_state = TT.alloc(floatX(0), self.n_hids)
        # Optionally add the pre-sampled additive weight noise to the
        # forward- and second-layer recurrent matrices.
        if self.weight_noise and use_noise and self.noise_params:
            W_hh = self.W_hh + self.nW_hh
            G_hh = self.G_hh + self.nG_hh
            R_hh = self.R_hh + self.nR_hh
            W2_hh = self.W2_hh + self.nW2_hh
            G2_hh = self.G2_hh + self.nG2_hh
            R2_hh = self.R2_hh + self.nR2_hh
        else:
            W_hh = self.W_hh
            G_hh = self.G_hh
            R_hh = self.R_hh
            W2_hh = self.W2_hh
            G2_hh = self.G2_hh
            R2_hh = self.R2_hh
        # Reverse-direction, merge, and attention parameters are used
        # without weight noise in either branch.
        Wrev_hh = self.Wrev_hh
        Grev_hh = self.Grev_hh
        Rrev_hh = self.Rrev_hh
        Wgg_fwd = self.Wgg_fwd
        Wgg_rev = self.Wgg_rev
        U2_hh = self.U2_hh
        V2_hh = self.V2_hh
        W_att = self.W_att
        U_att = self.U_att
        V_att = self.V_att
        S_h = self.S_h
        # NOTE(review): the next four assignments duplicate the previous
        # four verbatim — harmless, but looks like a copy/paste leftover.
        W_att = self.W_att
        U_att = self.U_att
        V_att = self.V_att
        S_h = self.S_h
        def _scan1(state_below, mask, state_before, gater_below, reseter_below,
                   use_noise=True, no_noise_bias = False):
            # One forward-direction gated recurrence step: reset gate,
            # candidate state, then update-gate interpolation.
            reseter = self.reseter_activation(TT.dot(state_before, R_hh) + reseter_below)
            reseted_state_before = reseter * state_before
            # Feed the input to obtain potential new state.
            preactiv = TT.dot(reseted_state_before, W_hh) + state_below
            h = self.activation(preactiv)
            gater = self.gater_activation(TT.dot(state_before, G_hh) + gater_below)
            h = gater * h + (1-gater) * state_before
            if self.activ_noise and use_noise:
                h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
            if mask is not None:
                # Masked-out (padded) positions carry the previous state.
                if h.ndim ==2 and mask.ndim==1:
                    mask = mask.dimshuffle(0,'x')
                h = mask * h + (1-mask) * state_before
            return h
        def _scan1rev(state_below, mask, state_before, gater_below, reseter_below,
                      use_noise=True, no_noise_bias = False):
            # Same recurrence as _scan1 but with the reverse-direction
            # weight matrices (Wrev_hh/Grev_hh/Rrev_hh).
            reseter = self.reseter_activation(TT.dot(state_before, Rrev_hh) + reseter_below)
            reseted_state_before = reseter * state_before
            # Feed the input to obtain potential new state.
            preactiv = TT.dot(reseted_state_before, Wrev_hh) + state_below
            h = self.activation(preactiv)
            gater = self.gater_activation(TT.dot(state_before, Grev_hh) + gater_below)
            h = gater * h + (1-gater) * state_before
            if self.activ_noise and use_noise:
                h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
            if mask is not None:
                if h.ndim ==2 and mask.ndim==1:
                    mask = mask.dimshuffle(0,'x')
                h = mask * h + (1-mask) * state_before
            return h
        # Forward scan over time.
        if mask:
            inps = [state_below, mask, gater_below, reseter_below]
            fn = lambda x,y,g,r,z : _scan1(x,y,z, gater_below=g, reseter_below=r,
                                           use_noise=use_noise,
                                           no_noise_bias=no_noise_bias)
        else:
            inps = [state_below, gater_below, reseter_below]
            fn = lambda tx, tg,tr, ty: _scan1(tx, None, ty, gater_below=tg,
                                              reseter_below=tr,
                                              use_noise=use_noise,
                                              no_noise_bias=no_noise_bias)
        rval, updates = theano.scan(fn,
                                    sequences = inps,
                                    outputs_info = [init_state],
                                    name='layer_%s'%self.name,
                                    profile=self.profile,
                                    truncate_gradient = truncate_gradient,
                                    n_steps = nsteps)
        rval_fwd = rval
        # Backward scan: run the reverse recurrence over the time-reversed
        # sequences, then flip the resulting states back below.
        if mask:
            mask_rev = mask[::-1]
        else:
            mask_rev = None
        state_below_rev = state_below[::-1]
        gater_below_rev = gater_below[::-1]
        reseter_below_rev = reseter_below[::-1]
        if mask_rev:
            inps = [state_below_rev, mask_rev, gater_below_rev, reseter_below_rev]
            fn = lambda x,y,g,r,z : _scan1rev(x,y,z, gater_below=g, reseter_below=r,
                                              use_noise=use_noise,
                                              no_noise_bias=no_noise_bias)
        else:
            inps = [state_below_rev, gater_below_rev, reseter_below_rev]
            fn = lambda tx, tg,tr, ty: _scan1rev(tx, None, ty, gater_below=tg,
                                                 reseter_below=tr,
                                                 use_noise=use_noise,
                                                 no_noise_bias=no_noise_bias)
        rval, updates_rev = theano.scan(fn,
                                        sequences = inps,
                                        outputs_info = [init_state],
                                        name='layer_%s'%self.name,
                                        profile=self.profile,
                                        truncate_gradient = truncate_gradient,
                                        n_steps = nsteps)
        updates += updates_rev
        rval_rev = rval[::-1]
        # Merge both directions into the sequence the second layer attends
        # over; precompute its projection for the attention energies.
        state_below = TT.dot(rval_fwd, Wgg_fwd) + TT.dot(rval_rev, Wgg_rev)
        state_below_att = TT.dot(state_below, W_att)
        def _scan2(mask, state_before, beta_before,
                   use_noise=True, no_noise_bias = False):
            # Second-layer step: soft attention over the merged sequence,
            # a gated state update, and the cumulative beta (probability
            # of not yet having stopped).
            # attention
            state_before_att = TT.dot(state_before, U_att)
            if state_below_att.ndim == 3:
                att = TT.tanh(state_below_att + state_before_att[None,:,:])
            else:
                att = TT.tanh(state_below_att + state_before_att[None,:])
            # Normalized attention weights over the time axis (axis 0).
            att = TT.exp(TT.dot(att, V_att))
            att = att / att.sum(0, keepdims=True)
            if state_below.ndim == 3:
                real_below = (state_below * att[:,:,None]).sum(axis=0)
            else:
                real_below = (state_below * att[:,None]).sum(axis=0)
            # reset gate
            reseter = self.reseter_activation(TT.dot(state_before, R2_hh) +
                                              TT.dot(real_below, U2_hh))
            reseted_state_before = reseter * state_before
            # Feed the input to obtain potential new state.
            # NOTE(review): U2_hh appears both as the input projection of the
            # reset gate above and as the recurrent projection here — confirm
            # this weight sharing is intentional.
            preactiv = TT.dot(reseted_state_before, U2_hh) + TT.dot(real_below, W2_hh)
            h = self.activation(preactiv)
            # update gate
            gater = self.gater_activation(TT.dot(state_before, G2_hh) +
                                          TT.dot(real_below, V2_hh))
            h = gater * h + (floatX(1)-gater) * state_before
            if mask is not None:
                if h.ndim ==2 and mask.ndim==1:
                    mask = mask.dimshuffle(0,'x')
                h = mask * h + (floatX(1)-mask) * state_before
            # stopping probability
            s = TT.nnet.sigmoid(TT.dot(h, S_h))
            # beta accumulates the product of (1 - stop) over steps.
            beta = beta_before * (floatX(1) - s)
            return h, beta
        if mask:
            inps = [mask]
            fn = lambda y,z,b : _scan2(y,z,b,use_noise=use_noise, no_noise_bias=no_noise_bias)
        else:
            inps = []
            fn = lambda y,b: _scan2(None, y, b, use_noise=use_noise, no_noise_bias=no_noise_bias)
        # betas start at 1 (nothing has stopped yet).
        if not isinstance(batch_size, int) or batch_size != 1:
            init_beta = TT.alloc(floatX(1), batch_size)
        else:
            init_beta = TT.alloc(floatX(1), 1)
        rval, updates2 = theano.scan(fn,
                                     sequences = inps,
                                     outputs_info = [init_state, init_beta],
                                     name='layer2_%s'%self.name,
                                     profile=self.profile,
                                     truncate_gradient=truncate_gradient,
                                     n_steps = nsteps)
        new_h = rval[0]
        betas = rval[1]
        updates += updates2
        # Final output: each step's state scaled by its cumulative
        # keep-probability.
        if new_h.ndim == 3:
            self.out = new_h * betas[:,:,None]
        else:
            self.out = new_h * betas[:,None]
        self.rval = rval
        self.updates =updates
        return self.out
|
{
"content_hash": "02d2d984125e5145f1a5d3f4fb34ad49",
"timestamp": "",
"source": "github",
"line_count": 2057,
"max_line_length": 119,
"avg_line_length": 39.77442877977637,
"alnum_prop": 0.49495208761122517,
"repo_name": "kyunghyuncho/GroundHog",
"id": "f4ccb752318133e709f9827e3c3176d39cc4abb1",
"size": "81816",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "groundhog/layers/rec_layers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "8023"
},
{
"name": "PHP",
"bytes": "372"
},
{
"name": "Perl",
"bytes": "28016"
},
{
"name": "Python",
"bytes": "484440"
},
{
"name": "Shell",
"bytes": "2189"
}
],
"symlink_target": ""
}
|
""" interactive debugging with PDB, the Python Debugger. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import pdb
import sys
from doctest import UnexpectedException
from _pytest import outcomes
from _pytest.config import hookimpl
from _pytest.config.exceptions import UsageError
def _validate_usepdb_cls(value):
"""Validate syntax of --pdbcls option."""
try:
modname, classname = value.split(":")
except ValueError:
raise argparse.ArgumentTypeError(
"{!r} is not in the format 'modname:classname'".format(value)
)
return (modname, classname)
def pytest_addoption(parser):
    """Register the debugger-related command line options with pytest."""
    # (flag, keyword arguments) for each option this plugin owns; all go
    # into the "general" option group.
    option_specs = (
        (
            "--pdb",
            dict(
                dest="usepdb",
                action="store_true",
                help="start the interactive Python debugger on errors or KeyboardInterrupt.",
            ),
        ),
        (
            "--pdbcls",
            dict(
                dest="usepdb_cls",
                metavar="modulename:classname",
                type=_validate_usepdb_cls,
                help="start a custom interactive Python debugger on errors. "
                "For example: --pdbcls=IPython.terminal.debugger:TerminalPdb",
            ),
        ),
        (
            "--trace",
            dict(
                dest="trace",
                action="store_true",
                help="Immediately break when running each test.",
            ),
        ),
    )
    group = parser.getgroup("general")
    for flag, kwargs in option_specs:
        group._addoption(flag, **kwargs)
def pytest_configure(config):
    """Install the debugging plugins and monkeypatch ``pdb.set_trace``.

    Registers PdbTrace/PdbInvoke depending on the options given, then
    replaces ``pdb.set_trace`` with the capture-aware ``pytestPDB.set_trace``,
    remembering the previous state so it can be restored on cleanup.
    """
    for optname, plugin_factory, regname in (
        ("trace", PdbTrace, "pdbtrace"),
        ("usepdb", PdbInvoke, "pdbinvoke"),
    ):
        if config.getvalue(optname):
            config.pluginmanager.register(plugin_factory(), regname)
    # Remember exactly the state we are about to clobber.
    saved_state = (pdb.set_trace, pytestPDB._pluginmanager, pytestPDB._config)
    pytestPDB._saved.append(saved_state)
    pdb.set_trace = pytestPDB.set_trace
    pytestPDB._pluginmanager = config.pluginmanager
    pytestPDB._config = config

    # NOTE: not using pytest_unconfigure, since it might get called although
    # pytest_configure was not (if another plugin raises UsageError).
    def fin():
        (
            pdb.set_trace,
            pytestPDB._pluginmanager,
            pytestPDB._config,
        ) = pytestPDB._saved.pop()

    config._cleanup.append(fin)
class pytestPDB(object):
    """ Pseudo PDB that defers to the real pdb. """
    # Class-level state shared across all invocations; pytest_configure
    # fills in _pluginmanager/_config and pushes the previous values
    # onto _saved so they can be restored on cleanup.
    _pluginmanager = None
    _config = None
    _saved = []
    # Depth of nested "debug" commands; output decoration and session
    # exit are suppressed while > 0.
    _recursive_debug = 0
    # Cache of (usepdb_cls option value, wrapper class) from the last
    # _import_pdb_cls call.
    _wrapped_pdb_cls = None
    @classmethod
    def _is_capturing(cls, capman):
        # Returns the capture manager's capturing state, or False when no
        # capture manager is available.
        if capman:
            return capman.is_capturing()
        return False
    @classmethod
    def _import_pdb_cls(cls, capman):
        """Resolve the (possibly user-specified) Pdb class and wrap it."""
        if not cls._config:
            # Happens when using pytest.set_trace outside of a test.
            return pdb.Pdb
        usepdb_cls = cls._config.getvalue("usepdb_cls")
        # Reuse the cached wrapper if --pdbcls has not changed.
        if cls._wrapped_pdb_cls and cls._wrapped_pdb_cls[0] == usepdb_cls:
            return cls._wrapped_pdb_cls[1]
        if usepdb_cls:
            modname, classname = usepdb_cls
            try:
                __import__(modname)
                mod = sys.modules[modname]
                # Handle --pdbcls=pdb:pdb.Pdb (useful e.g. with pdbpp).
                parts = classname.split(".")
                pdb_cls = getattr(mod, parts[0])
                for part in parts[1:]:
                    pdb_cls = getattr(pdb_cls, part)
            except Exception as exc:
                value = ":".join((modname, classname))
                raise UsageError(
                    "--pdbcls: could not import {!r}: {}".format(value, exc)
                )
        else:
            pdb_cls = pdb.Pdb
        wrapped_cls = cls._get_pdb_wrapper_class(pdb_cls, capman)
        cls._wrapped_pdb_cls = (usepdb_cls, wrapped_cls)
        return wrapped_cls
    @classmethod
    def _get_pdb_wrapper_class(cls, pdb_cls, capman):
        """Build a pdb_cls subclass that cooperates with pytest's capturing."""
        import _pytest.config
        # Explicit `object` base keeps this a new-style class on Python 2
        # even if pdb_cls is old-style.
        class PytestPdbWrapper(pdb_cls, object):
            _pytest_capman = capman
            _continued = False
            def do_debug(self, arg):
                # Track nesting so continue/quit inside a recursive debugger
                # do not resume capturing or exit the session.
                cls._recursive_debug += 1
                ret = super(PytestPdbWrapper, self).do_debug(arg)
                cls._recursive_debug -= 1
                return ret
            def do_continue(self, arg):
                # On continue at the outermost level, announce and resume
                # whatever IO capturing was active before entering PDB.
                ret = super(PytestPdbWrapper, self).do_continue(arg)
                if cls._recursive_debug == 0:
                    tw = _pytest.config.create_terminal_writer(cls._config)
                    tw.line()
                    capman = self._pytest_capman
                    capturing = pytestPDB._is_capturing(capman)
                    if capturing:
                        if capturing == "global":
                            tw.sep(">", "PDB continue (IO-capturing resumed)")
                        else:
                            tw.sep(
                                ">",
                                "PDB continue (IO-capturing resumed for %s)"
                                % capturing,
                            )
                        capman.resume()
                    else:
                        tw.sep(">", "PDB continue")
                cls._pluginmanager.hook.pytest_leave_pdb(config=cls._config, pdb=self)
                self._continued = True
                return ret
            do_c = do_cont = do_continue
            def do_quit(self, arg):
                """Raise Exit outcome when quit command is used in pdb.
                This is a bit of a hack - it would be better if BdbQuit
                could be handled, but this would require to wrap the
                whole pytest run, and adjust the report etc.
                """
                ret = super(PytestPdbWrapper, self).do_quit(arg)
                if cls._recursive_debug == 0:
                    outcomes.exit("Quitting debugger")
                return ret
            do_q = do_quit
            do_exit = do_quit
            def setup(self, f, tb):
                """Suspend on setup().
                Needed after do_continue resumed, and entering another
                breakpoint again.
                """
                ret = super(PytestPdbWrapper, self).setup(f, tb)
                if not ret and self._continued:
                    # pdb.setup() returns True if the command wants to exit
                    # from the interaction: do not suspend capturing then.
                    if self._pytest_capman:
                        self._pytest_capman.suspend_global_capture(in_=True)
                return ret
            def get_stack(self, f, t):
                # Like Pdb.get_stack, but in post-mortem mode start the
                # selection at the last frame not marked __tracebackhide__.
                stack, i = super(PytestPdbWrapper, self).get_stack(f, t)
                if f is None:
                    # Find last non-hidden frame.
                    i = max(0, len(stack) - 1)
                    while i and stack[i][0].f_locals.get("__tracebackhide__", False):
                        i -= 1
                return stack, i
        return PytestPdbWrapper
    @classmethod
    def _init_pdb(cls, method, *args, **kwargs):
        """ Initialize PDB debugging, dropping any IO capturing. """
        import _pytest.config
        if cls._pluginmanager is not None:
            capman = cls._pluginmanager.getplugin("capturemanager")
        else:
            capman = None
        if capman:
            capman.suspend(in_=True)
        if cls._config:
            # Announce which debugger entry point fired and what happened
            # to IO capturing (only at the outermost debug level).
            tw = _pytest.config.create_terminal_writer(cls._config)
            tw.line()
            if cls._recursive_debug == 0:
                # Handle header similar to pdb.set_trace in py37+.
                header = kwargs.pop("header", None)
                if header is not None:
                    tw.sep(">", header)
                else:
                    capturing = cls._is_capturing(capman)
                    if capturing == "global":
                        tw.sep(">", "PDB %s (IO-capturing turned off)" % (method,))
                    elif capturing:
                        tw.sep(
                            ">",
                            "PDB %s (IO-capturing turned off for %s)"
                            % (method, capturing),
                        )
                    else:
                        tw.sep(">", "PDB %s" % (method,))
        _pdb = cls._import_pdb_cls(capman)(**kwargs)
        if cls._pluginmanager:
            cls._pluginmanager.hook.pytest_enter_pdb(config=cls._config, pdb=_pdb)
        return _pdb
    @classmethod
    def set_trace(cls, *args, **kwargs):
        """Invoke debugging via ``Pdb.set_trace``, dropping any IO capturing."""
        # Break in the caller's frame, not in this classmethod.
        frame = sys._getframe().f_back
        _pdb = cls._init_pdb("set_trace", *args, **kwargs)
        _pdb.set_trace(frame)
class PdbInvoke(object):
    """Plugin registered for ``--pdb``: enter the debugger on failures."""

    def pytest_exception_interact(self, node, call, report):
        """Drop into post-mortem PDB when a test raises."""
        capman = node.config.pluginmanager.getplugin("capturemanager")
        if capman:
            # Stop capturing and replay what was captured so the user sees
            # the test's output before the debugger prompt appears.
            capman.suspend_global_capture(in_=True)
            out, err = capman.read_global_capture()
            for chunk in (out, err):
                sys.stdout.write(chunk)
        _enter_pdb(node, call.excinfo, report)

    def pytest_internalerror(self, excrepr, excinfo):
        """Enter post-mortem PDB on pytest-internal errors as well."""
        post_mortem(_postmortem_traceback(excinfo))
class PdbTrace(object):
    # Plugin registered for --trace: break at the start of every test.
    @hookimpl(hookwrapper=True)
    def pytest_pyfunc_call(self, pyfuncitem):
        # Rewire the item so its test function runs under pdb.runcall,
        # then let the normal call proceed via the hook-wrapper protocol.
        _test_pytest_function(pyfuncitem)
        yield
def _test_pytest_function(pyfuncitem):
    """Rebind *pyfuncitem* so its test function runs under ``Pdb.runcall``.

    The real test function is smuggled in as a synthetic ``func`` argument
    and the item's callable is replaced with the debugger's ``runcall``,
    which then invokes it under debugger control.
    """
    debugger = pytestPDB._init_pdb("runcall")
    original_test = pyfuncitem.obj
    pyfuncitem.obj = debugger.runcall
    fixtureinfo = pyfuncitem._fixtureinfo
    if "func" in fixtureinfo.argnames:  # pragma: no branch
        raise ValueError("--trace can't be used with a fixture named func!")
    pyfuncitem.funcargs["func"] = original_test
    fixtureinfo.argnames = tuple(fixtureinfo.argnames) + ("func",)
def _enter_pdb(node, excinfo, rep):
    """Show captured output and the traceback, then start post-mortem PDB.

    Returns the (now ``_pdbshown``-flagged) report so the caller can keep it.
    """
    # XXX we re-use the TerminalReporter's terminalwriter
    # because this seems to avoid some encoding related troubles
    # for not completely clear reasons.
    tw = node.config.pluginmanager.getplugin("terminalreporter")._tw
    tw.line()
    showcapture = node.config.option.showcapture
    captured_sections = (
        ("stdout", rep.capstdout),
        ("stderr", rep.capstderr),
        ("log", rep.caplog),
    )
    for sectionname, content in captured_sections:
        if showcapture not in (sectionname, "all"):
            continue
        if not content:
            continue
        tw.sep(">", "captured " + sectionname)
        # Drop one trailing newline so tw.line() does not add a blank line.
        if content.endswith("\n"):
            content = content[:-1]
        tw.line(content)
    tw.sep(">", "traceback")
    rep.toterminal(tw)
    tw.sep(">", "entering PDB")
    rep._pdbshown = True
    post_mortem(_postmortem_traceback(excinfo))
    return rep
def _postmortem_traceback(excinfo):
if isinstance(excinfo.value, UnexpectedException):
# A doctest.UnexpectedException is not useful for post_mortem.
# Use the underlying exception instead:
return excinfo.value.exc_info[2]
else:
return excinfo._excinfo[2]
def post_mortem(t):
    """Run an interactive post-mortem debugging session on traceback *t*.

    If the user quits the debugger (rather than continuing), abort the
    whole test session via ``outcomes.exit``.
    """
    debugger = pytestPDB._init_pdb("post_mortem")
    debugger.reset()
    debugger.interaction(None, t)
    if debugger.quitting:
        outcomes.exit("Quitting debugger")
|
{
"content_hash": "1430cf13cd41672fcb05a717d57a2930",
"timestamp": "",
"source": "github",
"line_count": 333,
"max_line_length": 86,
"avg_line_length": 33.333333333333336,
"alnum_prop": 0.5477477477477477,
"repo_name": "cloudera/hue",
"id": "99d35a5ab77e180e9933af75d39d4f54f6ceedcd",
"size": "11124",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/pytest-4.6.11/src/_pytest/debugging.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "2347"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "Batchfile",
"bytes": "143575"
},
{
"name": "C",
"bytes": "5129166"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "718011"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "680715"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "Closure Templates",
"bytes": "1072"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "Cython",
"bytes": "1016963"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "13576"
},
{
"name": "EJS",
"bytes": "752"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GAP",
"bytes": "29873"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "641"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "28328425"
},
{
"name": "Haml",
"bytes": "920"
},
{
"name": "Handlebars",
"bytes": "173"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "Java",
"bytes": "457398"
},
{
"name": "JavaScript",
"bytes": "39181239"
},
{
"name": "Jinja",
"bytes": "356"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Less",
"bytes": "396102"
},
{
"name": "Lex",
"bytes": "218764"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1751"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "1025937"
},
{
"name": "Mako",
"bytes": "3644004"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "29403"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "84273"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "6235"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Pug",
"bytes": "584"
},
{
"name": "Python",
"bytes": "92881549"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "484108"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "SCSS",
"bytes": "78508"
},
{
"name": "Sass",
"bytes": "770"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "249165"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "SourcePawn",
"bytes": "948"
},
{
"name": "Stylus",
"bytes": "682"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "341963"
},
{
"name": "Twig",
"bytes": "761"
},
{
"name": "TypeScript",
"bytes": "1241396"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "Vue",
"bytes": "350385"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "522199"
},
{
"name": "Yacc",
"bytes": "1070437"
},
{
"name": "jq",
"bytes": "4"
}
],
"symlink_target": ""
}
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
INCLUDES = """
#include <openssl/engine.h>
"""
TYPES = """
static const long Cryptography_HAS_ENGINE_CRYPTODEV;
typedef ... ENGINE;
typedef ... RSA_METHOD;
typedef ... DSA_METHOD;
typedef ... DH_METHOD;
typedef struct {
int (*bytes)(unsigned char *, int);
int (*pseudorand)(unsigned char *, int);
int (*status)();
...;
} RAND_METHOD;
typedef int (*ENGINE_GEN_INT_FUNC_PTR)(ENGINE *);
typedef ... *ENGINE_CTRL_FUNC_PTR;
typedef ... *ENGINE_LOAD_KEY_PTR;
typedef ... *ENGINE_CIPHERS_PTR;
typedef ... *ENGINE_DIGESTS_PTR;
typedef ... ENGINE_CMD_DEFN;
typedef ... UI_METHOD;
static const unsigned int ENGINE_METHOD_RSA;
static const unsigned int ENGINE_METHOD_DSA;
static const unsigned int ENGINE_METHOD_RAND;
static const unsigned int ENGINE_METHOD_CIPHERS;
static const unsigned int ENGINE_METHOD_DIGESTS;
static const unsigned int ENGINE_METHOD_ALL;
static const unsigned int ENGINE_METHOD_NONE;
static const int ENGINE_R_CONFLICTING_ENGINE_ID;
"""
FUNCTIONS = """
ENGINE *ENGINE_get_first(void);
ENGINE *ENGINE_get_last(void);
ENGINE *ENGINE_get_next(ENGINE *);
ENGINE *ENGINE_get_prev(ENGINE *);
int ENGINE_add(ENGINE *);
int ENGINE_remove(ENGINE *);
ENGINE *ENGINE_by_id(const char *);
int ENGINE_init(ENGINE *);
int ENGINE_finish(ENGINE *);
void ENGINE_load_builtin_engines(void);
void ENGINE_cleanup(void);
ENGINE *ENGINE_get_default_RSA(void);
ENGINE *ENGINE_get_default_DSA(void);
ENGINE *ENGINE_get_default_DH(void);
ENGINE *ENGINE_get_default_RAND(void);
ENGINE *ENGINE_get_cipher_engine(int);
ENGINE *ENGINE_get_digest_engine(int);
int ENGINE_set_default_RSA(ENGINE *);
int ENGINE_set_default_DSA(ENGINE *);
int ENGINE_set_default_DH(ENGINE *);
int ENGINE_set_default_RAND(ENGINE *);
int ENGINE_set_default_ciphers(ENGINE *);
int ENGINE_set_default_digests(ENGINE *);
int ENGINE_set_default_string(ENGINE *, const char *);
int ENGINE_set_default(ENGINE *, unsigned int);
unsigned int ENGINE_get_table_flags(void);
void ENGINE_set_table_flags(unsigned int);
int ENGINE_register_RSA(ENGINE *);
void ENGINE_unregister_RSA(ENGINE *);
void ENGINE_register_all_RSA(void);
int ENGINE_register_DSA(ENGINE *);
void ENGINE_unregister_DSA(ENGINE *);
void ENGINE_register_all_DSA(void);
int ENGINE_register_DH(ENGINE *);
void ENGINE_unregister_DH(ENGINE *);
void ENGINE_register_all_DH(void);
int ENGINE_register_RAND(ENGINE *);
void ENGINE_unregister_RAND(ENGINE *);
void ENGINE_register_all_RAND(void);
int ENGINE_register_ciphers(ENGINE *);
void ENGINE_unregister_ciphers(ENGINE *);
void ENGINE_register_all_ciphers(void);
int ENGINE_register_digests(ENGINE *);
void ENGINE_unregister_digests(ENGINE *);
void ENGINE_register_all_digests(void);
int ENGINE_register_complete(ENGINE *);
int ENGINE_register_all_complete(void);
int ENGINE_ctrl(ENGINE *, int, long, void *, void (*)(void));
int ENGINE_cmd_is_executable(ENGINE *, int);
int ENGINE_ctrl_cmd(ENGINE *, const char *, long, void *, void (*)(void), int);
int ENGINE_ctrl_cmd_string(ENGINE *, const char *, const char *, int);
ENGINE *ENGINE_new(void);
int ENGINE_free(ENGINE *);
int ENGINE_up_ref(ENGINE *);
int ENGINE_set_id(ENGINE *, const char *);
int ENGINE_set_name(ENGINE *, const char *);
int ENGINE_set_RSA(ENGINE *, const RSA_METHOD *);
int ENGINE_set_DSA(ENGINE *, const DSA_METHOD *);
int ENGINE_set_DH(ENGINE *, const DH_METHOD *);
int ENGINE_set_RAND(ENGINE *, const RAND_METHOD *);
int ENGINE_set_destroy_function(ENGINE *, ENGINE_GEN_INT_FUNC_PTR);
int ENGINE_set_init_function(ENGINE *, ENGINE_GEN_INT_FUNC_PTR);
int ENGINE_set_finish_function(ENGINE *, ENGINE_GEN_INT_FUNC_PTR);
int ENGINE_set_ctrl_function(ENGINE *, ENGINE_CTRL_FUNC_PTR);
int ENGINE_set_load_privkey_function(ENGINE *, ENGINE_LOAD_KEY_PTR);
int ENGINE_set_load_pubkey_function(ENGINE *, ENGINE_LOAD_KEY_PTR);
int ENGINE_set_ciphers(ENGINE *, ENGINE_CIPHERS_PTR);
int ENGINE_set_digests(ENGINE *, ENGINE_DIGESTS_PTR);
int ENGINE_set_flags(ENGINE *, int);
int ENGINE_set_cmd_defns(ENGINE *, const ENGINE_CMD_DEFN *);
const char *ENGINE_get_id(const ENGINE *);
const char *ENGINE_get_name(const ENGINE *);
const RSA_METHOD *ENGINE_get_RSA(const ENGINE *);
const DSA_METHOD *ENGINE_get_DSA(const ENGINE *);
const DH_METHOD *ENGINE_get_DH(const ENGINE *);
const RAND_METHOD *ENGINE_get_RAND(const ENGINE *);
const EVP_CIPHER *ENGINE_get_cipher(ENGINE *, int);
const EVP_MD *ENGINE_get_digest(ENGINE *, int);
int ENGINE_get_flags(const ENGINE *);
const ENGINE_CMD_DEFN *ENGINE_get_cmd_defns(const ENGINE *);
EVP_PKEY *ENGINE_load_private_key(ENGINE *, const char *, UI_METHOD *, void *);
EVP_PKEY *ENGINE_load_public_key(ENGINE *, const char *, UI_METHOD *, void *);
void ENGINE_add_conf_module(void);
"""
MACROS = """
/* these became macros in 1.1.0 */
void ENGINE_load_openssl(void);
void ENGINE_load_dynamic(void);
void ENGINE_load_cryptodev(void);
"""
CUSTOMIZATIONS = """
#if defined(LIBRESSL_VERSION_NUMBER)
static const long Cryptography_HAS_ENGINE_CRYPTODEV = 0;
void (*ENGINE_load_cryptodev)(void) = NULL;
#else
static const long Cryptography_HAS_ENGINE_CRYPTODEV = 1;
#endif
"""
|
{
"content_hash": "a137b86f44cd40b311d4e2f3767434e5",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 79,
"avg_line_length": 36.294520547945204,
"alnum_prop": 0.7329684846197396,
"repo_name": "jayceyxc/hue",
"id": "afdd54e42913888f7472995ea85bdf1da5294c02",
"size": "5299",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/cryptography-1.3.1/src/_cffi_src/openssl/engine.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3096"
},
{
"name": "Batchfile",
"bytes": "41710"
},
{
"name": "C",
"bytes": "2716690"
},
{
"name": "C++",
"bytes": "200268"
},
{
"name": "CSS",
"bytes": "630891"
},
{
"name": "Emacs Lisp",
"bytes": "11704"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Go",
"bytes": "6671"
},
{
"name": "HTML",
"bytes": "23982883"
},
{
"name": "Java",
"bytes": "575404"
},
{
"name": "JavaScript",
"bytes": "5068327"
},
{
"name": "Lex",
"bytes": "36239"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "Makefile",
"bytes": "146292"
},
{
"name": "Mako",
"bytes": "3334641"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "PLSQL",
"bytes": "31565"
},
{
"name": "PLpgSQL",
"bytes": "3646"
},
{
"name": "Perl",
"bytes": "3499"
},
{
"name": "PigLatin",
"bytes": "328"
},
{
"name": "Python",
"bytes": "45608023"
},
{
"name": "Roff",
"bytes": "16669"
},
{
"name": "Shell",
"bytes": "46700"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "Thrift",
"bytes": "278712"
},
{
"name": "Visual Basic",
"bytes": "2884"
},
{
"name": "XSLT",
"bytes": "517693"
},
{
"name": "Yacc",
"bytes": "381310"
}
],
"symlink_target": ""
}
|
"""
OVERALL CREDIT TO:
t0mm0, Eldorado, VOINAGE, BSTRDMKR, tknorris, smokdpi, TheHighway
urlresolver XBMC Addon
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
from lib import jsunpack
from urlresolver9 import common
from urlresolver9.resolver import UrlResolver, ResolverError
class WatchersResolver(UrlResolver):
    """Resolver for watchers.to hosted video links.

    Fetches the embed page, unpacks any packed (p,a,c,k,e,d) javascript in
    it, and extracts the first ``.m3u8`` (preferred) or ``.mp4`` stream URL.
    """
    name = "watchers"
    domains = ['watchers.to']
    # Raw string so the regex escapes are explicit; value is unchanged.
    pattern = r'(?://|\.)(watchers\.to)/(?:embed-)?([a-zA-Z0-9]+)'

    def __init__(self):
        self.net = common.Net()

    def get_media_url(self, host, media_id):
        """Return a playable stream URL (with a Referer header appended).

        Raises ResolverError when the fetched page contains no recognizable
        stream link.
        """
        web_url = self.get_url(host, media_id)
        response = self.net.http_GET(web_url)
        html = response.content
        if not html:
            # Preserve historical behavior for an empty page (implicit
            # None return rather than an exception) — TODO confirm callers
            # would not prefer a ResolverError here.
            return None
        packed = re.search(r'(eval\(function.*?)\s*</script>', html, re.DOTALL)
        js = jsunpack.unpack(packed.group(1)) if packed else html
        # Prefer HLS (.m3u8) streams, falling back to plain .mp4.
        # NOTE(review): the '.' before each extension is unescaped in the
        # original patterns and matches any character; kept as-is.
        video_url = None
        for link_pattern in (r'([^"]*.m3u8)', r'([^"]*.mp4)'):
            link = re.search(link_pattern, js)
            if link:
                video_url = link.group(1)
                common.log_utils.log_debug('watchers.to Link Found: %s' % video_url)
                break
        if video_url is not None:
            return video_url + '|Referer=http://watchers.to/player7/jwplayer.flash.swf'
        raise ResolverError('No playable video found.')

    def get_url(self, host, media_id):
        """Build the embed page URL for *media_id*."""
        return 'http://watchers.to/embed-%s.html' % media_id
|
{
"content_hash": "0d658128b56dd581694d725b3fa7c0cc",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 91,
"avg_line_length": 35.656716417910445,
"alnum_prop": 0.6019254918375889,
"repo_name": "mrknow/filmkodi",
"id": "07c19699bd88df9696e2e14a1bdd63d91e73b00d",
"size": "2389",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "script.mrknow.urlresolver/lib/urlresolver9/plugins/watchers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7510"
},
{
"name": "Python",
"bytes": "8058464"
},
{
"name": "Shell",
"bytes": "18531"
}
],
"symlink_target": ""
}
|
import sys
class Loader:
def __init__(self, code_size, data_size=0):
from peachpy.util import is_int
if not is_int(code_size):
raise TypeError("code size must be an integer")
if not is_int(data_size):
raise TypeError("data size must be an integer")
if code_size <= 0:
raise ValueError("code size must be positive")
if data_size < 0:
raise ValueError("data size must be non-negative")
import mmap
self.allocation_granularity = max(mmap.ALLOCATIONGRANULARITY, mmap.PAGESIZE)
self.code_address = None
self.code_size = self.allocation_size(code_size)
self.data_address = None
self.data_size = self.allocation_size(data_size)
self._release_memory = None
osname = sys.platform.lower()
if osname == "darwin" or osname.startswith("linux"):
import ctypes
if osname == "darwin":
libc = ctypes.cdll.LoadLibrary("libc.dylib")
else:
libc = ctypes.cdll.LoadLibrary("libc.so.6")
# void* mmap(void* addr, size_t len, int prot, int flags, int fd, off_t offset)
mmap_function = libc.mmap
mmap_function.restype = ctypes.c_void_p
mmap_function.argtype = [ctypes.c_void_p, ctypes.c_size_t,
ctypes.c_int, ctypes.c_int,
ctypes.c_int, ctypes.c_size_t]
# int munmap(void* addr, size_t len)
munmap_function = libc.munmap
munmap_function.restype = ctypes.c_int
munmap_function.argtype = [ctypes.c_void_p, ctypes.c_size_t]
def munmap(address, size):
munmap_result = munmap_function(address, size)
assert munmap_result == 0
self._release_memory = lambda address_size: munmap(address_size[0], address_size[1])
# Allocate code segment
code_address = mmap_function(None, self.code_size,
mmap.PROT_READ | mmap.PROT_WRITE | mmap.PROT_EXEC,
mmap.MAP_ANON | mmap.MAP_PRIVATE,
-1, 0)
if code_address == -1:
raise OSError("Failed to allocate memory for code segment")
self.code_address = code_address
if self.data_size > 0:
# Allocate data segment
data_address = mmap_function(None, self.data_size,
mmap.PROT_READ | mmap.PROT_WRITE,
mmap.MAP_ANON | mmap.MAP_PRIVATE,
-1, 0)
if data_address == -1:
raise OSError("Failed to allocate memory for data segment")
self.data_address = data_address
elif osname == "win32":
import ctypes
# From WinNT.h
PAGE_READWRITE = 0x04
PAGE_EXECUTE_READWRITE = 0x40
MEM_COMMIT = 0x1000
MEM_RESERVE = 0x2000
MEM_RELEASE = 0x8000
# LPVOID WINAPI VirtualAlloc(LPVOID address, SIZE_T size, DWORD allocationType, DWORD protect)
VirtualAlloc_function = ctypes.windll.kernel32.VirtualAlloc
VirtualAlloc_function.restype = ctypes.c_void_p
VirtualAlloc_function.argtype = [ctypes.c_void_p, ctypes.c_size_t, ctypes.c_ulong, ctypes.c_ulong]
# BOOL WINAPI VirtualFree(LPVOID lpAddress, SIZE_T dwSize, DWORD dwFreeType)
VirtualFree_function = ctypes.windll.kernel32.VirtualFree
VirtualFree_function.restype = ctypes.c_int
VirtualFree_function.argtype = [ctypes.c_void_p, ctypes.c_size_t, ctypes.c_ulong]
def VirtualFree(address, size):
VirtualFree_result = VirtualFree_function(address, size, MEM_RELEASE)
assert VirtualFree_result != 0
self._release_memory = lambda address_size: VirtualFree(address_size[0], address_size[1])
# Allocate code segment
code_address = VirtualAlloc_function(None, self.code_size,
MEM_RESERVE | MEM_COMMIT,
PAGE_EXECUTE_READWRITE)
if not code_address:
raise OSError("Failed to allocate memory for code segment")
self.code_address = code_address
if self.data_size > 0:
# Allocate data segment
data_address = VirtualAlloc_function(None, self.data_size,
MEM_RESERVE | MEM_COMMIT,
PAGE_READWRITE)
if not data_address:
raise OSError("Failed to allocate memory for data segment")
self.data_address = data_address
elif osname == "nacl":
import dynacl
# Allocate code segment
self.allocation = dynacl.allocate(self.code_size, self.data_size)
self.code_address = self.allocation.code_address
self.data_address = self.allocation.data_address
self.copy_code = self._nacl_copy_code
else:
raise ValueError("Unknown host OS: " + osname)
def allocation_size(self, segment_size):
import peachpy.util
return peachpy.util.roundup(segment_size, self.allocation_granularity)
def copy_code(self, code_segment):
import ctypes
ctypes.memmove(self.code_address,
ctypes.c_char_p(bytes(code_segment)),
len(code_segment))
def _nacl_copy_code(self, code_segment):
code_offset = 0
self.allocation.dyncode_create(code_segment, code_offset)
    def __del__(self):
        # Finalizer: hand the mmap'd (unix) / VirtualAlloc'd (win32)
        # segments back to the OS. _release_memory is the per-OS
        # unmapper installed during setup; presumably it is left unset
        # on the NaCl path, where `self.allocation` owns the memory —
        # TODO confirm the attribute always exists there.
        if self._release_memory is not None:
            if self.code_address is not None:
                # free the executable code segment, then null the
                # pointer so a second __del__ call is a no-op
                self._release_memory((self.code_address, self.code_size))
                self.code_address = None
            if self.data_address is not None:
                # free the read/write data segment (only allocated when
                # data_size > 0)
                self._release_memory((self.data_address, self.data_size))
                self.data_address = None
|
{
"content_hash": "18550d6d33b5e3c35268ff4f0fe15d85",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 110,
"avg_line_length": 44.394366197183096,
"alnum_prop": 0.546002538071066,
"repo_name": "silky/PeachPy",
"id": "25e3227ab21c34ed9e04e1f8c7a7297ac1c5d666",
"size": "6450",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "peachpy/loader.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1809768"
}
],
"symlink_target": ""
}
|
from odoo import api, fields, models, _
class ProductTemplate(models.Model):
    """Adds a downloadable-attachment counter and an action to browse
    the digital files sold with a product template."""
    _inherit = ['product.template']

    # Number of downloadable ir.attachment records tied to this template.
    attachment_count = fields.Integer(compute='_compute_attachment_count', string="File")

    @api.multi
    def _compute_attachment_count(self):
        """Count downloadable attachments for every template in the
        recordset with a single read_group query (one DB round-trip
        instead of one search_count per record)."""
        attachment_data = self.env['ir.attachment'].read_group([('res_model', '=', self._name), ('res_id', 'in', self.ids), ('product_downloadable', '=', True)], ['res_id'], ['res_id'])
        # dict comprehension instead of dict([...]) — same mapping,
        # idiomatic and avoids building an intermediate list
        mapped_data = {data['res_id']: data['res_id_count'] for data in attachment_data}
        for product_template in self:
            product_template.attachment_count = mapped_data.get(product_template.id, 0)

    @api.multi
    def action_open_attachments(self):
        """Return an act_window showing this template's downloadable
        attachments (kanban view, create defaults pre-filled)."""
        self.ensure_one()
        return {
            'name': _('Digital Attachments'),
            'domain': [('res_model', '=', self._name), ('res_id', '=', self.id), ('product_downloadable', '=', True)],
            'res_model': 'ir.attachment',
            'type': 'ir.actions.act_window',
            'view_mode': 'kanban,form',
            'view_type': 'form',
            'context': "{'default_res_model': '%s','default_res_id': %d, 'default_product_downloadable': True}" % (self._name, self.id),
            'help': """
        <p class="oe_view_nocontent_create">Click on create to add attachments for this digital product.</p>
        <p>The attached files are the ones that will be purchased and sent to the customer.</p>
        """,
        }
class Product(models.Model):
    """Variant-level counterpart of the template extension: counts and
    opens downloadable attachments of the variant and its template."""
    _inherit = 'product.product'

    # Downloadable files attached to the variant or its template.
    attachment_count = fields.Integer(compute='_compute_attachment_count', string="File")

    @api.multi
    def _compute_attachment_count(self):
        """Sum downloadable attachments linked to the variant itself
        plus those linked to its parent template."""
        IrAttachment = self.env['ir.attachment']
        for product in self:
            template_domain = [('res_model', '=', 'product.template'), ('res_id', 'in', product.product_tmpl_id.ids), ('product_downloadable', '=', True)]
            variant_domain = [('res_model', '=', 'product.product'), ('res_id', 'in', product.ids), ('product_downloadable', '=', True)]
            template_count = IrAttachment.search_count(template_domain)
            variant_count = IrAttachment.search_count(variant_domain)
            product.attachment_count = template_count + variant_count

    @api.multi
    def action_open_attachments(self):
        """Open a kanban of downloadable attachments belonging either
        to this variant or to its template."""
        self.ensure_one()
        # match attachments of the template OR of this specific variant
        attachment_domain = [('product_downloadable', '=', True), '|',
                             '&', ('res_model', '=', 'product.template'), ('res_id', '=', self.product_tmpl_id.id),
                             '&', ('res_model', '=', self._name), ('res_id', '=', self.id)]
        default_context = "{'default_res_model': '%s','default_res_id': %d, 'default_product_downloadable': True}" % (self._name, self.id)
        return {
            'name': _('Digital Attachments'),
            'domain': attachment_domain,
            'res_model': 'ir.attachment',
            'type': 'ir.actions.act_window',
            'view_mode': 'kanban,form',
            'view_type': 'form',
            'context': default_context,
            'help': """
        <p class="oe_view_nocontent_create">Click on create to add attachments for this digital product.</p>
        <p>The attached files are the ones that will be purchased and sent to the customer.</p>
        """,
        }
|
{
"content_hash": "e90da82016698c87a1b1f17e5701ee52",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 188,
"avg_line_length": 50.234375,
"alnum_prop": 0.5660964230171073,
"repo_name": "vileopratama/vitech",
"id": "9ad8dc20c8d3192cc94fb2a640e9e33f1b780cff",
"size": "3315",
"binary": false,
"copies": "35",
"ref": "refs/heads/master",
"path": "src/addons/website_sale_digital/models/product.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "CSS",
"bytes": "2125999"
},
{
"name": "HTML",
"bytes": "252393"
},
{
"name": "Java",
"bytes": "1840167"
},
{
"name": "JavaScript",
"bytes": "6176224"
},
{
"name": "Makefile",
"bytes": "19072"
},
{
"name": "Mako",
"bytes": "7659"
},
{
"name": "NSIS",
"bytes": "16782"
},
{
"name": "Python",
"bytes": "9438805"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "22312"
},
{
"name": "Vim script",
"bytes": "406"
},
{
"name": "XSLT",
"bytes": "11489"
}
],
"symlink_target": ""
}
|
import csv
import json
import math
import os
import time
# Config
BPM = 60 # Beats per minute, e.g. 60, 75, 100, 120, 150
DIVISIONS_PER_BEAT = 4 # e.g. 4 = quarter notes, 8 = eighth notes, etc
PX_PER_BEAT = 40 # horizontal pixels of painting scanned per beat
VARIANCE_MS = 20 # +/- milliseconds an instrument note should be off by to give it a little more "natural" feel
GAIN = 1.0 # base gain
TEMPO = 1.0 # base tempo
PERCENT_TOTAL_NOTE_THRESHOLD = 0.16 # NOTE(review): not referenced in this script; verify before relying on it
BRIGHTNESS_THRESHOLD = 20 # samples darker than this are excluded from hue variance
SATURATION_THRESHOLD = 20 # samples greyer than this are excluded from hue variance
# Files
INSTRUMENTS_INPUT_FILE = 'data/instruments.csv'
SYNESTHESIA_INPUT_FILE = 'data/synesthesia.csv'
EVENTS_INPUT_FILE = 'data/events.csv'
PAINTING_SAMPLES_INPUT_FILE = 'data/painting_samples.csv'
REPORT_SUMMARY_OUTPUT_FILE = 'data/report_summary.csv'
REPORT_NOTES_OUTPUT_FILE = 'data/report_summary_notes.csv'
REPORT_SEQUENCE_OUTPUT_FILE = 'data/report_sequence.csv'
INSTRUMENTS_OUTPUT_FILE = 'data/ck_instruments.csv'
SEQUENCE_OUTPUT_FILE = 'data/ck_sequence.csv'
VISUALIZATION_OUTPUT_FILE = 'visualization/data/paintings.json'
INSTRUMENTS_DIR = 'instruments/'
# Output options
WRITE_SEQUENCE = True
WRITE_REPORT = True
WRITE_JSON = False
# Calculations
BEAT_MS = round(60.0 / BPM * 1000) # duration of one beat in milliseconds
ROUND_TO_NEAREST = round(BEAT_MS/DIVISIONS_PER_BEAT) # quantization step in ms
PX_PER_MS = PX_PER_BEAT / BEAT_MS # painting pixels traversed per millisecond
print('Building sequence at '+str(BPM)+' BPM ('+str(BEAT_MS)+'ms per beat)')
# Initialize Variables
instruments = [] # active rows from instruments.csv
paintings = [] # one entry per painting with per-note aggregates
events = [] # NOTE(review): appears unused in this script — confirm
synesthesia = [] # (hue, saturation) -> note mapping rows
notes = [] # distinct note names (replaced by a set after reading synesthesia)
sequence = [] # final ordered list of instrument hits
hindex = 0 # Halton-sequence cursor used for timing jitter
total_ms = 0 # total duration of the piece in ms
# For creating pseudo-random numbers
def halton(index, base):
    """Return element *index* of the Halton low-discrepancy sequence
    for the given *base*: a deterministic quasi-random value in [0, 1)."""
    value = 0.0
    fraction = 1.0 / base
    remaining = 1.0 * index
    while remaining > 0:
        value += fraction * (remaining % base)
        remaining = math.floor(remaining / base)
        fraction = fraction / base
    return value
# Find index of first item that matches value
def findInList(items, key, value):
    """Return the index of the first mapping in *items* whose *key*
    entry equals *value*, or -1 when no item matches.

    The first parameter was renamed from ``list`` (it shadowed the
    builtin); every call site in this script passes it positionally,
    so the rename is backward-compatible.
    """
    for index, item in enumerate(items):
        if item[key] == value:
            return index
    return -1
# Mean of list
def mean(data):
    """Arithmetic mean of *data* (0 for an empty sequence).

    The sum is promoted to float so the result is correct under
    Python 2's integer division as well (this script uses Py2-style
    ``open(..., 'rb')`` CSV I/O); identical results under Python 3.
    Iterators are materialized first so len() works.
    """
    if iter(data) is data:
        data = list(data)
    n = len(data)
    if n < 1:
        return 0
    return 1.0 * sum(data) / n
def variance(data):
    """Population variance of *data*; 0 when the sequence is empty."""
    if iter(data) is data:
        data = list(data)
    n = len(data)
    if n < 1:
        return 0
    c = mean(data)
    total = sum((x - c) ** 2 for x in data)
    # second pass compensates for floating-point error in the mean
    total -= sum(x - c for x in data) ** 2 / len(data)
    return total / n
# Standard deviation of list
def stdev(data):
    """Population standard deviation of *data* (sqrt of variance)."""
    return math.sqrt(variance(data))
# floor {n} to nearest {nearest}
def floorToNearest(n, nearest):
    """Round *n* down to the nearest multiple of *nearest*, as a float."""
    multiple = math.floor(1.0 * n / nearest)
    return multiple * nearest * 1.0
# round {n} to nearest {nearest}
def roundToNearest(n, nearest):
    """Round *n* to the nearest multiple of *nearest*, as a float."""
    multiple = round(1.0 * n / nearest)
    return multiple * nearest * 1.0
# Read instruments from file
# NOTE(review): 'rb' with csv.reader is Python 2 style; Python 3 needs
# open(..., 'r', newline='') — confirm target interpreter before porting.
with open(INSTRUMENTS_INPUT_FILE, 'rb') as f:
    r = csv.reader(f, delimiter=',')
    next(r, None) # remove header
    # one CSV row per instrument; column order must match this unpack
    for file,artist,size_min,size_max,bri_min,bri_max,var_min,var_max,year_min,year_max,note,from_gain,to_gain,from_tempo,to_tempo,tempo_offset,interval_phase,interval,interval_offset,active in r:
        if int(active):
            index = len(instruments)
            # build instrument object
            _beat_ms = int(round(BEAT_MS/TEMPO)) # base beat length at global tempo
            instrument = {
                'index': index,
                'file': INSTRUMENTS_DIR + file,
                'artist': artist,
                # matching windows: painting metrics must fall in
                # [min, max) for this instrument to play
                'size_min': float(size_min),
                'size_max': float(size_max),
                'bri_min': float(bri_min),
                'bri_max': float(bri_max),
                'var_min': float(var_min),
                'var_max': float(var_max),
                'year_min': int(year_min),
                'year_max': int(year_max),
                'note': note,
                # gains/tempi scaled by the global GAIN/TEMPO multipliers
                'from_gain': float(from_gain) * GAIN,
                'to_gain': float(to_gain) * GAIN,
                'from_tempo': float(from_tempo) * TEMPO,
                'to_tempo': float(to_tempo) * TEMPO,
                'tempo_offset': float(tempo_offset),
                'interval_ms': int(int(interval_phase)*_beat_ms),
                'interval': int(interval),
                'interval_offset': int(interval_offset),
                # beat durations at the start/end of the instrument's envelope
                'from_beat_ms': int(round(BEAT_MS/(float(from_tempo)*TEMPO))),
                'to_beat_ms': int(round(BEAT_MS/(float(to_tempo)*TEMPO))),
                'beat_ms': _beat_ms
            }
            # add instrument to instruments
            instruments.append(instrument)
# Read synesthesia from file: each row anchors a musical note at a
# (hue, saturation) coordinate; getNote() picks the nearest anchor.
with open(SYNESTHESIA_INPUT_FILE, 'rb') as f:
    r = csv.reader(f, delimiter=',')
    next(r, None) # remove header
    for _hue, _saturation, _color, _note in r:
        synesthesia.append({
            'hue': int(_hue),
            'saturation': int(_saturation),
            'color': _color,
            'note': _note
        })
# distinct note names (replaces the initial empty list with a set)
notes = set([s["note"] for s in synesthesia])
# Distance function
def distance(x1, y1, x2, y2):
    """Euclidean distance between points (x1, y1) and (x2, y2).

    Uses math.hypot instead of the hand-rolled sqrt of squared
    differences — same result, standard-library idiom, and robust
    against intermediate overflow for large coordinates.
    """
    return math.hypot(x1 - x2, y1 - y2)
# Retrieves a note based on hue and saturation
def getNote(hue, saturation):
    """Return the note of the synesthesia anchor closest (Euclidean,
    in hue/saturation space) to the given colour; ties keep the
    earliest anchor in the table."""
    best = synesthesia[0]
    best_d = distance(hue, saturation, best['hue'], best['saturation'])
    for entry in synesthesia[1:]:
        d = distance(hue, saturation, entry['hue'], entry['saturation'])
        if d < best_d:
            best_d = d
            best = entry
    return best['note']
# Read paintings from file. Rows are colour samples grouped by painting
# file; a change in the file column starts a new painting entry.
min_year = None
max_year = None
with open(PAINTING_SAMPLES_INPUT_FILE, 'rb') as f:
    r = csv.reader(f, delimiter=',')
    next(r, None) # remove header
    current_file = None
    for _title,_artist,_position,_year,_file,_painting_width,_painting_height,_year_start_ms,_year_stop_ms,_hue,_saturation,_brightness,_x,_y,_width,_height in r:
        _area = int(_width) * int(_height) # sample patch area in px^2
        # Calc min/max
        min_year = int(_year) if min_year is None else min_year
        max_year = int(_year) if max_year is None else max_year
        min_year = min([min_year, int(_year)])
        max_year = max([max_year, int(_year)])
        # Retrieve note
        note = getNote(int(_hue), int(_saturation))
        # Init sample
        sample = {
            'hue': int(_hue),
            'saturation': int(_saturation),
            'brightness': int(_brightness),
            'x': int(_x),
            'y': int(_y),
            'width': int(_width),
            'height': int(_height),
            'area': _area,
            'note': note
        }
        # Add painting to list (first sample of a new file)
        if current_file != _file:
            index = len(paintings)
            # playback duration derives from painting width via PX_PER_MS
            duration = 1.0 * int(_painting_width) / PX_PER_MS
            paintings.append({
                'index': index,
                'title': _title,
                'artist': _artist,
                'position': int(_position),
                'year': int(_year),
                'file': _file,
                'width': int(_painting_width),
                'height': int(_painting_height),
                'start_ms': int(_year_start_ms),
                'stop_ms': int(_year_start_ms) + duration,
                'samples': [sample],
                'notes': [
                    {'note': note, 'areas': [_area], 'brightnesses': [int(_brightness)]}
                ]
            })
            current_file = _file
        # Append sample to current painting
        else:
            paintings[-1]['samples'].append(sample)
            # accumulate per-note area/brightness lists for later stats
            note_i = findInList(paintings[-1]['notes'], 'note', note)
            if note_i >= 0:
                paintings[-1]['notes'][note_i]['areas'].append(_area)
                paintings[-1]['notes'][note_i]['brightnesses'].append(int(_brightness))
            else:
                paintings[-1]['notes'].append({'note': note, 'areas': [_area], 'brightnesses': [int(_brightness)]})
# Aggregate, normalize, sort data
max_area_mean = None
min_area_mean = None
for pi, painting in enumerate(paintings):
    sample_count = len(painting['samples'])
    paintings[pi]['sample_count'] = sample_count
    # Calculate count/sum/mean note size and brightness
    for ni, note in enumerate(painting['notes']):
        # Calc area
        area_count = len(note['areas'])
        area_sum = sum(note['areas'])
        area_mean = 1.0 * area_sum / area_count
        paintings[pi]['notes'][ni]['count_area'] = area_count
        paintings[pi]['notes'][ni]['total_area'] = area_sum
        paintings[pi]['notes'][ni]['mean_area'] = area_mean
        # Calc brightness
        brightness_count = len(note['brightnesses'])
        brightness_sum = sum(note['brightnesses'])
        brightness_mean = 1.0 * brightness_sum / brightness_count
        paintings[pi]['notes'][ni]['count_brightness'] = brightness_count
        paintings[pi]['notes'][ni]['total_brightness'] = brightness_sum
        paintings[pi]['notes'][ni]['mean_brightness'] = brightness_mean
        # Percent of total painting samples
        paintings[pi]['notes'][ni]['percent_total'] = 1.0 * area_count / sample_count
        # Calc min/max across all notes of all paintings
        min_area_mean = area_mean if min_area_mean is None else min_area_mean
        max_area_mean = area_mean if max_area_mean is None else max_area_mean
        min_area_mean = min([min_area_mean, area_mean])
        max_area_mean = max([max_area_mean, area_mean])
    # Painting calculations
    psamples = paintings[pi]['samples']
    psamples_len = len(psamples)
    mean_brightness = 1.0 * sum([sample['brightness'] for sample in psamples]) / psamples_len
    mean_area = 1.0 * sum([sample['area'] for sample in psamples]) / psamples_len
    # hue variance only over sufficiently saturated/bright samples
    variance_hue = 1.0 * variance([sample['hue'] for sample in psamples if sample['saturation'] > SATURATION_THRESHOLD and sample['brightness'] > BRIGHTNESS_THRESHOLD])
    paintings[pi]['mean_brightness'] = mean_brightness
    paintings[pi]['mean_area'] = mean_area
    paintings[pi]['variance_hue'] = variance_hue
    # Determine primary note: highest percent_total (sorted ascending,
    # take the last element)
    sorted_notes = sorted(paintings[pi]['notes'], key=lambda k: k['percent_total'])
    paintings[pi]['primary_note'] = sorted_notes[-1]
# Calc min/max across paintings for normalization below
p_min_brightness_mean = min([painting['mean_brightness'] for painting in paintings])
p_max_brightness_mean = max([painting['mean_brightness'] for painting in paintings])
p_min_area_mean = min([painting['mean_area'] for painting in paintings])
p_max_area_mean = max([painting['mean_area'] for painting in paintings])
p_min_variance_hue = min([painting['variance_hue'] for painting in paintings])
p_max_variance_hue = max([painting['variance_hue'] for painting in paintings])
# Normalize values to a 0..100 scale
for pi, painting in enumerate(paintings):
    paintings[pi]['mean_brightness_i'] = (1.0 * painting['mean_brightness'] - p_min_brightness_mean) / (p_max_brightness_mean - p_min_brightness_mean) * 100
    paintings[pi]['mean_area_i'] = (1.0 * painting['mean_area'] - p_min_area_mean) / (p_max_area_mean - p_min_area_mean) * 100
    paintings[pi]['variance_hue_i'] = (1.0 * painting['variance_hue'] - p_min_variance_hue) / (p_max_variance_hue - p_min_variance_hue) * 100
    for ni, note in enumerate(painting['notes']):
        paintings[pi]['notes'][ni]['mean_area_i'] = (1.0 * note['mean_area'] - min_area_mean) / (max_area_mean - min_area_mean) * 100
# Report painting data
print('Retrieved painting data with '+ str(len(paintings)) + ' paintings and '+ str(sum(len(p['samples']) for p in paintings)) +' samples')
print('Sample area range: ['+str(min_area_mean)+','+str(max_area_mean)+']')
print('Year range: ['+str(min_year)+','+str(max_year)+']')
# Calculate total time from the latest painting stop time
total_ms = max([p['stop_ms'] for p in paintings])
total_seconds = int(1.0*total_ms/1000)
print('Main sequence time: '+time.strftime('%M:%S', time.gmtime(total_seconds)) + ' (' + str(total_seconds) + 's)')
print(str(PX_PER_BEAT)+'px per beat')
# Multiplier based on sine curve
def getMultiplier(percent_complete, rad=1.0):
    """Map *percent_complete* (0..1) onto a half-sine envelope,
    clamped to [0, 1].

    Fixes a typo in the upper clamp: the original assigned to
    ``multplier`` (sic), making the >1 branch a no-op. Harmless for
    the default rad<=1 (sin never exceeds 1) but wrong in general.
    """
    radians = percent_complete * (math.pi * rad)
    multiplier = math.sin(radians)
    if multiplier < 0:
        multiplier = 0.0
    elif multiplier > 1:
        multiplier = 1.0
    return multiplier
# Retrieve gain based on current beat
def getGain(instrument, percent_complete):
    """Interpolate the instrument's gain along its sine envelope,
    rounded to 2 decimals and never below min(from_gain, to_gain)."""
    envelope = getMultiplier(percent_complete)
    start_gain = instrument['from_gain']
    end_gain = instrument['to_gain']
    gain = round(start_gain + envelope * (end_gain - start_gain), 2)
    floor_gain = min(start_gain, end_gain)
    if gain < floor_gain:
        gain = floor_gain
    return gain
# Get beat duration in ms based on current point in time
def getBeatMs(instrument, percent_complete, round_to):
    """Interpolate the instrument's beat length (ms) along the sine
    envelope and snap it to the nearest *round_to* milliseconds."""
    envelope = getMultiplier(percent_complete)
    start_ms = instrument['from_beat_ms']
    end_ms = instrument['to_beat_ms']
    interpolated = start_ms + envelope * (end_ms - start_ms)
    return int(roundToNearest(interpolated, round_to))
# Return if the instrument should be played in the given interval
def isValidInterval(instrument, elapsed_ms):
    """True when the interval slot containing *elapsed_ms* matches the
    instrument's configured offset modulo its interval count."""
    slot = int(math.floor(1.0 * elapsed_ms / instrument['interval_ms']))
    return slot % instrument['interval'] == instrument['interval_offset']
# Add beats to sequence
def addBeatsToSequence(instrument, duration, ms, round_to):
    """Append beats for *instrument* to the global sequence, starting
    at time *ms* and spanning *duration* milliseconds.

    Beat spacing follows the instrument's sine tempo envelope; each
    emitted hit is jittered by up to +/- VARIANCE_MS using the Halton
    sequence (advances the global ``hindex``).
    """
    global sequence
    global hindex
    beat_ms = int(roundToNearest(instrument['beat_ms'], round_to))
    offset_ms = int(instrument['tempo_offset'] * beat_ms)
    ms += offset_ms
    previous_ms = int(ms)
    from_beat_ms = instrument['from_beat_ms']
    to_beat_ms = instrument['to_beat_ms']
    min_ms = min(from_beat_ms, to_beat_ms)
    remaining_duration = int(duration)
    elapsed_duration = offset_ms
    # emit beats until less than one shortest-beat remains
    while remaining_duration >= min_ms:
        elapsed_ms = int(ms)
        elapsed_beat = int((elapsed_ms-previous_ms) / beat_ms) # NOTE(review): computed but unused
        percent_complete = 1.0 * elapsed_duration / duration
        this_beat_ms = getBeatMs(instrument, percent_complete, round_to)
        # add to sequence if in valid interval
        if isValidInterval(instrument, elapsed_ms):
            h = halton(hindex, 3)
            # NB: this local `variance` shadows the module-level variance()
            variance = int(h * VARIANCE_MS * 2 - VARIANCE_MS)
            sequence.append({
                'instrument_index': instrument['index'],
                'instrument': instrument,
                'position': 0,
                'rate': 1,
                'gain': getGain(instrument, percent_complete),
                'elapsed_ms': max([elapsed_ms + variance, 0])
            })
            hindex += 1
        remaining_duration -= this_beat_ms
        elapsed_duration += this_beat_ms
        ms += this_beat_ms
# Build sequence: for each instrument, scan paintings in time order,
# merge consecutive matching paintings into one queued span, and flush
# the span to addBeatsToSequence when the match breaks or a time gap
# appears.
for instrument in instruments:
    ms = None
    queue_duration = 0
    # Go through each painting
    for painting in paintings:
        # painting matches this instrument when artist/note agree (or
        # are 'any') and its normalized size/brightness/hue-variance and
        # year fall inside the instrument's configured windows
        is_valid = (painting['artist']==instrument['artist'] or instrument['artist']=='any') and (instrument['note']==painting['primary_note']['note'] or instrument['note']=='any') and painting['mean_area_i'] >= instrument['size_min'] and painting['mean_area_i'] < instrument['size_max'] and painting['mean_brightness_i'] >= instrument['bri_min'] and painting['mean_brightness_i'] < instrument['bri_max'] and painting['variance_hue_i'] >= instrument['var_min'] and painting['variance_hue_i'] < instrument['var_max'] and painting['year'] >= instrument['year_min'] and painting['year'] <= instrument['year_max']
        # flush the queued span on a mismatch, or on a gap before the
        # next matching painting
        if not is_valid and queue_duration > 0 and ms != None or is_valid and ms != None and painting['start_ms'] > (ms+queue_duration):
            addBeatsToSequence(instrument.copy(), queue_duration, ms, ROUND_TO_NEAREST)
            ms = None
            queue_duration = 0
        if is_valid:
            if ms==None:
                ms = painting['start_ms']
            queue_duration += (painting['stop_ms'] - painting['start_ms'])
    # flush any span still queued at the end of the timeline
    if queue_duration > 0 and ms != None:
        addBeatsToSequence(instrument.copy(), queue_duration, ms, ROUND_TO_NEAREST)
# Sort sequence chronologically
sequence = sorted(sequence, key=lambda k: k['elapsed_ms'])
# Add milliseconds to sequence (delta from the previous hit)
elapsed = 0
for index, step in enumerate(sequence):
    sequence[index]['milliseconds'] = step['elapsed_ms'] - elapsed
    elapsed = step['elapsed_ms']
# Write instruments to file (alternating index/path rows, one value
# per row — the format the ChucK player consumes)
if WRITE_SEQUENCE and len(instruments) > 0:
    with open(INSTRUMENTS_OUTPUT_FILE, 'wb') as f:
        w = csv.writer(f)
        for index, instrument in enumerate(instruments):
            w.writerow([index])
            w.writerow([instrument['file']])
        # NOTE(review): seek(-2) assumes a 2-byte \r\n terminator; on a
        # platform writing bare \n this would clip the last character
        f.seek(-2, os.SEEK_END) # remove newline
        f.truncate()
        print('Successfully wrote instruments to file: '+INSTRUMENTS_OUTPUT_FILE)
# Write sequence to file (five rows per step)
if WRITE_SEQUENCE and len(sequence) > 0:
    with open(SEQUENCE_OUTPUT_FILE, 'wb') as f:
        w = csv.writer(f)
        for step in sequence:
            w.writerow([step['instrument_index']])
            w.writerow([step['position']])
            w.writerow([step['gain']])
            w.writerow([step['rate']])
            w.writerow([step['milliseconds']])
        f.seek(-2, os.SEEK_END) # remove newline
        f.truncate()
        print('Successfully wrote sequence to file: '+SEQUENCE_OUTPUT_FILE)
# Write summary files
if WRITE_REPORT:
    # Notes report: per painting, the percent of samples mapped to each note
    with open(REPORT_NOTES_OUTPUT_FILE, 'wb') as f:
        w = csv.writer(f)
        header = ['Time', 'Title', 'File']
        header.extend(notes)
        w.writerow(header)
        for painting in paintings:
            elapsed = painting['start_ms']
            # NOTE(review): str(ms) is not zero-padded, so 1:02.5 can
            # mean 5ms or 500ms — confirm downstream tolerance
            elapsed_f = time.strftime('%M:%S', time.gmtime(int(elapsed/1000)))
            ms = int(elapsed % 1000)
            elapsed_f += '.' + str(ms)
            row = [elapsed_f, painting['title'], painting['file']]
            for note in notes:
                note_i = findInList(painting['notes'], 'note', note)
                if note_i >= 0:
                    # row.append(painting['notes'][note_i]['total_area'])
                    row.append(round(painting['notes'][note_i]['percent_total']*100, 10))
                else:
                    row.append(0)
            w.writerow(row)
        print('Successfully wrote summary file: '+REPORT_NOTES_OUTPUT_FILE)
    # Paintings report: per year, normalized metrics for each artist
    with open(REPORT_SUMMARY_OUTPUT_FILE, 'wb') as f:
        w = csv.writer(f)
        artists = set([p['artist'] for p in paintings])
        years = set([p['year'] for p in paintings])
        header = ['Time', 'Year']
        for artist in artists:
            header.append(artist + ' Mean Brightness')
            header.append(artist + ' Mean Area')
            header.append(artist + ' Hue Variance')
        w.writerow(header)
        for year in years:
            y_paintings = [p for p in paintings if p['year'] == year]
            elapsed = y_paintings[0]['start_ms']
            elapsed_f = time.strftime('%M:%S', time.gmtime(int(elapsed/1000)))
            ms = int(elapsed % 1000)
            elapsed_f += '.' + str(ms)
            row = [elapsed_f, year]
            for artist in artists:
                a_paintings = [p for p in y_paintings if p['artist'] == artist]
                if len(a_paintings) > 0:
                    p = a_paintings[0]
                    row.extend([p['mean_brightness_i'], p['mean_area_i'], p['variance_hue_i']])
                else:
                    row.extend(['','',''])
            w.writerow(row)
        print('Successfully wrote summary file: '+REPORT_SUMMARY_OUTPUT_FILE)
# Write sequence report to file: human-readable hit list
if WRITE_REPORT and len(sequence) > 0:
    with open(REPORT_SEQUENCE_OUTPUT_FILE, 'wb') as f:
        w = csv.writer(f)
        w.writerow(['Time', 'Instrument', 'Gain'])
        for step in sequence:
            instrument = instruments[step['instrument_index']]
            elapsed = step['elapsed_ms']
            elapsed_f = time.strftime('%M:%S', time.gmtime(int(elapsed/1000)))
            ms = int(elapsed % 1000)
            elapsed_f += '.' + str(ms)
            w.writerow([elapsed_f, instrument['file'], step['gain']])
        f.seek(-2, os.SEEK_END) # remove newline
        f.truncate()
        print('Successfully wrote sequence report to file: '+REPORT_SEQUENCE_OUTPUT_FILE)
# Write JSON data for the visualization (currently an empty skeleton)
if WRITE_JSON:
    json_data = {
        'paintings': []
    }
    with open(VISUALIZATION_OUTPUT_FILE, 'w') as outfile:
        json.dump(json_data, outfile)
    print('Successfully wrote to JSON file: '+VISUALIZATION_OUTPUT_FILE)
|
{
"content_hash": "7c1aca5ae6c475e1c81c6582a687ab32",
"timestamp": "",
"source": "github",
"line_count": 522,
"max_line_length": 603,
"avg_line_length": 34.706896551724135,
"alnum_prop": 0.6694817022685875,
"repo_name": "shawngraham/music-lab-scripts",
"id": "01a0abcddd853ac9d7ef03b89ab31bb28330c660",
"size": "18281",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "05_painters/painters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ChucK",
"bytes": "24295"
},
{
"name": "Processing",
"bytes": "113030"
},
{
"name": "Python",
"bytes": "113868"
}
],
"symlink_target": ""
}
|
import numpy as np
from landlab import Component, RasterModelGrid, CLOSED_BOUNDARY
from landlab import BAD_INDEX_VALUE, FIXED_VALUE_BOUNDARY, FIXED_LINK
from landlab import FIXED_GRADIENT_BOUNDARY
from ...utils.decorators import use_file_name_or_kwds
class KinematicWaveRengers(Component):
"""
This code is based on an overland flow model by Francis Rengers and
colleagues, after Julien et al., 1995. It uses an explicit face-centered
solution to a depth-varying Manning's equation, broadly following, e.g.,
Mugler et al., 2011.
It was implemented in Landlab by DEJH, March '16. Please cite
Rengers et al., 2016, Model Predictions of Water Runoff in Steep
Catchments after Wildfire, WRR.
Note: You will not have a good day if you have pits present in your topo
before routing flow with this component. Fill pits before instantiating
the component (or call :func:`update_topographic_params` once you have
filled after instantiation).
Note this module assumes that the topography DOES NOT change during the
run. If it does, call :func:`update_topographic_params` to update the
component to the new topo.
Boundary condition control can be... interesting with this component.
Be sure to close boundaries you do not wish water to leave - or enter! -
through. To allow free water discharge from the grid edge it is
recommended to use fixed gradient boundary conditions at the open edges.
The component will then set the fixed gradient as equal to the underlying
topographic gradient throughout the run.
It is also possible to fix the water depth at the open edge, but this
is not really recommended.
Construction::
KinematicWaveRengers(grid, mannings_n=0.03, critical_flow_depth=0.003,
mannings_epsilon=0.33333333, dt_max=0.3,
max_courant=0.2, min_surface_water_depth=1.e-8)
Parameters
----------
grid : RasterModelGrid
A grid.
mannings_n : float
A value to use for Manning's n in the Manning discharge equation.
critical_flow_depth : float (m)
An index flow depth for the depth-varying Manning's equation,
controlling the depth at which the effective Manning's n begins to
increase.
mannings_epsilon : float
An exponent for the depth-varying Manning's equation, controlling the
rate of increase of effective Manning's n at small flow depths.
dt_max : float or None (s)
The largest permitted internal timestep for the component. If the
Courant criterion produces a more restrictive condition, that will be
used instead.
max_courant : float
The maximum permitted Courant number for the courant stability
criterion.
min_surface_water_depth : float (m)
A water depth below which surface water thickness may never fall, to
ensure model stabilty.
Examples
--------
>>> from landlab import RasterModelGrid
>>> from landlab import CLOSED_BOUNDARY, FIXED_GRADIENT_BOUNDARY
>>> mg = RasterModelGrid((5, 10), spacing=10.)
>>> mg.status_at_node[mg.nodes_at_left_edge] = FIXED_GRADIENT_BOUNDARY
>>> mg.status_at_node[mg.nodes_at_top_edge] = CLOSED_BOUNDARY
>>> mg.status_at_node[mg.nodes_at_bottom_edge] = CLOSED_BOUNDARY
>>> mg.status_at_node[mg.nodes_at_right_edge] = CLOSED_BOUNDARY
>>> _ = mg.add_field('node', 'topographic__elevation', 0.05*mg.node_x)
>>> _ = mg.add_empty('node', 'surface_water__depth')
>>> mg.at_node['surface_water__depth'].fill(1.e-8)
>>> dt = 60. # 1 min intervals
>>> rain_intensities = (1.e-5, 1.e-5, 1.e-5, 1.e-5, 1.e-5)
>>> kw = KinematicWaveRengers(mg)
>>> for i in rain_intensities:
... kw.run_one_step(dt, rainfall_intensity=i)
>>> mg.at_node['surface_water__depth']
array([ 1.00000000e-08, 1.00000000e-08, 1.00000000e-08,
1.00000000e-08, 1.00000000e-08, 1.00000000e-08,
1.00000000e-08, 1.00000000e-08, 1.00000000e-08,
1.00000000e-08, 2.95578314e-03, 2.95578314e-03,
2.90945761e-03, 2.82912876e-03, 2.70127141e-03,
2.51202011e-03, 2.24617193e-03, 1.88032853e-03,
1.35451064e-03, 1.00000000e-08, 2.95578314e-03,
2.95578314e-03, 2.90945761e-03, 2.82912876e-03,
2.70127141e-03, 2.51202011e-03, 2.24617193e-03,
1.88032853e-03, 1.35451064e-03, 1.00000000e-08,
2.95578314e-03, 2.95578314e-03, 2.90945761e-03,
2.82912876e-03, 2.70127141e-03, 2.51202011e-03,
2.24617193e-03, 1.88032853e-03, 1.35451064e-03,
1.00000000e-08, 1.00000000e-08, 1.00000000e-08,
1.00000000e-08, 1.00000000e-08, 1.00000000e-08,
1.00000000e-08, 1.00000000e-08, 1.00000000e-08,
1.00000000e-08, 1.00000000e-08])
"""
_name = 'KinematicWaveRengers'
_input_var_names = (
'topographic__elevation',
'surface_water__depth',
)
_output_var_names = (
'surface_water__depth',
'surface_water__discharge',
'surface_water__velocity'
)
_var_units = {
'topographic__elevation': 'm',
'surface_water__depth': 'm',
'surface_water__discharge': 'm**3/s',
'surface_water__velocity': 'm/s'
}
_var_mapping = {
'topographic__elevation': 'node',
'surface_water__depth': 'node',
'surface_water__discharge': 'node',
'surface_water__velocity': 'node'
}
_var_doc = {
'topographic__elevation': 'Land surface topographic elevation',
'surface_water__depth': 'Depth of water above the surface',
'surface_water__discharge': ('Magnitude of discharge of water above ' +
'the surface'),
'surface_water__velocity': 'Speed of water flow above the surface'
}
    @use_file_name_or_kwds
    def __init__(self, grid, mannings_n=0.03, critical_flow_depth=0.003,
                 mannings_epsilon=0.33333333, dt_max=0.3, max_courant=0.2,
                 min_surface_water_depth=1.e-8, **kwds):
        """Initialize the kinematic wave approximation overland flow component.

        Precomputes node-mapped topographic gradients, per-node neighbor
        tables, Courant prefactors, and fixed-value / fixed-gradient
        boundary bookkeeping so run_one_step can avoid per-step setup.
        Assumes the topography does not change unless
        update_topographic_params is called.
        """
        assert isinstance(grid, RasterModelGrid), 'grid must be regular'
        self._grid = grid
        try:
            self.set_new_fields_for_component()
        except AttributeError:
            # older landlab without the field-creation helper: make the
            # output fields directly
            self.grid.add_empty('node', 'surface_water__velocity')
            self.grid.add_empty('node', 'surface_water__discharge')
        # IDs of all non-closed nodes; most state below is stored only
        # for these
        active = np.where(self.grid.status_at_node != CLOSED_BOUNDARY)[0]
        self._h = self.grid.at_node['surface_water__depth']
        self._active = active
        self._hc = critical_flow_depth
        self._n = mannings_n
        self._negepsilon = -mannings_epsilon
        assert not np.isclose(dt_max, 0.)
        self.dt_max = dt_max
        self.min_surface_water_depth = min_surface_water_depth
        self._active_depths = self.grid.at_node[
            'surface_water__depth'][active]
        # topographic gradients at links, averaged back onto nodes
        # separately for the two grid axes
        all_grads = self.grid.calc_grad_at_link('topographic__elevation')
        hoz_grads = self.grid.map_mean_of_horizontal_active_links_to_node(
            all_grads)
        vert_grads = self.grid.map_mean_of_vertical_active_links_to_node(
            all_grads)
        # sqrt(|S|) terms of the Manning equation, precomputed per axis
        self.hozslopept5 = np.fabs(hoz_grads[active])**0.5
        self.vertslopept5 = np.fabs(vert_grads[active])**0.5
        self.velx = self.grid.zeros('node', dtype=float)
        self.vely = self.grid.zeros('node', dtype=float)
        # one extra (always-zero) slot so neighbor index -1 reads a zero
        # flux (the "final-array-val-is-zero trick" used in run_one_step)
        self.qy = np.zeros(grid.number_of_nodes+1, dtype=float)
        self.qx = np.zeros(grid.number_of_nodes+1, dtype=float)
        self.poshozgrads = hoz_grads > 0.
        self.posvertgrads = vert_grads > 0.
        if np.isclose(self.grid.dx, self.grid.dy):
            self.equaldims = True
            self.courant_prefactor = max_courant*self.grid.dx
        else:
            self.equaldims = False
            self.courant_prefactor = max_courant*self.grid.dx*self.grid.dy
        # copy so missing neighbors can be rewritten to -1 (the zero slot)
        self._neighbors = self.grid.neighbors_at_node.copy()
        self._neighbors[self._neighbors == BAD_INDEX_VALUE] = -1
        self._water_balance = []
        # fixed-value open boundaries keep their initial water depth
        self.actives_BCs = (self.grid.status_at_node[active] ==
                            FIXED_VALUE_BOUNDARY)
        self.actives_BCs_water_depth = self._h[active][self.actives_BCs]
        fixed_grad_nodes = self.grid.fixed_gradient_boundary_nodes
        fixed_grad_anchors = \
            self.grid.fixed_gradient_boundary_node_anchor_node
        # ^add this value to the anchor nodes to update the BCs
        # these also need to be mapped to active_IDs:
        blank_nodes = self.grid.zeros('node', dtype=bool)
        blank_nodes[fixed_grad_nodes] = True
        self.fixed_grad_nodes_active = np.where(blank_nodes[active])[0]
        blank_nodes.fill(False)
        blank_nodes[fixed_grad_anchors] = True
        self.fixed_grad_anchors_active = np.where(blank_nodes[active])[0]
def run_one_step(self, dt, rainfall_intensity=0.00001,
update_topography=False, track_min_depth=False):
"""Update fields with current hydrologic conditions.
Parameters
----------
rain_intensity : float or array (m/s)
The rainfall intensity across the grid (water input rate at each
node).
update_topography : bool
Set to true if the topography of the grid evolves during the run.
track_min_depth : bool
At *very* low rainfall inputs, there is a possibility this
component could allow creation of small amounts of water mass.
Set to true to track this mass, and use the :func:`water_balance`
property to investigate its evolution through time.
"""
elapsed_time_in_dt = 0. # this is only since the start of the timestep
active = self._active
self.hnew = self._h[active]
hnew = self.hnew
if update_topography:
self.update_topographic_params()
while elapsed_time_in_dt < dt:
internal_dt = self.calc_grads_and_timesteps(
update_topography, track_min_depth)
remaining_dt = dt - elapsed_time_in_dt
# now reduce timestep is needed if limited by total tstep length
internal_dt = min(internal_dt, remaining_dt).clip(0.)
# this section uses our final-array-val-is-zero trick
qx_left = self.qx[self._neighbors[:, 2]].clip(min=0.)
qx_right = self.qx[self._neighbors[:, 0]].clip(max=0.)
qy_top = self.qy[self._neighbors[:, 1]].clip(min=0.)
qy_bottom = self.qy[self._neighbors[:, 3]].clip(max=0.)
# FR's rainfall handling was here. We're going to assume that the
# component is being driven by a "LL style" rainfall record, where
# the provided rainfall_intensity is constant across the provide
# dt. If it's not, it needs to be handled outside the component.
# now add the rainfall input
if type(rainfall_intensity) is not np.ndarray:
hnew += internal_dt * rainfall_intensity
else:
hnew += internal_dt * rainfall_intensity[active]
# set the BCs
hnew[self.actives_BCs] = self.actives_BCs_water_depth
# flux it round
hnew -= internal_dt/self.grid.dx*np.fabs(self.qy[active])
hnew -= internal_dt/self.grid.dy*np.fabs(self.qx[active])
hnew += internal_dt/self.grid.dx*(qy_top - qy_bottom)[active]
hnew += internal_dt/self.grid.dy*(qx_left - qx_right)[active]
hnew[self.fixed_grad_nodes_active] = hnew[
self.fixed_grad_anchors_active]
# update the internal clock
elapsed_time_in_dt += internal_dt
# update the actual fields
self._h[active] = hnew
self.grid.at_node['surface_water__velocity'][:] = np.sqrt(
np.square(self.velx) + np.square(self.vely))
self.grid.at_node['surface_water__discharge'][:] = np.sqrt(
np.square(self.qx[:-1]) + np.square(self.qy[:-1]))
    def calc_grads_and_timesteps(self, update_topography, track_min_depth):
        """
        Perform the first part of the calculation for the main run, mainly
        velocities and fluxes. The main objective of this part of the
        calculation is to derive the stable timestep for the run.

        Parameters
        ----------
        update_topography : bool
            If False, the underlying surface topography is assumed unchanged
            since the last run.
        track_min_depth : bool
            If True, the internal list _water_balance will be appended with
            the volumetric fractional change in mass balance during the run.

        Returns
        -------
        internal_dt : float
            The internally stable timestep that will be used on this loop.
        """
        active = self._active
        hnew = self.hnew
        if update_topography:
            self.update_topographic_params()
        # assert the minimum water depth - this could introduce an element of
        # mass gain, but should remain minor
        hnew.clip(self.min_surface_water_depth, out=hnew)
        if track_min_depth:
            # record the fractional volume change caused by the clip above
            self._water_balance.append(
                (hnew-self._h[active]).sum()/self._h[active].sum())
        # depth-variable Manning's n, scaled by depth relative to the
        # critical depth and raised to -epsilon
        n = self._n * (hnew/self._hc)**self._negepsilon
        # Manning velocity: h**(2/3) * sqrt(slope) / n (0.66666666 ~= 2/3)
        twothirdshnewbyn = hnew**0.66666666 / n
        self.vely[active] = twothirdshnewbyn * self.vertslopept5
        self.velx[active] = twothirdshnewbyn * self.hozslopept5
        # flow runs down-gradient: flip sign where the gradient is positive
        self.vely[self.posvertgrads] *= -1.
        self.velx[self.poshozgrads] *= -1.
        self.qy[active] = self.vely[active] * hnew  # m**2/s
        self.qx[active] = self.velx[active] * hnew  # m**2/s
        maxvely = np.fabs(self.vely).max()
        maxvelx = np.fabs(self.velx).max()
        # Courant stability condition. NOTE(review): if all velocities are
        # zero this divides by zero (numpy yields inf) - dt_max, if set,
        # then caps the step; confirm dt_max is set for still-water starts.
        if self.equaldims:
            courant_dt = self.courant_prefactor/(maxvelx + maxvely)
        else:
            # note prefactor is NOT THE SAME as above in this case
            courant_dt = self.courant_prefactor/(self.grid.dy*maxvelx +
                                                 self.grid.dx*maxvely)
        if self.dt_max is not None:
            internal_dt = np.min((self.dt_max, courant_dt))
        else:
            internal_dt = courant_dt
        # cache for the internal_timestep property
        self._internal_dt = internal_dt
        return internal_dt
def update_topographic_params(self):
"""
If the topo changes during the run, change the held params used by
:func:`run_one_step`.
"""
active = np.where(self.grid.status_at_node != CLOSED_BOUNDARY)[0]
all_grads = self.grid.calculate_gradients_at_links(
'topographic__elevation')
hoz_grads = self.grid.map_mean_of_horizontal_active_links_to_node(
all_grads)
vert_grads = self.grid.map_mean_of_vertical_active_links_to_node(
all_grads)
self.hozslopept5 = np.fabs(hoz_grads[active])**0.5
self.vertslopept5 = np.fabs(vert_grads[active])**0.5
self.poshozgrads = hoz_grads > 0.
self.posvertgrads = vert_grads > 0.
fixed_grad_nodes = self.grid.fixed_gradient_boundary_nodes
fixed_grad_anchors = \
self.grid.fixed_gradient_boundary_node_anchor_node
# ^add this value to the anchor nodes to update the BCs
# these also need to be mapped to active_IDs:
blank_nodes = self.grid.zeros('node', dtype=bool)
blank_nodes[fixed_grad_nodes] = True
self.fixed_grad_nodes_active = np.where(blank_nodes[active])[0]
blank_nodes.fill(False)
blank_nodes[fixed_grad_anchors] = True
self.fixed_grad_anchors_active = np.where(blank_nodes[active])[0]
# check is the grid topology has changed...
if not np.all(np.equal(self._active, active)):
self._active = active
self.velx.fill(0.)
self.vely.fill(0.)
self.qy.fill(0.)
self.qx.fill(0.)
self._neighbors = self.grid.neighbors_at_node.copy()
self._neighbors[self._neighbors == BAD_INDEX_VALUE] = -1
self.actives_BCs = (self.grid.status_at_node[active] ==
FIXED_VALUE_BOUNDARY)
self.actives_BCs_water_depth = self._h[self.actives_BCs]
@property
def water_balance(self):
"""
Return a list of the fractional gain/loss of water mass during the
run, if it was tracked using the track_min_depth flag.
"""
if self._water_balance == []:
raise ValueError('No record of water balance was found!')
else:
return self._water_balance
@property
def internal_timestep(self):
"""
Return the internal timestep last used by the kinematic wave component.
"""
try:
return self._internal_dt
except AttributeError:
# the component hasn't started running yet
_ = self.calc_grads_and_timesteps(False, False)
return self._internal_dt
|
{
"content_hash": "f3afe13495db4aa170ef3c2f03742638",
"timestamp": "",
"source": "github",
"line_count": 380,
"max_line_length": 79,
"avg_line_length": 45.58684210526316,
"alnum_prop": 0.6090746406511575,
"repo_name": "laijingtao/landlab",
"id": "e3b0395403a8b26360e54bbcd3936c299ca7f0eb",
"size": "17346",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "landlab/components/overland_flow/kinematic_wave_rengers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1452"
},
{
"name": "PowerShell",
"bytes": "7128"
},
{
"name": "Python",
"bytes": "3084328"
},
{
"name": "Shell",
"bytes": "3175"
}
],
"symlink_target": ""
}
|
import types
from text import text_file
class Configuration():
    """Simple ``key = value`` configuration-file wrapper.

    Lines are parsed into a dict; blank lines and lines starting with
    ``#`` are ignored.
    """

    def __init__(self, filename):
        # Parsed configuration mapping (str -> str).
        self.conf = self.__load(filename)

    def __load(self, filename):
        """Parse *filename* into a dict, skipping blanks and # comments."""
        conf = {}
        lines = text_file.read_file(filename)
        for line in lines:
            line = line.strip()
            if line == '' or line.startswith('#'):
                continue
            try:
                # split on the first '=' only, so values may contain '='
                key, value = line.split('=', 1)
            except ValueError:
                # malformed line (no '='): skip it, as before, but no
                # longer swallow unrelated exceptions with a bare except
                continue
            # strip whitespace so files written by save() round-trip
            conf[key.strip()] = value.strip()
        return conf

    def save(self, filename):
        """Write the configuration to *filename* as 'key = value' lines."""
        lines = []
        for key in self.conf:
            value = self.conf.get(key)
            # bug fix: the format string and its arguments were previously
            # passed as three arguments to list.append(), raising TypeError
            lines.append('%s = %s' % (key, value))
        text_file.write_file(filename, lines)

    def get(self, key, default):
        """Return conf[key] coerced to the type of *default*.

        Returns *default* when the key is absent, and None when *key* is
        None. Replaces the Python-2-only ``types.IntType`` checks with
        exact type tests, preserving the original dispatch.
        """
        if key is None:
            return None
        value = self.conf.get(key)
        if value is None:
            return default
        if type(default) is bool:
            # bug fix: bool('False') is True - parse common truthy strings
            if isinstance(value, str):
                return value.strip().lower() in ('1', 'true', 'yes', 'on')
            return bool(value)
        if type(default) is int:
            return int(value)
        if type(default) is float:
            return float(value)
        return str(value)

    def set(self, key, value):
        """Store *value* under *key*; None keys or values are ignored."""
        if key is not None and value is not None:
            self.conf[key] = value
|
{
"content_hash": "62702995fb18851a4410af25fda0aecf",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 52,
"avg_line_length": 27.88235294117647,
"alnum_prop": 0.46905766526019693,
"repo_name": "interhui/py-text",
"id": "5a815a23e608c96d29c3dfdfc90dc2056f1975f3",
"size": "1438",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "text/conf_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "69473"
}
],
"symlink_target": ""
}
|
"""The tests for the Input select component."""
# pylint: disable=too-many-public-methods,protected-access
import unittest
from blumate.components import input_select
from blumate.const import (
ATTR_ICON, ATTR_FRIENDLY_NAME)
from tests.common import get_test_home_assistant
class TestInputSelect(unittest.TestCase):
    """Test the input select component."""

    def setUp(self):  # pylint: disable=invalid-name
        """Setup things to be run when tests are started."""
        self.hass = get_test_home_assistant()

    def tearDown(self):  # pylint: disable=invalid-name
        """Stop everything that was started."""
        self.hass.stop()

    def test_config(self):
        """Test config."""
        # Every one of these configurations is invalid; setup must reject
        # each of them.
        bad_configs = (
            None,
            {},
            {'name with space': None},
            {'hello': {'options': None}},
            {'hello': None},
        )
        for bad in bad_configs:
            self.assertFalse(
                input_select.setup(self.hass, {'input_select': bad}))

    def test_select_option(self):
        """Test select_option methods."""
        self.assertTrue(input_select.setup(self.hass, {
            'input_select': {
                'test_1': {
                    'options': [
                        'some option',
                        'another option',
                    ],
                },
            }
        }))
        entity_id = 'input_select.test_1'

        def current_state():
            return self.hass.states.get(entity_id).state

        def select(option):
            input_select.select_option(self.hass, entity_id, option)
            self.hass.pool.block_till_done()

        # initial state is the first configured option
        self.assertEqual('some option', current_state())
        select('another option')
        self.assertEqual('another option', current_state())
        # selecting an unknown option must leave the state untouched
        select('non existing option')
        self.assertEqual('another option', current_state())

    def test_config_options(self):
        """Test configuration options."""
        count_start = len(self.hass.states.entity_ids())
        test_2_options = [
            'Good Option',
            'Better Option',
            'Best Option',
        ]
        self.assertTrue(input_select.setup(self.hass, {
            'input_select': {
                'test_1': {
                    'options': [
                        1,
                        2,
                    ],
                },
                'test_2': {
                    'name': 'Hello World',
                    'icon': 'work',
                    'options': test_2_options,
                    'initial': 'Better Option',
                },
            },
        }))
        self.assertEqual(count_start + 2, len(self.hass.states.entity_ids()))
        state_1 = self.hass.states.get('input_select.test_1')
        state_2 = self.hass.states.get('input_select.test_2')
        self.assertIsNotNone(state_1)
        self.assertIsNotNone(state_2)
        # bare entity: numeric options become strings, no name/icon set
        self.assertEqual('1', state_1.state)
        self.assertEqual(['1', '2'],
                         state_1.attributes.get(input_select.ATTR_OPTIONS))
        self.assertNotIn(ATTR_ICON, state_1.attributes)
        self.assertNotIn(ATTR_FRIENDLY_NAME, state_1.attributes)
        # fully configured entity: initial option, name and icon exposed
        self.assertEqual('Better Option', state_2.state)
        self.assertEqual(test_2_options,
                         state_2.attributes.get(input_select.ATTR_OPTIONS))
        self.assertEqual('Hello World',
                         state_2.attributes.get(ATTR_FRIENDLY_NAME))
        self.assertEqual('work', state_2.attributes.get(ATTR_ICON))
|
{
"content_hash": "d3e486ccc85e6399ad20faee553cb69e",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 79,
"avg_line_length": 31.3953488372093,
"alnum_prop": 0.5232098765432098,
"repo_name": "bdfoster/blumate",
"id": "3643854bfcb4a30ada1a9ff594d4e9b310c1bd86",
"size": "4050",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/components/test_input_select.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1309487"
},
{
"name": "JavaScript",
"bytes": "10846"
},
{
"name": "Python",
"bytes": "2460958"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "6407"
}
],
"symlink_target": ""
}
|
'''
The addresslib package exposes a simple address parsing library that
can handle email addresses and urls.
See the address.py module for the public interfaces to the library and the
parser.py module for the implementation of the recursive descent parser
used to parse email addresses and urls.
To override the default DNS lookup library or MX Cache, use the
set_dns_lookup and set_mx_cache methods. For more details, see the User Manual.
'''
import re
from addresslib.drivers.redis_driver import RedisCache
from addresslib.drivers.dns_lookup import DNSLookup
from addresslib.plugins import yahoo
from addresslib.plugins import aol
from addresslib.plugins import gmail
from addresslib.plugins import icloud
from addresslib.plugins import hotmail
from addresslib.plugins import google
# Default MX cache and DNS lookup implementations; override them with
# set_mx_cache() / set_dns_lookup() below.
mx_cache = RedisCache()
dns_lookup = DNSLookup()
# Regexes matched against a domain's MX hostnames to choose a
# provider-specific grammar plugin (see CUSTOM_GRAMMAR_LIST below).
YAHOO_PATTERN = re.compile(r'''mta[0-9]+\.am[0-9]+\.yahoodns\.net$''')
# bug fix: the '.' before 'com' was unescaped and matched any character
GMAIL_PATTERN = re.compile(r'''.*gmail-smtp-in\.l\.google\.com$''')
AOL_PATTERN = re.compile(r'''.*\.mx\.aol\.com$''')
ICLOUD_PATTERN = re.compile(r'''.*\.mail\.icloud\.com$''')
# NOTE(review): unanchored at the end, unlike the others - confirm intent
HOTMAIL_PATTERN = re.compile(r'''mx[0-9]\.hotmail\.com''')
# bug fix: escaped the '.' in 'googlemail.com' as in the other patterns
GOOGLE_PATTERN = re.compile(r'''(.*aspmx\.l\.google\.com$)|(aspmx.*\.googlemail\.com$)''', re.IGNORECASE)
# Maps an MX-host pattern to the plugin module implementing that
# provider's address grammar.
CUSTOM_GRAMMAR_LIST = [
    (YAHOO_PATTERN, yahoo),
    (GMAIL_PATTERN, gmail),
    (AOL_PATTERN, aol),
    (ICLOUD_PATTERN, icloud),
    (HOTMAIL_PATTERN, hotmail),
    (GOOGLE_PATTERN, google),
]
def set_dns_lookup(dlookup):
    """Replace the module-level DNS lookup implementation.

    :param dlookup: object used in place of the default ``DNSLookup()``.
    """
    global dns_lookup
    dns_lookup = dlookup
def set_mx_cache(mcache):
    """Replace the module-level MX cache implementation.

    :param mcache: object used in place of the default ``RedisCache()``.
    """
    global mx_cache
    mx_cache = mcache
|
{
"content_hash": "c7386f6e1abc05c2b993d74028aa263f",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 104,
"avg_line_length": 31.98,
"alnum_prop": 0.7267041901188243,
"repo_name": "EncircleInc/addresslib",
"id": "bcacdb774aaa3123f4b7f1c06f809223ff0c2530",
"size": "1599",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "addresslib/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "128277"
}
],
"symlink_target": ""
}
|
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.utils.translation import ugettext_lazy
from mptt import models as mptt_models
from learningprogress.accounts.models import User
class Section(mptt_models.MPTTModel):
    """
    Model for sectioning the curriculum.

    Sections form a tree (via django-mptt): each node may have a parent,
    siblings are ordered by ``weight`` then ``name``, and users are linked
    many-to-many through :class:`UserSectionRelation`.
    """
    # Display name of the section.
    name = models.CharField(
        max_length=255,
        verbose_name=ugettext_lazy('Name'))
    # Tree parent; NULL/blank for top-level sections.
    parent = mptt_models.TreeForeignKey(
        'self',
        related_name='children',
        null=True,
        blank=True,
        verbose_name=ugettext_lazy('Weight') if False else ugettext_lazy('Parent section'))
    weight = models.IntegerField(
        default=0,
        verbose_name=ugettext_lazy('Weight'),
        help_text=ugettext_lazy('Use this for ordering elements.'))
    # Learning scores between 1 and 100 (enforced by validators).
    scores = models.IntegerField(
        default=1,
        validators=[MinValueValidator(1), MaxValueValidator(100)],
        verbose_name=ugettext_lazy('Learning scores'),
        help_text=ugettext_lazy('Choose a value from 1 to 100. Only relevant for elements without children.'))
    notes = models.TextField(
        blank=True,
        verbose_name=ugettext_lazy('Notes'))
    users = models.ManyToManyField(
        User,
        through='UserSectionRelation')
    class Meta:
        verbose_name = ugettext_lazy('Section')
        verbose_name_plural = ugettext_lazy('Sections')
    class MPTTMeta:
        # django-mptt inserts new nodes sorted by these fields
        order_insertion_by = ['weight', 'name']
    def __str__(self):
        return self.name
    def serialize(self, user):
        """
        Serializes the progress data of a user for the section. Returns a
        dictionary with name/scores/notes plus the user's progress/comment.
        """
        data = dict(
            name=self.name,
            scores=self.scores,
            notes=self.notes)
        try:
            usersectionrelation = self.usersectionrelation.get(user=user)
        except UserSectionRelation.DoesNotExist:
            # no relation yet: the user has not recorded any progress here
            data.update(dict(progress=0, comment=''))
        else:
            data.update(dict(
                progress=usersectionrelation.progress,
                comment=usersectionrelation.comment))
        return data
# Choices for UserSectionRelation.progress: ascending level of completion.
PROGRESS_CHOICES = (
    (0, ugettext_lazy('Nothing done')),
    (1, ugettext_lazy('First look')),
    (2, ugettext_lazy('Crammed')),
    (3, ugettext_lazy('All done')))
class UserSectionRelation(models.Model):
    """
    Many-to-many relationship between an user and an section in the
    curriculum.

    Stores the user's learning progress (see PROGRESS_CHOICES) and a
    free-text comment for one section.
    """
    user = models.ForeignKey(User)
    section = models.ForeignKey(Section, related_name='usersectionrelation')
    # One of PROGRESS_CHOICES; 0 means nothing done yet.
    progress = models.IntegerField(default=0, choices=PROGRESS_CHOICES)
    comment = models.TextField()
    class Meta:
        # at most one relation per (user, section) pair
        unique_together = (('user', 'section'),)
        verbose_name = ugettext_lazy('Relation between an user and a section')
    def __str__(self):
        return '%s – %s' % (self.user, self.section)
class MockExamBranch(models.Model):
    """
    Model for branches for mock exams.
    """
    name = models.CharField(
        max_length=255,
        verbose_name=ugettext_lazy('Name'))
    weight = models.IntegerField(
        default=0,
        verbose_name=ugettext_lazy('Weight'),
        help_text=ugettext_lazy('Use this for ordering elements.'))
    class Meta:
        ordering = ('weight', 'name')
        verbose_name = ugettext_lazy('Mock exam branch')
        verbose_name_plural = ugettext_lazy('Mock exam branches')
    def __str__(self):
        return self.name
class MockExam(models.Model):
    """
    Model for mock exams.

    Links a user's exam result (0-18 points) in a branch to a date.
    """
    user = models.ForeignKey(User)
    branch = models.ForeignKey(
        MockExamBranch,
        verbose_name=ugettext_lazy('Branch'))
    # 0-18 point scale, enforced by validators; presumably the German
    # law-exam grading scale - TODO confirm
    mark = models.IntegerField(
        validators=[MinValueValidator(0), MaxValueValidator(18)],
        verbose_name=ugettext_lazy('Points'))
    date = models.DateField(
        verbose_name=ugettext_lazy('Date'))
    class Meta:
        ordering = ('branch', 'date')
    def __str__(self):
        return '%s – %s' % (self.user, self.branch)
|
{
"content_hash": "607ea948b6bc23d7308d05587552ff2a",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 110,
"avg_line_length": 30.424242424242426,
"alnum_prop": 0.6277390438247012,
"repo_name": "LearningProgress/LearningProgress",
"id": "f9370f9b76a143cf16d7cb24c4d57fad5b066d59",
"size": "4020",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "learningprogress/progress/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "92326"
},
{
"name": "HTML",
"bytes": "232511"
},
{
"name": "JavaScript",
"bytes": "11550"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "Python",
"bytes": "74296"
}
],
"symlink_target": ""
}
|
# -*- coding: utf-8 -*-
import sphinx_rtd_theme
#
# RTD Spielwiese documentation build configuration file, created by
# sphinx-quickstart on Tue Oct 04 12:39:10 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.4'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx extensions enabled for this build.
extensions = [
    'sphinx.ext.mathjax',  # render math in HTML via MathJax
    'sphinx.ext.todo'  # todo directives; output gated by todo_include_todos
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Gpufit: An open-source toolkit for GPU-accelerated curve fitting'
copyright = 'All rights reserved.'
author = 'Adrian Przybylski, Björn Thiel, Jan Keller-Findeisen, Bernd Stock, and Mark Bates'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.2'
# The full version, including alpha/beta/rc tags.
release = u'1.2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# read epilog.rst (substitutions appended to every source file)
# NOTE(review): no explicit encoding - relies on the locale default;
# confirm epilog.txt stays ASCII/UTF-8 on the build machines
with open('epilog.txt') as f:
    rst_epilog = f.read()
# default highlight language is cpp
highlight_language = 'cpp'
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
numfig = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
#html_theme_options = {
# 'collapse_navigation': False,
# 'display_version': False,
# 'navigation_depth': 3,
#}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'RTD Spielwiese v1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Gpufit'
# -- Options for LaTeX output ---------------------------------------------
# make code smaller in latex output
# see also: http://stackoverflow.com/questions/9899283/how-do-you-change-the-code-example-font-size-in-latex-pdf-output-with-sphinx
from sphinx.highlighting import PygmentsBridge
from pygments.formatters.latex import LatexFormatter
class CustomLatexFormatter(LatexFormatter):
    """LatexFormatter that renders code listings in a smaller font."""
    def __init__(self, **options):
        super(CustomLatexFormatter, self).__init__(**options)
        # passed through to the LaTeX Verbatim environment
        self.verboptions = r"formatcom=\footnotesize"
# register our formatter as the one Sphinx uses for LaTeX output
PygmentsBridge.latex_formatter = CustomLatexFormatter
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
'papersize': 'a4paper,oneside',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Gpufit.tex', 'Gpufit Documentation',
'Gpufit', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
latex_show_pagerefs = True
# If true, show URL addresses after external links.
#
# latex_show_urls = 'footnote'
latex_show_urls = 'no'
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'gpufit', 'Gpufit Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Gpufit', 'Gpufit Documentation',
author, 'Gpufit', 'Levenberg Marquardt curve fitting in CUDA',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
# epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
#
# epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#
# epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#
# epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#
# epub_tocdepth = 3
# Allow duplicate toc entries.
#
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#
# epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#
# epub_fix_images = False
# Scale large images.
#
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# epub_show_urls = 'inline'
# If false, no index is generated.
#
# epub_use_index = True
|
{
"content_hash": "d682826a738b9f8e2d8def08b0433f1b",
"timestamp": "",
"source": "github",
"line_count": 457,
"max_line_length": 131,
"avg_line_length": 28.282275711159738,
"alnum_prop": 0.6958607350096712,
"repo_name": "gpufit/Gpufit",
"id": "7369e043835e2be900ec0bd881e0757043153919",
"size": "12928",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9475"
},
{
"name": "C",
"bytes": "910"
},
{
"name": "C++",
"bytes": "167370"
},
{
"name": "CMake",
"bytes": "25022"
},
{
"name": "Cuda",
"bytes": "164374"
},
{
"name": "Java",
"bytes": "31651"
},
{
"name": "MATLAB",
"bytes": "77445"
},
{
"name": "Python",
"bytes": "20745"
}
],
"symlink_target": ""
}
|
__author__ = "geduldig"
__date__ = "June 7, 2013"
__license__ = "MIT"
from .BearerAuth import BearerAuth as OAuth2
from .constants import *
from datetime import datetime
from requests.exceptions import ConnectionError, ReadTimeout, SSLError
from requests.packages.urllib3.exceptions import ReadTimeoutError, ProtocolError
from requests_oauthlib import OAuth1
from .TwitterError import *
import json
import requests
import socket
import ssl
import time
class TwitterAPI(object):

    """Access REST API or Streaming API resources.

    :param consumer_key: Twitter application consumer key
    :param consumer_secret: Twitter application consumer secret
    :param access_token_key: Twitter application access token key
    :param access_token_secret: Twitter application access token secret
    :param auth_type: "oAuth1" (default) or "oAuth2"
    :param proxy_url: HTTPS proxy URL (ex. "https://USER:PASSWORD@SERVER:PORT")
    """

    def __init__(
            self,
            consumer_key=None,
            consumer_secret=None,
            access_token_key=None,
            access_token_secret=None,
            auth_type='oAuth1',
            proxy_url=None):
        """Initialize with your Twitter application credentials"""
        self.proxies = {'https': proxy_url} if proxy_url else None

        if auth_type == 'oAuth1':
            credentials = (consumer_key, consumer_secret,
                           access_token_key, access_token_secret)
            if not all(credentials):
                raise Exception('Missing authentication parameter')
            self.auth = OAuth1(*credentials)
        elif auth_type == 'oAuth2':
            if not (consumer_key and consumer_secret):
                raise Exception('Missing authentication parameter')
            self.auth = OAuth2(
                consumer_key,
                consumer_secret,
                proxies=self.proxies)
        else:
            raise Exception('Unknown oAuth version')

    def _prepare_url(self, subdomain, path):
        # The curator subdomain uses its own API version; everything else
        # shares the common REST/streaming version constant.
        version = CURATOR_VERSION if subdomain == 'curator' else VERSION
        return '%s://%s.%s/%s/%s.json' % (PROTOCOL,
                                          subdomain,
                                          DOMAIN,
                                          version,
                                          path)

    def _get_endpoint(self, resource):
        """Substitute any parameters in the resource path with :PARAM."""
        if ':' not in resource:
            return (resource, resource)
        # embedded parameters start with ':'
        endpoint = '/'.join(
            part if part[0] != ':' else ':PARAM'
            for part in resource.split('/'))
        return (resource.replace(':', ''), endpoint)

    def request(self, resource, params=None, files=None, method_override=None):
        """Request a Twitter REST API or Streaming API resource.

        :param resource: A valid Twitter endpoint (ex. "search/tweets")
        :param params: Dictionary with endpoint parameters or None (default)
        :param files: Dictionary with multipart-encoded file or None (default)
        :param method_override: Request method to override or None (default)

        :returns: TwitterResponse
        :raises: TwitterConnectionError
        """
        resource, endpoint = self._get_endpoint(resource)
        if endpoint not in ENDPOINTS:
            raise Exception('Endpoint "%s" unsupported' % endpoint)

        with requests.Session() as session:
            session.auth = self.auth
            session.headers = {'User-Agent': USER_AGENT}
            method, subdomain = ENDPOINTS[endpoint]
            if method_override:
                method = method_override
            url = self._prepare_url(subdomain, resource)

            streaming = 'stream' in subdomain
            session.stream = streaming
            if streaming:
                timeout = STREAMING_TIMEOUT
                # always use 'delimited' for efficient stream parsing
                if not params:
                    params = {}
                params['delimited'] = 'length'
                params['stall_warning'] = 'true'
            else:
                timeout = REST_TIMEOUT

            data = None
            if method == 'POST':
                # POST endpoints take the parameters in the request body
                data, params = params, None

            try:
                r = session.request(
                    method,
                    url,
                    data=data,
                    params=params,
                    timeout=(CONNECTION_TIMEOUT, timeout),
                    files=files,
                    proxies=self.proxies)
            except (ConnectionError, ProtocolError, ReadTimeout, ReadTimeoutError,
                    SSLError, ssl.SSLError, socket.error) as e:
                raise TwitterConnectionError(e)

            return TwitterResponse(r, session.stream)
class TwitterResponse(object):

    """Response from either a REST API or Streaming API resource call.

    :param response: The requests.Response object returned by the API call
    :param stream: Boolean connection type (True if a streaming connection)
    """

    def __init__(self, response, stream):
        self.response = response
        self.stream = stream

    @property
    def headers(self):
        """:returns: Dictionary of API response header contents."""
        return self.response.headers

    @property
    def status_code(self):
        """:returns: HTTP response status code."""
        return self.response.status_code

    @property
    def text(self):
        """:returns: Raw API response text."""
        return self.response.text

    def json(self, **kwargs):
        """Get the response as a JSON object.

        :param \*\*kwargs: Optional arguments that ``json.loads`` takes.
        :returns: response as JSON object.
        :raises: ValueError
        """
        return self.response.json(**kwargs)

    def get_iterator(self):
        """Get API dependent iterator.

        :returns: Iterator for tweets or other message objects in response.
        :raises: TwitterConnectionError, TwitterRequestError
        """
        if self.response.status_code != 200:
            raise TwitterRequestError(self.response.status_code)
        iterable_cls = _StreamingIterable if self.stream else _RestIterable
        return iter(iterable_cls(self.response))

    def __iter__(self):
        """Get API dependent iterator.

        :returns: Iterator for tweets or other message objects in response.
        :raises: TwitterConnectionError, TwitterRequestError
        """
        return self.get_iterator()

    def get_rest_quota(self):
        """Quota information in the REST-only response header.

        :returns: Dictionary of 'remaining' (count), 'limit' (count), 'reset' (time)
        """
        quota = {'remaining': None, 'limit': None, 'reset': None}
        if self.response and 'x-rate-limit-remaining' in self.response.headers:
            headers = self.response.headers
            quota['remaining'] = int(headers['x-rate-limit-remaining'])
            if quota['remaining'] == 0:
                # limit/reset are only meaningful once the quota is exhausted
                quota['limit'] = int(headers['x-rate-limit-limit'])
                quota['reset'] = datetime.fromtimestamp(int(headers['x-rate-limit-reset']))
        return quota

    def close(self):
        """Disconnect stream (blocks with Python 3)."""
        self.response.raw.close()
class _RestIterable(object):

    """Iterate statuses, errors or other iterable objects in a REST API response.

    :param response: The request.Response from a Twitter REST API request
    """

    def __init__(self, response):
        payload = response.json()
        # convert json response into something iterable
        self.results = None
        for key in ('errors', 'statuses', 'users', 'ids'):
            if key in payload:
                self.results = payload[key]
                break
        if self.results is None:
            if 'data' in payload and not isinstance(payload['data'], dict):
                self.results = payload['data']
            elif hasattr(payload, '__iter__') and not isinstance(payload, dict):
                # lists of trend containers get unwrapped to the trends list
                if len(payload) > 0 and 'trends' in payload[0]:
                    self.results = payload[0]['trends']
                else:
                    self.results = payload
            else:
                # a bare JSON object; wrap so iteration yields it once
                self.results = (payload,)

    def __iter__(self):
        """Return a tweet status as a JSON object."""
        return iter(self.results)
class _StreamingIterable(object):

    """Iterate statuses or other objects in a Streaming API response.

    :param response: The request.Response from a Twitter Streaming API request
    """

    def __init__(self, response):
        self.stream = response.raw

    def _iter_stream(self):
        """Stream parser.

        :returns: Next item in the stream (may or may not be 'delimited').
        :raises: TwitterConnectionError
        """
        while True:
            item = None
            buf = bytearray()
            stall_timer = None
            try:
                while True:
                    # read bytes until item boundary reached
                    buf += self.stream.read(1)
                    if not buf:
                        # check for stall (i.e. no data for 90 seconds)
                        if not stall_timer:
                            stall_timer = time.time()
                        elif time.time() - stall_timer > STREAMING_TIMEOUT:
                            raise TwitterConnectionError('Twitter stream stalled')
                    elif stall_timer:
                        stall_timer = None
                    if buf[-2:] == b'\r\n':
                        item = buf[0:-2]
                        if item.isdigit():
                            # use byte size to read next item
                            nbytes = int(item)
                            item = None
                            item = self.stream.read(nbytes)
                        break
                yield item
            except (ConnectionError, ProtocolError, ReadTimeout, ReadTimeoutError,
                    SSLError, ssl.SSLError, socket.error) as e:
                raise TwitterConnectionError(e)
            except AttributeError:
                # The client closed the connection. Under PEP 479 (Python
                # 3.7+) raising StopIteration inside a generator is converted
                # to a RuntimeError, so end the generator with a plain return
                # instead of the former `raise StopIteration`.
                return

    def __iter__(self):
        """Iterator.

        :returns: Tweet status as a JSON object.
        :raises: TwitterConnectionError
        """
        for item in self._iter_stream():
            if item:
                try:
                    yield json.loads(item.decode('utf8'))
                except ValueError as e:
                    # invalid JSON string (possibly an unformatted error message)
                    raise TwitterConnectionError(e)
|
{
"content_hash": "b8293a7c60090d58da465e3e8ceb809b",
"timestamp": "",
"source": "github",
"line_count": 311,
"max_line_length": 95,
"avg_line_length": 36.80707395498392,
"alnum_prop": 0.5448589149995632,
"repo_name": "mpvoss/RickAndMortyWeatherTweets",
"id": "a238649d2001eb6429029365c69c216e6125783a",
"size": "11447",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "env/lib/python3.5/site-packages/TwitterAPI/TwitterAPI.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13428"
}
],
"symlink_target": ""
}
|
"""
permissions.py: Permisos adicionales para los Api rest y Apis vies
@author Camilo Ramírez
@contact camilolinchis@gmail.com
camilortte@hotmail.com
@camilortte on Twitter
@copyright Copyright 2014-2015, RecomendadorUD
@license GPL
@date 2014-10-10
@satus Pre-Alpha
@version= 0..215
"""
from rest_framework import permissions
class IsOwnerOrReadOnly(permissions.BasePermission):
    """
    Custom permission to only allow owners of an object to edit it.
    """

    def has_object_permission(self, request, view, obj):
        # Read-only requests (GET, HEAD, OPTIONS) are open to everyone;
        # write requests are restricted to the object's owner.
        is_read_only = request.method in permissions.SAFE_METHODS
        return is_read_only or obj.owner == request.user
|
{
"content_hash": "10aaa3bca151261647cf3631d1efbad2",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 73,
"avg_line_length": 28.424242424242426,
"alnum_prop": 0.6428571428571429,
"repo_name": "camilortte/RecomendadorUD",
"id": "983e97801869e59d76ffde2c17a10c6bfb15c635",
"size": "965",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/establishment_system/permissions.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "741"
},
{
"name": "CSS",
"bytes": "169155"
},
{
"name": "Go",
"bytes": "7075"
},
{
"name": "HTML",
"bytes": "267644"
},
{
"name": "JavaScript",
"bytes": "1055584"
},
{
"name": "PHP",
"bytes": "52919"
},
{
"name": "Python",
"bytes": "400602"
},
{
"name": "Shell",
"bytes": "924"
}
],
"symlink_target": ""
}
|
"""
App callables used to test post data
"""
def test_return_post_data(environ, start_response):
post_data = environ['wsgi.input'].read()
return_value = repr(post_data)
start_response("200 OK", [('content-length', '%s' % len(return_value))])
return [return_value]
|
{
"content_hash": "c8e707aa14d8b0f24fdf2d90edc2669d",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 76,
"avg_line_length": 31.88888888888889,
"alnum_prop": 0.6445993031358885,
"repo_name": "alvin319/CarnotKE",
"id": "01477019e8bb5c7f136882b606aeded825a6ecb8",
"size": "808",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "jyhton/tests/modjy/test_apps_dir/post_data_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1605"
},
{
"name": "Batchfile",
"bytes": "48974"
},
{
"name": "C",
"bytes": "2514"
},
{
"name": "CSS",
"bytes": "15212"
},
{
"name": "GAP",
"bytes": "129853"
},
{
"name": "Groff",
"bytes": "21"
},
{
"name": "Groovy",
"bytes": "6780"
},
{
"name": "HTML",
"bytes": "33215526"
},
{
"name": "Java",
"bytes": "14173221"
},
{
"name": "JavaScript",
"bytes": "827"
},
{
"name": "Makefile",
"bytes": "2261"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "Perl",
"bytes": "19642"
},
{
"name": "Python",
"bytes": "26386087"
},
{
"name": "R",
"bytes": "1370"
},
{
"name": "Shell",
"bytes": "103473"
},
{
"name": "Visual Basic",
"bytes": "481"
},
{
"name": "XSLT",
"bytes": "436870"
}
],
"symlink_target": ""
}
|
from django.db import migrations
class Migration(migrations.Migration):
    """
    Remove the old ClassicalWork and DancePiece relationships to Events,
    now we've copied their data to the new version with a through model.
    """

    dependencies = [
        ("spectator_events", "0013_copy_classical_and_dance_data"),
    ]

    # Both M2M fields were superseded by through-model versions; the data
    # was copied across in migration 0013, so they can now be dropped.
    operations = [
        migrations.RemoveField(model_name="event", name=field_name)
        for field_name in ("classicalworks", "dancepieces")
    ]
|
{
"content_hash": "5a2c0695ba92b404fdbf74139631f97d",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 72,
"avg_line_length": 25,
"alnum_prop": 0.5965217391304348,
"repo_name": "philgyford/django-spectator",
"id": "555fea5d61d82cc84b35c8268cb137d159b074e1",
"size": "622",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "spectator/events/migrations/0014_remove_old_classical_dance.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "97"
},
{
"name": "HTML",
"bytes": "57319"
},
{
"name": "JavaScript",
"bytes": "15849"
},
{
"name": "Python",
"bytes": "464674"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
import time
from flexget import plugin
from flexget.event import event
from flexget.utils.log import log_once
from flexget.utils.titles.movie import MovieParser
from flexget.utils.titles.series import SeriesParser
from .parser_common import ParseWarning
log = logging.getLogger('parser_internal')
class ParserInternal(object):
    """Parser plugin backed by FlexGet's internal title parsers.

    Implements the ``movie_parser`` and ``series_parser`` plugin interfaces
    using :class:`MovieParser` and :class:`SeriesParser`.
    """

    # movie_parser API
    @plugin.priority(1)
    def parse_movie(self, data, **kwargs):
        """Parse `data` as a movie title and return the populated parser.

        :param data: title string to parse
        :returns: the MovieParser instance after parsing
        """
        log.debug('Parsing movie: `%s` kwargs: %s', data, kwargs)
        return self._run_parser(MovieParser(), data)

    # series_parser API
    @plugin.priority(1)
    def parse_series(self, data, **kwargs):
        """Parse `data` as a series title and return the populated parser.

        :param data: title string to parse
        :param kwargs: forwarded to the SeriesParser constructor
        :returns: the SeriesParser instance after parsing
        """
        log.debug('Parsing series: `%s` kwargs: %s', data, kwargs)
        return self._run_parser(SeriesParser(**kwargs), data)

    def _run_parser(self, parser, data):
        """Run `parser.parse(data)` with timing and ParseWarning logging."""
        # time.clock() was deprecated in Python 3.3 and removed in 3.8;
        # prefer perf_counter() and fall back to clock() on Python 2.
        timer = getattr(time, 'perf_counter', None) or time.clock
        start = timer()
        try:
            parser.parse(data)
        except ParseWarning as pw:
            log_once(pw.value, logger=log)
        end = timer()
        log.debug('Parsing result: %s (in %s ms)', parser, (end - start) * 1000)
        return parser
@event('plugin.register')
def register_plugin():
    # Register under both parser interfaces so FlexGet can select this
    # implementation for either movie or series title parsing.
    plugin.register(ParserInternal, 'parser_internal', interfaces=['movie_parser', 'series_parser'], api_ver=2)
|
{
"content_hash": "f8b5338cd900d712a6875249eff9b2fa",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 111,
"avg_line_length": 32.14,
"alnum_prop": 0.6502800248911015,
"repo_name": "OmgOhnoes/Flexget",
"id": "2695c8c91ec9ae0851fc03199aab3ca0646723a6",
"size": "1607",
"binary": false,
"copies": "5",
"ref": "refs/heads/develop",
"path": "flexget/plugins/parsers/parser_internal.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11875"
},
{
"name": "HTML",
"bytes": "79376"
},
{
"name": "JavaScript",
"bytes": "263723"
},
{
"name": "Python",
"bytes": "3324701"
},
{
"name": "SRecode Template",
"bytes": "3"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
    """Build the Intangible template for the advanced bone armor segment schematic."""
    obj = Intangible()

    obj.template = "object/draft_schematic/armor/shared_armor_segment_bone_advanced.iff"
    obj.attribute_template_id = -1
    obj.stfName("string_id_table","")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return obj
|
{
"content_hash": "19f613eef4df7562a1648b06b304af19",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 88,
"avg_line_length": 24.384615384615383,
"alnum_prop": 0.7003154574132492,
"repo_name": "anhstudios/swganh",
"id": "2719dcb71f0a83ff605af538261d9a1f798de3ce",
"size": "462",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/draft_schematic/armor/shared_armor_segment_bone_advanced.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
"""
Created on Fri May 13 22:59:53 2016
@author: Methinee
"""
import pandas as pd
import numpy as np
import pickle
# Load the merged per-subject enrolment records (subjects with >20 rows).
df_file = pd.read_csv('../data/df_sub_more20_merge.csv',delimiter=",", skip_blank_lines = True,
                  error_bad_lines=False)
headers=list(df_file.columns.values)

subjects = []
countSub = 0
#Create dictionary of list subjects
# NOTE(review): despite the comment above, `subjects` is a list used as an
# order-preserving de-duplicator; countSub ends up equal to len(subjects).
for sub in df_file['3COURSEID']:
    if sub not in subjects:
        subjects.append(sub)
        countSub = countSub+1
# Function definition is here
def classify( X ):
    """Load the pickled CS213 decision tree and print the predicted grade for feature vector X."""
    with open('tree/treeCS213.pic', 'rb') as pickleFile:
        clf2 = pickle.load(pickleFile)
    clf2.predict(X)
    # Grades listed best-to-worst; the model's class index is applied to the
    # reversed list. NOTE(review): predict() returns an array-like, so
    # indexing the list with it presumably relies on a scalar-like result --
    # verify against the pickled model.
    Grade=['A', 'B', 'C' , 'D' , 'F' , 'W' , 'S' , 'U' ,'na']
    grade_predicted = Grade[::-1][clf2.predict(X)]
    print "prediction: ",grade_predicted
    return
#Example1: Create lable X from Pae's Transcript.. result of CS213 should be "C"
#df_labelX = pd.read_csv('../data/test_labelX.csv',delimiter=",", skip_blank_lines = True,
#                  error_bad_lines=False)
#B = df_labelX.as_matrix()
#X = B[:,6:209] #get all subject without term,year,province,schGpa

#Example2: Create lable X from first record of csv only cs213.. result of CS213 should be "C"
subject = 'CS213'
print subject
df_sub = df_file[df_file['3COURSEID'] == subject]
# Shuffle the rows so the record fed to classify() varies between runs.
df_sub = df_sub.iloc[np.random.permutation(len(df_sub))]
A = df_sub.as_matrix()
# Columns 6..208 hold the per-subject grade features; the earlier columns
# (term, year, province, schGpa) are dropped.
X = A[0,6:209]

classify( X );
|
{
"content_hash": "db8681c33bb562c056333fb8cddd7d24",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 96,
"avg_line_length": 29.020408163265305,
"alnum_prop": 0.6427566807313643,
"repo_name": "wasit7/book_pae",
"id": "dc483c94913859de783214d825f6f0d64fe11d11",
"size": "1446",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pae/forcast/src/$RP6EMGT.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "345113"
},
{
"name": "HTML",
"bytes": "217393"
},
{
"name": "JavaScript",
"bytes": "42775"
},
{
"name": "Jupyter Notebook",
"bytes": "3075174"
},
{
"name": "Python",
"bytes": "263859"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, division, absolute_import, print_function
import re
from datetime import datetime
from ._asn1 import Certificate, int_from_bytes, timezone
from ._cipher_suites import CIPHER_SUITE_MAP
from .errors import TLSVerificationError, TLSDisconnectError, TLSError
__all__ = [
'detect_client_auth_request',
'extract_chain',
'get_dh_params_length',
'parse_alert',
'parse_handshake_messages',
'parse_session_info',
'parse_tls_records',
'raise_client_auth',
'raise_dh_params',
'raise_disconnection',
'raise_expired_not_yet_valid',
'raise_handshake',
'raise_hostname',
'raise_no_issuer',
'raise_protocol_error',
'raise_revoked',
'raise_self_signed',
'raise_verification',
'raise_weak_signature',
]
def extract_chain(server_handshake_bytes):
    """
    Extracts the X.509 certificates from the server handshake bytes for use
    when debugging

    :param server_handshake_bytes:
        A byte string of the handshake data received from the server

    :return:
        A list of asn1crypto.x509.Certificate objects
    """

    # Locate the first Certificate (0x0b) handshake message
    chain_bytes = None
    for record_type, _, record_data in parse_tls_records(server_handshake_bytes):
        if record_type != b'\x16':
            continue
        for message_type, message_data in parse_handshake_messages(record_data):
            if message_type == b'\x0b':
                chain_bytes = message_data
                break
        if chain_bytes:
            break

    certs = []
    if chain_bytes:
        # The first 3 bytes are the cert chain length
        pointer = 3
        while pointer < len(chain_bytes):
            cert_length = int_from_bytes(chain_bytes[pointer:pointer + 3])
            cert_start = pointer + 3
            cert_end = cert_start + cert_length
            pointer = cert_end
            certs.append(Certificate.load(chain_bytes[cert_start:cert_end]))

    return certs
def detect_client_auth_request(server_handshake_bytes):
    """
    Determines if a CertificateRequest message is sent from the server asking
    the client for a certificate

    :param server_handshake_bytes:
        A byte string of the handshake data received from the server

    :return:
        A boolean - if a client certificate request was found
    """

    for record_type, _, record_data in parse_tls_records(server_handshake_bytes):
        # Only handshake (0x16) records can carry a CertificateRequest (0x0d)
        if record_type != b'\x16':
            continue
        messages = parse_handshake_messages(record_data)
        if any(message_type == b'\x0d' for message_type, _ in messages):
            return True
    return False
def get_dh_params_length(server_handshake_bytes):
    """
    Determines the length of the DH params from the ServerKeyExchange

    :param server_handshake_bytes:
        A byte string of the handshake data received from the server

    :return:
        None or an integer of the bit size of the DH parameters
    """

    # Locate the first ServerKeyExchange (0x0c) handshake message
    dh_params_bytes = None
    for record_type, _, record_data in parse_tls_records(server_handshake_bytes):
        if record_type != b'\x16':
            continue
        for message_type, message_data in parse_handshake_messages(record_data):
            if message_type == b'\x0c':
                dh_params_bytes = message_data
                break
        if dh_params_bytes:
            break

    if dh_params_bytes:
        # The first 2 bytes give the byte length of p; convert to bits
        return int_from_bytes(dh_params_bytes[0:2]) * 8
    return None
def parse_alert(server_handshake_bytes):
    """
    Parses the handshake for protocol alerts

    :param server_handshake_bytes:
        A byte string of the handshake data received from the server

    :return:
        None or an 2-element tuple of integers:
         0: 1 (warning) or 2 (fatal)
         1: The alert description (see https://tools.ietf.org/html/rfc5246#section-7.2)
    """

    for record_type, _, record_data in parse_tls_records(server_handshake_bytes):
        # Alert records have type 0x15
        if record_type != b'\x15':
            continue
        if len(record_data) != 2:
            return None
        level, description = record_data[0:1], record_data[1:2]
        return (int_from_bytes(level), int_from_bytes(description))
    return None
def parse_session_info(server_handshake_bytes, client_handshake_bytes):
    """
    Parse the TLS handshake from the client to the server to extract information
    including the cipher suite selected, if compression is enabled, the
    session id and if a new or reused session ticket exists.

    :param server_handshake_bytes:
        A byte string of the handshake data received from the server

    :param client_handshake_bytes:
        A byte string of the handshake data sent to the server

    :return:
        A dict with the following keys:
         - "protocol": unicode string
         - "cipher_suite": unicode string
         - "compression": boolean
         - "session_id": "new", "reused" or None
         - "session_ticket: "new", "reused" or None
    """

    protocol = None
    cipher_suite = None
    compression = False
    session_id = None
    session_ticket = None
    server_session_id = None
    client_session_id = None
    # Scan the server's handshake records for the ServerHello
    for record_type, _, record_data in parse_tls_records(server_handshake_bytes):
        if record_type != b'\x16':
            continue
        for message_type, message_data in parse_handshake_messages(record_data):
            # Ensure we are working with a ServerHello message
            if message_type != b'\x02':
                continue
            # First 2 bytes of the ServerHello are the negotiated version
            protocol = {
                b'\x03\x00': "SSLv3",
                b'\x03\x01': "TLSv1",
                b'\x03\x02': "TLSv1.1",
                b'\x03\x03': "TLSv1.2",
                b'\x03\x04': "TLSv1.3",
            }[message_data[0:2]]
            # Byte 34 (after version + 32-byte random) is the session id length
            session_id_length = int_from_bytes(message_data[34:35])
            if session_id_length > 0:
                server_session_id = message_data[35:35 + session_id_length]
            cipher_suite_start = 35 + session_id_length
            cipher_suite_bytes = message_data[cipher_suite_start:cipher_suite_start + 2]
            cipher_suite = CIPHER_SUITE_MAP[cipher_suite_bytes]
            # A single non-null compression method byte means compression is on
            compression_start = cipher_suite_start + 2
            compression = message_data[compression_start:compression_start + 1] != b'\x00'
            # Extension type 35 is the session ticket extension
            extensions_length_start = compression_start + 1
            extensions_data = message_data[extensions_length_start:]
            for extension_type, extension_data in _parse_hello_extensions(extensions_data):
                if extension_type == 35:
                    session_ticket = "new"
                    break
            break
    # Scan the client's handshake records for the ClientHello
    for record_type, _, record_data in parse_tls_records(client_handshake_bytes):
        if record_type != b'\x16':
            continue
        for message_type, message_data in parse_handshake_messages(record_data):
            # Ensure we are working with a ClientHello message
            if message_type != b'\x01':
                continue
            session_id_length = int_from_bytes(message_data[34:35])
            if session_id_length > 0:
                client_session_id = message_data[35:35 + session_id_length]
            # Unlike the ServerHello, the ClientHello carries variable-length
            # cipher suite and compression method lists
            cipher_suite_start = 35 + session_id_length
            cipher_suite_length = int_from_bytes(message_data[cipher_suite_start:cipher_suite_start + 2])
            compression_start = cipher_suite_start + 2 + cipher_suite_length
            compression_length = int_from_bytes(message_data[compression_start:compression_start + 1])
            # On subsequent requests, the session ticket will only be seen
            # in the ClientHello message
            if server_session_id is None and session_ticket is None:
                extensions_length_start = compression_start + 1 + compression_length
                extensions_data = message_data[extensions_length_start:]
                for extension_type, extension_data in _parse_hello_extensions(extensions_data):
                    if extension_type == 35:
                        session_ticket = "reused"
                        break
            break
    # The session is reused only if the server echoed the client's session id
    if server_session_id is not None:
        if client_session_id is None:
            session_id = "new"
        else:
            if client_session_id != server_session_id:
                session_id = "new"
            else:
                session_id = "reused"
    return {
        "protocol": protocol,
        "cipher_suite": cipher_suite,
        "compression": compression,
        "session_id": session_id,
        "session_ticket": session_ticket,
    }
def parse_tls_records(data):
    """
    Creates a generator returning tuples of information about each record
    in a byte string of data from a TLS client or server. Stops as soon as it
    find a ChangeCipherSpec message since all data from then on is encrypted.

    :param data:
        A byte string of TLS records

    :return:
        A generator that yields 3-element tuples:
        [0] Byte string of record type
        [1] Byte string of protocol version
        [2] Byte string of record data
    """

    pos = 0
    total = len(data)
    while pos < total:
        record_type = data[pos:pos + 1]
        # Don't try to parse any more once the ChangeCipherSpec is found
        if record_type == b'\x14':
            break
        record_version = data[pos + 1:pos + 3]
        # Bytes 3-4 of the 5-byte record header hold the payload length
        record_length = int_from_bytes(data[pos + 3:pos + 5])
        yield (record_type, record_version, data[pos + 5:pos + 5 + record_length])
        pos += 5 + record_length
def parse_handshake_messages(data):
    """
    Creates a generator returning tuples of information about each message in
    a byte string of data from a TLS handshake record

    :param data:
        A byte string of a TLS handshake record data

    :return:
        A generator that yields 2-element tuples:
        [0] Byte string of message type
        [1] Byte string of message data
    """

    pos = 0
    total = len(data)
    while pos < total:
        msg_type = data[pos:pos + 1]
        # The 3 bytes after the type byte hold the message length
        msg_length = int_from_bytes(data[pos + 1:pos + 4])
        yield (msg_type, data[pos + 4:pos + 4 + msg_length])
        pos += 4 + msg_length
def _parse_hello_extensions(data):
    """
    Creates a generator returning tuples of information about each extension
    from a byte string of extension data contained in a ServerHello or
    ClientHello message

    :param data:
        A byte string of a extension data from a TLS ServerHello or ClientHello
        message

    :return:
        A generator that yields 2-element tuples:
        [0] Byte string of extension type
        [1] Byte string of extension data
    """

    if data == b'':
        return

    # The first 2 bytes hold the total length of all extensions
    total_length = int_from_bytes(data[0:2])
    pos = 2
    end = 2 + total_length
    while pos < end:
        ext_type = int_from_bytes(data[pos:pos + 2])
        ext_length = int_from_bytes(data[pos + 2:pos + 4])
        yield (ext_type, data[pos + 4:pos + 4 + ext_length])
        pos += 4 + ext_length
def raise_hostname(certificate, hostname):
    """
    Raises a TLSVerificationError due to a hostname mismatch

    :param certificate:
        An asn1crypto.x509.Certificate object

    :raises:
        TLSVerificationError
    """

    # A dotted quad, or anything containing ':' (IPv6), is treated as an IP
    is_ip = bool(re.match('^\\d+\\.\\d+\\.\\d+\\.\\d+$', hostname)) or ':' in hostname
    label = ('IP address %s' if is_ip else 'domain name %s') % hostname
    message = 'Server certificate verification failed - %s does not match' % label

    valid_ips = ', '.join(certificate.valid_ips)
    valid_domains = ', '.join(certificate.valid_domains)

    if valid_domains:
        message += ' valid domains: %s' % valid_domains
    if valid_domains and valid_ips:
        message += ' or'
    if valid_ips:
        message += ' valid IP addresses: %s' % valid_ips

    raise TLSVerificationError(message, certificate)
def raise_verification(certificate):
    """
    Raises a generic TLSVerificationError

    :param certificate:
        An asn1crypto.x509.Certificate object

    :raises:
        TLSVerificationError
    """

    raise TLSVerificationError('Server certificate verification failed', certificate)
def raise_weak_signature(certificate):
    """
    Raises a TLSVerificationError when a certificate uses a weak signature
    algorithm

    :param certificate:
        An asn1crypto.x509.Certificate object

    :raises:
        TLSVerificationError
    """

    raise TLSVerificationError(
        'Server certificate verification failed - weak certificate signature algorithm',
        certificate)
def raise_client_auth():
    """
    Raises a TLSError indicating client authentication is required

    :raises:
        TLSError
    """

    raise TLSError('TLS handshake failed - client authentication required')
def raise_revoked(certificate):
    """
    Raises a TLSVerificationError due to the certificate being revoked

    :param certificate:
        An asn1crypto.x509.Certificate object

    :raises:
        TLSVerificationError
    """

    raise TLSVerificationError(
        'Server certificate verification failed - certificate has been revoked',
        certificate)
def raise_no_issuer(certificate):
    """
    Raises a TLSVerificationError due to no issuer certificate found in trust
    roots

    :param certificate:
        An asn1crypto.x509.Certificate object

    :raises:
        TLSVerificationError
    """

    raise TLSVerificationError(
        'Server certificate verification failed - certificate issuer not found in trusted root certificate store',
        certificate)
def raise_self_signed(certificate):
    """
    Raises a TLSVerificationError due to a self-signed certificate

    :param certificate:
        An asn1crypto.x509.Certificate object

    :raises:
        TLSVerificationError
    """

    raise TLSVerificationError(
        'Server certificate verification failed - certificate is self-signed',
        certificate)
def raise_lifetime_too_long(certificate):
    """
    Raises a TLSVerificationError due to a certificate lifetime exceeding
    the CAB forum certificate lifetime limit

    :param certificate:
        An asn1crypto.x509.Certificate object

    :raises:
        TLSVerificationError
    """

    raise TLSVerificationError(
        'Server certificate verification failed - certificate lifetime is too long',
        certificate)
def raise_expired_not_yet_valid(certificate):
    """
    Raises a TLSVerificationError due to certificate being expired, or not yet
    being valid

    :param certificate:
        An asn1crypto.x509.Certificate object

    :raises:
        TLSVerificationError
    """

    validity = certificate['tbs_certificate']['validity']
    not_after = validity['not_after'].native
    not_before = validity['not_before'].native

    now = datetime.now(timezone.utc)

    if not_before > now:
        formatted_before = not_before.strftime('%Y-%m-%d %H:%M:%SZ')
        message = 'Server certificate verification failed - certificate not valid until %s' % formatted_before
    elif not_after < now:
        formatted_after = not_after.strftime('%Y-%m-%d %H:%M:%SZ')
        message = 'Server certificate verification failed - certificate expired %s' % formatted_after
    else:
        # Previously `message` was left unbound when the certificate was
        # within its validity period, causing an UnboundLocalError instead of
        # a useful exception; provide a generic message for that case.
        message = 'Server certificate verification failed - certificate not valid'

    raise TLSVerificationError(message, certificate)
def raise_disconnection():
    """
    Raises a TLSDisconnectError due to a disconnection

    :raises:
        TLSDisconnectError
    """

    raise TLSDisconnectError('The remote end closed the connection')
def raise_protocol_error(server_handshake_bytes):
    """
    Raises a TLSError due to a protocol error

    :param server_handshake_bytes:
        A byte string of the handshake data received from the server

    :raises:
        TLSError
    """

    detected = detect_other_protocol(server_handshake_bytes)
    # Name the offending protocol when it can be identified
    suffix = detected if detected else 'a different protocol'
    raise TLSError('TLS protocol error - server responded using %s' % suffix)
def raise_handshake():
    """
    Raises a TLSError due to a handshake error

    :raises:
        TLSError
    """

    raise TLSError('TLS handshake failed')
def raise_protocol_version():
    """
    Raises a TLSError due to a TLS version incompatibility

    :raises:
        TLSError
    """

    raise TLSError('TLS handshake failed - protocol version error')
def raise_dh_params():
    """
    Raises a TLSError due to weak DH params

    :raises:
        TLSError
    """

    raise TLSError('TLS handshake failed - weak DH parameters')
def detect_other_protocol(server_handshake_bytes):
    """
    Looks at the server handshake bytes to try and detect a different protocol

    :param server_handshake_bytes:
        A byte string of the handshake data received from the server

    :return:
        None, or a unicode string of "FTP", "HTTP", "IMAP", "POP3", "SMTP"
        (the docstring previously listed these in lowercase, which did not
        match the actual return values)
    """

    if server_handshake_bytes[0:5] == b'HTTP/':
        return 'HTTP'

    if server_handshake_bytes[0:4] == b'220 ':
        # Both FTP and SMTP servers greet with "220 "; an FTP banner
        # normally mentions "ftp" somewhere on the first line
        if re.match(b'^[^\r\n]*ftp', server_handshake_bytes, re.I):
            return 'FTP'
        else:
            return 'SMTP'

    if server_handshake_bytes[0:4] == b'220-':
        # Multi-line FTP greeting
        return 'FTP'

    if server_handshake_bytes[0:4] == b'+OK ':
        return 'POP3'

    if server_handshake_bytes[0:4] == b'* OK' or server_handshake_bytes[0:9] == b'* PREAUTH':
        return 'IMAP'

    return None
|
{
"content_hash": "e55578f869e404938b0abd3cb17306b9",
"timestamp": "",
"source": "github",
"line_count": 603,
"max_line_length": 119,
"avg_line_length": 29.567164179104477,
"alnum_prop": 0.629816590947333,
"repo_name": "wbond/oscrypto",
"id": "260e9cff73772187378e1b96fb7f716ddda9dc74",
"size": "17845",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oscrypto/_tls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1104599"
}
],
"symlink_target": ""
}
|
from djam.riffs.models import ModelRiff
from django.template.loader import render_to_string
from stepping_out.forms import (LessonCreateForm, LessonTemplateForm,
ScheduleBaseForm)
from stepping_out.models import Lesson, LessonTemplate, Series
class LessonRiff(ModelRiff):
    """Djam riff configuration for :class:`Lesson` objects."""

    model = Lesson

    # Creation uses a dedicated form and a minimal fieldset.
    create_kwargs = dict(
        form_class=LessonCreateForm,
        fieldsets=(
            (None, dict(fields=('series', 'template', 'start_day'))),
        ),
    )

    # Editing exposes the full set of lesson fields, grouped by topic.
    update_kwargs = dict(
        fieldsets=(
            (None, dict(fields=('name', 'description', 'series', 'sites'))),
            ('Scheduling', dict(fields=('location', 'start', 'end', 'dance',
                                        'teachers'))),
            ('Pricing', dict(fields=('price', 'student_price', 'custom_price',
                                     'dance_included'))),
        ),
    )

    list_kwargs = dict(
        order=('-start',),
        filters=('location', 'series'),
        columns=('name', 'start', 'location', 'series'),
        paginate_by=20,
    )
class LessonTemplateRiff(ModelRiff):
    """Djam riff configuration for :class:`LessonTemplate` objects.

    ``update_kwargs`` mirrors ``create_kwargs`` but additionally exposes the
    ``dance_template`` field in the Scheduling fieldset.
    """

    model = LessonTemplate

    create_kwargs = {
        'form_class': LessonTemplateForm,
        'fieldsets': (
            (None, {
                'fields': ('name', 'description', 'sites'),
            }),
            ('Scheduling', {
                'fields': ('location', 'start_time', 'end_time'),
            }),
            ('Pricing', {
                'fields': ('price', 'student_price', 'custom_price',
                           'dance_included')
            })
        )
    }

    # BUG FIX: the previous code did create_kwargs.copy() and then mutated
    # the nested Scheduling fields dict in place. dict.copy() is shallow, so
    # the extra 'dance_template' field leaked into create_kwargs as well.
    # Build a fresh fieldsets tuple for the update view instead.
    update_kwargs = create_kwargs.copy()
    update_kwargs['fieldsets'] = (
        create_kwargs['fieldsets'][0],
        ('Scheduling', {
            'fields': create_kwargs['fieldsets'][1][1]['fields'] + ('dance_template',),
        }),
        create_kwargs['fieldsets'][2],
    )
def _get_schedule(obj):
    """Render the schedule snippet for *obj*, for use as a list column."""
    context = {'scheduled_dance': obj}
    return render_to_string('stepping_out/scheduleddance/_schedule.html',
                            context)
# Column header used by the riff list view.
_get_schedule.short_description = 'Schedule'
class SeriesRiff(ModelRiff):
    """Djam riff configuration for :class:`Series` objects."""

    model = Series

    create_kwargs = dict(
        form_class=ScheduleBaseForm,
        fieldsets=(
            (None, dict(fields=('name', 'banner', 'description'))),
            ('Scheduling', dict(fields=('weekday', 'weeks', 'start_day',
                                        'end_day', 'lesson_template'))),
        ),
    )

    # Create and update views deliberately share the same configuration.
    update_kwargs = create_kwargs

    list_kwargs = dict(
        filters=('weekday',),
        columns=('name', _get_schedule),
    )
# Riffs exposed by this module for djam registration.
riffs = [LessonRiff, LessonTemplateRiff, SeriesRiff]
|
{
"content_hash": "7b9b88f7057d25bb4f2f48c14b9b3a87",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 73,
"avg_line_length": 27.46875,
"alnum_prop": 0.4774364808494501,
"repo_name": "melinath/django-stepping-out",
"id": "e6d0b24103c4c4f8a834624289c2cdc5034ea286",
"size": "2637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stepping_out/riffs.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "1323"
},
{
"name": "JavaScript",
"bytes": "151"
},
{
"name": "Python",
"bytes": "418531"
},
{
"name": "Ruby",
"bytes": "778"
}
],
"symlink_target": ""
}
|
from openstack.tests.functional import base
class TestMeter(base.BaseFunctionalTest):
    """Functional tests for the telemetry meters listing."""

    def test_list(self):
        # Collect the names of all meters visible through the connection;
        # the swift object-count meter is expected to be present
        meter_names = {meter.name for meter in self.conn.telemetry.meters()}
        self.assertIn('storage.objects', meter_names)
|
{
"content_hash": "ad9810d0c47c4808d35fab7a706b8d61",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 67,
"avg_line_length": 28.75,
"alnum_prop": 0.7043478260869566,
"repo_name": "sjsucohort6/openstack",
"id": "7c639d5ea3a9fae7b466b894f864ef4da9ffd537",
"size": "776",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/venv/lib/python2.7/site-packages/openstack/tests/functional/telemetry/v2/test_meter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "410"
},
{
"name": "CSS",
"bytes": "144982"
},
{
"name": "FreeMarker",
"bytes": "14104"
},
{
"name": "HTML",
"bytes": "8308"
},
{
"name": "Java",
"bytes": "243125"
},
{
"name": "JavaScript",
"bytes": "1493715"
},
{
"name": "Python",
"bytes": "16921939"
},
{
"name": "Shell",
"bytes": "13926"
}
],
"symlink_target": ""
}
|
"""
WSGI config for graph project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings module unless the environment
# already specifies one; must run before the application is created.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "app.settings")

# Module-level WSGI callable that WSGI servers look up by name.
application = get_wsgi_application()
|
{
"content_hash": "ff5f3e141038cc987ddab790d65c6834",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 24.0625,
"alnum_prop": 0.7662337662337663,
"repo_name": "sergeysynergy/graph",
"id": "5ce6ac50a710e5d123e49e54334e6945718122e0",
"size": "385",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/app/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "22894"
},
{
"name": "HTML",
"bytes": "25174"
},
{
"name": "JavaScript",
"bytes": "1798830"
},
{
"name": "Python",
"bytes": "94985"
},
{
"name": "Shell",
"bytes": "80"
}
],
"symlink_target": ""
}
|
"""
ML Estimation of Spatial Error Model
"""
__author__ = "Luc Anselin luc.anselin@asu.edu,\
Serge Rey srey@asu.edu, \
Levi Wolf levi.john.wolf@asu.edu"
import numpy as np
import numpy.linalg as la
from scipy import sparse as sp
from scipy.sparse.linalg import splu as SuperLU
import pysal as ps
from utils import RegressionPropsY, RegressionPropsVM
import diagnostics as DIAG
import user_output as USER
import summary_output as SUMMARY
import regimes as REGI
from w_utils import symmetrize
try:
from scipy.optimize import minimize_scalar
minimize_scalar_available = True
except ImportError:
minimize_scalar_available = False
__all__ = ["ML_Error"]
class BaseML_Error(RegressionPropsY, RegressionPropsVM, REGI.Regimes_Frame):

    """
    ML estimation of the spatial error model (note no consistency
    checks, diagnostics or constants added); Anselin (1988) [Anselin1988]_

    Parameters
    ----------
    y            : array
                   nx1 array for dependent variable
    x            : array
                   Two dimensional array with n rows and one column for each
                   independent (exogenous) variable, excluding the constant
    w            : Sparse matrix
                   Spatial weights sparse matrix
    method       : string
                   if 'full', brute force calculation (full matrix expressions)
                   if 'ord', Ord eigenvalue calculation
                   if 'LU', LU decomposition for sparse matrices
    epsilon      : float
                   tolerance criterion in mimimize_scalar function and inverse_product
    regimes_att  : dictionary
                   Dictionary containing elements to be used in case of a regimes model,
                   i.e. 'x' before regimes, 'regimes' list and 'cols2regi'

    Attributes
    ----------
    betas        : array
                   kx1 array of estimated coefficients
    lam          : float
                   estimate of spatial autoregressive coefficient
    u            : array
                   nx1 array of residuals
    e_filtered   : array
                   spatially filtered residuals
    predy        : array
                   nx1 array of predicted y values
    n            : integer
                   Number of observations
    k            : integer
                   Number of variables for which coefficients are estimated
                   (including the constant, excluding the rho)
    y            : array
                   nx1 array for dependent variable
    x            : array
                   Two dimensional array with n rows and one column for each
                   independent (exogenous) variable, including the constant
    method       : string
                   log Jacobian method
                   if 'full': brute force (full matrix computations)
                   if 'ord' : Ord eigenvalue method
    epsilon      : float
                   tolerance criterion used in minimize_scalar function and inverse_product
    mean_y       : float
                   Mean of dependent variable
    std_y        : float
                   Standard deviation of dependent variable
    vm           : array
                   Variance covariance matrix (k+1 x k+1) - includes lambda
    vm1          : array
                   2x2 array of variance covariance for lambda, sigma
    sig2         : float
                   Sigma squared used in computations
    logll        : float
                   maximized log-likelihood (including constant terms)

    Examples
    --------
    >>> import numpy as np
    >>> import pysal as ps
    >>> np.set_printoptions(suppress=True) #prevent scientific format
    >>> db = ps.open(ps.examples.get_path("south.dbf"),'r')
    >>> y_name = "HR90"
    >>> y = np.array(db.by_col(y_name))
    >>> y.shape = (len(y),1)
    >>> x_names = ["RD90","PS90","UE90","DV90"]
    >>> x = np.array([db.by_col(var) for var in x_names]).T
    >>> x = np.hstack((np.ones((len(y),1)),x))
    >>> ww = ps.open(ps.examples.get_path("south_q.gal"))
    >>> w = ww.read()
    >>> ww.close()
    >>> w.transform = 'r'
    >>> mlerr = BaseML_Error(y,x,w) #doctest: +SKIP
    >>> "{0:.6f}".format(mlerr.lam) #doctest: +SKIP
    '0.299078'
    >>> np.around(mlerr.betas, decimals=4) #doctest: +SKIP
    array([[ 6.1492],
           [ 4.4024],
           [ 1.7784],
           [-0.3781],
           [ 0.4858],
           [ 0.2991]])
    >>> "{0:.6f}".format(mlerr.mean_y) #doctest: +SKIP
    '9.549293'
    >>> "{0:.6f}".format(mlerr.std_y) #doctest: +SKIP
    '7.038851'
    >>> np.diag(mlerr.vm) #doctest: +SKIP
    array([ 1.06476526,  0.05548248,  0.04544514,  0.00614425,  0.01481356,
            0.00143001])
    >>> "{0:.6f}".format(mlerr.sig2[0][0]) #doctest: +SKIP
    '32.406854'
    >>> "{0:.6f}".format(mlerr.logll) #doctest: +SKIP
    '-4471.407067'
    >>> mlerr1 = BaseML_Error(y,x,w,method='ord') #doctest: +SKIP
    >>> "{0:.6f}".format(mlerr1.lam) #doctest: +SKIP
    '0.299078'
    >>> np.around(mlerr1.betas, decimals=4) #doctest: +SKIP
    array([[ 6.1492],
           [ 4.4024],
           [ 1.7784],
           [-0.3781],
           [ 0.4858],
           [ 0.2991]])
    >>> "{0:.6f}".format(mlerr1.mean_y) #doctest: +SKIP
    '9.549293'
    >>> "{0:.6f}".format(mlerr1.std_y) #doctest: +SKIP
    '7.038851'
    >>> np.around(np.diag(mlerr1.vm), decimals=4) #doctest: +SKIP
    array([ 1.0648,  0.0555,  0.0454,  0.0061,  0.0148,  0.0014])
    >>> "{0:.4f}".format(mlerr1.sig2[0][0]) #doctest: +SKIP
    '32.4069'
    >>> "{0:.4f}".format(mlerr1.logll) #doctest: +SKIP
    '-4471.4071'
    """

    def __init__(self, y, x, w, method='full', epsilon=0.0000001, regimes_att=None):
        # set up main regression variables and spatial filters
        self.y = y
        if regimes_att:
            self.x = x.toarray()
        else:
            self.x = x
        self.n, self.k = self.x.shape
        self.method = method
        self.epsilon = epsilon

        ylag = ps.lag_spatial(w, self.y)
        xlag = self.get_x_lag(w, regimes_att)

        # call minimizer using concentrated log-likelihood to get lambda
        methodML = method.upper()
        if methodML in ['FULL', 'LU', 'ORD']:
            if methodML == 'FULL':
                W = w.full()[0]  # need dense here
                res = minimize_scalar(err_c_loglik, 0.0, bounds=(-1.0, 1.0),
                                      args=(self.n, self.y, ylag, self.x,
                                            xlag, W), method='bounded',
                                      tol=epsilon)
            elif methodML == 'LU':
                I = sp.identity(w.n)
                Wsp = w.sparse  # need sparse here
                res = minimize_scalar(err_c_loglik_sp, 0.0, bounds=(-1.0, 1.0),
                                      args=(self.n, self.y, ylag,
                                            self.x, xlag, I, Wsp),
                                      method='bounded', tol=epsilon)
            elif methodML == 'ORD':
                # check on symmetry structure
                if w.asymmetry(intrinsic=False) == []:
                    ww = symmetrize(w)
                    WW = ww.todense()
                    evals = la.eigvalsh(WW)
                else:
                    W = w.full()[0]  # need dense here
                    evals = la.eigvals(W)
                res = minimize_scalar(
                    err_c_loglik_ord, 0.0, bounds=(-1.0, 1.0),
                    args=(self.n, self.y, ylag, self.x,
                          xlag, evals), method='bounded',
                    tol=epsilon)
        else:
            # raise ValueError(...) works on both Python 2 and 3 (the
            # previous `raise Exception, ...` form is Python 2 only) and is
            # a more specific exception type for a bad argument
            raise ValueError("{0} is an unsupported method".format(method))
        self.lam = res.x

        # compute full log-likelihood, including constants
        ln2pi = np.log(2.0 * np.pi)
        llik = -res.fun - self.n / 2.0 * ln2pi - self.n / 2.0
        self.logll = llik

        # b, residuals and predicted values: OLS on the spatially
        # filtered variables at the estimated lambda
        ys = self.y - self.lam * ylag
        xs = self.x - self.lam * xlag
        xsxs = np.dot(xs.T, xs)
        xsxsi = np.linalg.inv(xsxs)
        xsys = np.dot(xs.T, ys)
        b = np.dot(xsxsi, xsys)

        self.betas = np.vstack((b, self.lam))

        self.u = y - np.dot(self.x, b)
        self.predy = self.y - self.u

        # residual variance from the spatially filtered residuals
        self.e_filtered = self.u - self.lam * ps.lag_spatial(w, self.u)
        self.sig2 = np.dot(self.e_filtered.T, self.e_filtered) / self.n

        # variance-covariance matrix betas
        varb = self.sig2 * xsxsi

        # BUG FIX: the variance-covariance computations below require the
        # dense weights matrix, but the 'LU' path (and the symmetric 'ORD'
        # path) never materialized it, which previously raised a NameError
        # on W for those methods
        if methodML != 'FULL':
            W = w.full()[0]

        # variance-covariance matrix lambda, sigma
        a = -self.lam * W
        np.fill_diagonal(a, 1.0)
        ai = la.inv(a)
        wai = np.dot(W, ai)
        tr1 = np.trace(wai)

        wai2 = np.dot(wai, wai)
        tr2 = np.trace(wai2)

        waiTwai = np.dot(wai.T, wai)
        tr3 = np.trace(waiTwai)

        v1 = np.vstack((tr2 + tr3,
                        tr1 / self.sig2))
        v2 = np.vstack((tr1 / self.sig2,
                        self.n / (2.0 * self.sig2 ** 2)))

        v = np.hstack((v1, v2))

        self.vm1 = np.linalg.inv(v)

        # create variance matrix for beta, lambda
        vv = np.hstack((varb, np.zeros((self.k, 1))))
        vv1 = np.hstack(
            (np.zeros((1, self.k)), self.vm1[0, 0] * np.ones((1, 1))))

        self.vm = np.vstack((vv, vv1))

    def get_x_lag(self, w, regimes_att):
        # Spatial lag of x; for a regimes model the lag is taken on the
        # pre-regimes x and then expanded through the regimes frame
        if regimes_att:
            xlag = ps.lag_spatial(w, regimes_att['x'])
            xlag = REGI.Regimes_Frame.__init__(self, xlag,
                                               regimes_att['regimes'], constant_regi=None, cols2regi=regimes_att['cols2regi'])[0]
            xlag = xlag.toarray()
        else:
            xlag = ps.lag_spatial(w, self.x)
        return xlag
class ML_Error(BaseML_Error):

    """
    ML estimation of the spatial error model with all results and diagnostics;
    Anselin (1988) [Anselin1988]_
    (NOTE(review): the original docstring said "spatial lag model", which
    contradicts the class name and the summary title it produces.)

    Parameters
    ----------
    y            : array
                   nx1 array for dependent variable
    x            : array
                   Two dimensional array with n rows and one column for each
                   independent (exogenous) variable, excluding the constant
    w            : Sparse matrix
                   Spatial weights sparse matrix
    method       : string
                   if 'full', brute force calculation (full matrix expressions)
                   if 'ord', Ord eigenvalue method
                   if 'LU', LU sparse matrix decomposition
    epsilon      : float
                   tolerance criterion in mimimize_scalar function and inverse_product
    spat_diag    : boolean
                   if True, include spatial diagnostics
    vm           : boolean
                   if True, include variance-covariance matrix in summary
                   results
    name_y       : string
                   Name of dependent variable for use in output
    name_x       : list of strings
                   Names of independent variables for use in output
    name_w       : string
                   Name of weights matrix for use in output
    name_ds      : string
                   Name of dataset for use in output

    Attributes
    ----------
    betas        : array
                   (k+1)x1 array of estimated coefficients (rho first)
    lam          : float
                   estimate of spatial autoregressive coefficient
    u            : array
                   nx1 array of residuals
    e_filtered   : array
                   nx1 array of spatially filtered residuals
    predy        : array
                   nx1 array of predicted y values
    n            : integer
                   Number of observations
    k            : integer
                   Number of variables for which coefficients are estimated
                   (including the constant, excluding lambda)
    y            : array
                   nx1 array for dependent variable
    x            : array
                   Two dimensional array with n rows and one column for each
                   independent (exogenous) variable, including the constant
    method       : string
                   log Jacobian method
                   if 'full': brute force (full matrix computations)
    epsilon      : float
                   tolerance criterion used in minimize_scalar function and inverse_product
    mean_y       : float
                   Mean of dependent variable
    std_y        : float
                   Standard deviation of dependent variable
    varb         : array
                   Variance covariance matrix (k+1 x k+1) - includes var(lambda)
    vm1          : array
                   variance covariance matrix for lambda, sigma (2 x 2)
    sig2         : float
                   Sigma squared used in computations
    logll        : float
                   maximized log-likelihood (including constant terms)
    pr2          : float
                   Pseudo R squared (squared correlation between y and ypred)
    utu          : float
                   Sum of squared residuals
    std_err      : array
                   1xk array of standard errors of the betas
    z_stat       : list of tuples
                   z statistic; each tuple contains the pair (statistic,
                   p-value), where each is a float
    name_y       : string
                   Name of dependent variable for use in output
    name_x       : list of strings
                   Names of independent variables for use in output
    name_w       : string
                   Name of weights matrix for use in output
    name_ds      : string
                   Name of dataset for use in output
    title        : string
                   Name of the regression method used

    Examples
    --------
    >>> import numpy as np
    >>> import pysal as ps
    >>> np.set_printoptions(suppress=True) #prevent scientific format
    >>> db = ps.open(ps.examples.get_path("south.dbf"),'r')
    >>> ds_name = "south.dbf"
    >>> y_name = "HR90"
    >>> y = np.array(db.by_col(y_name))
    >>> y.shape = (len(y),1)
    >>> x_names = ["RD90","PS90","UE90","DV90"]
    >>> x = np.array([db.by_col(var) for var in x_names]).T
    >>> ww = ps.open(ps.examples.get_path("south_q.gal"))
    >>> w = ww.read()
    >>> ww.close()
    >>> w_name = "south_q.gal"
    >>> w.transform = 'r'
    >>> mlerr = ML_Error(y,x,w,name_y=y_name,name_x=x_names,\
               name_w=w_name,name_ds=ds_name) #doctest: +SKIP
    >>> np.around(mlerr.betas, decimals=4) #doctest: +SKIP
    array([[ 6.1492],
           [ 4.4024],
           [ 1.7784],
           [-0.3781],
           [ 0.4858],
           [ 0.2991]])
    >>> "{0:.4f}".format(mlerr.lam) #doctest: +SKIP
    '0.2991'
    >>> "{0:.4f}".format(mlerr.mean_y) #doctest: +SKIP
    '9.5493'
    >>> "{0:.4f}".format(mlerr.std_y) #doctest: +SKIP
    '7.0389'
    >>> np.around(np.diag(mlerr.vm), decimals=4) #doctest: +SKIP
    array([ 1.0648,  0.0555,  0.0454,  0.0061,  0.0148,  0.0014])
    >>> np.around(mlerr.sig2, decimals=4) #doctest: +SKIP
    array([[ 32.4069]])
    >>> "{0:.4f}".format(mlerr.logll) #doctest: +SKIP
    '-4471.4071'
    >>> "{0:.4f}".format(mlerr.aic) #doctest: +SKIP
    '8952.8141'
    >>> "{0:.4f}".format(mlerr.schwarz) #doctest: +SKIP
    '8979.0779'
    >>> "{0:.4f}".format(mlerr.pr2) #doctest: +SKIP
    '0.3058'
    >>> "{0:.4f}".format(mlerr.utu) #doctest: +SKIP
    '48534.9148'
    >>> np.around(mlerr.std_err, decimals=4) #doctest: +SKIP
    array([ 1.0319,  0.2355,  0.2132,  0.0784,  0.1217,  0.0378])
    >>> np.around(mlerr.z_stat, decimals=4) #doctest: +SKIP
    array([[  5.9593,   0.    ],
           [ 18.6902,   0.    ],
           [  8.3422,   0.    ],
           [ -4.8233,   0.    ],
           [  3.9913,   0.0001],
           [  7.9089,   0.    ]])
    >>> mlerr.name_y #doctest: +SKIP
    'HR90'
    >>> mlerr.name_x #doctest: +SKIP
    ['CONSTANT', 'RD90', 'PS90', 'UE90', 'DV90', 'lambda']
    >>> mlerr.name_w #doctest: +SKIP
    'south_q.gal'
    >>> mlerr.name_ds #doctest: +SKIP
    'south.dbf'
    >>> mlerr.title #doctest: +SKIP
    'MAXIMUM LIKELIHOOD SPATIAL ERROR (METHOD = FULL)'
    """

    def __init__(self, y, x, w, method='full', epsilon=0.0000001,
                 spat_diag=False, vm=False, name_y=None, name_x=None,
                 name_w=None, name_ds=None):
        # validate inputs and add a constant column before estimation
        n = USER.check_arrays(y, x)
        USER.check_y(y, n)
        USER.check_weights(w, y, w_required=True)
        x_constant = USER.check_constant(x)
        method = method.upper()
        # estimation itself is delegated to the base class
        BaseML_Error.__init__(self, y=y, x=x_constant,
                              w=w, method=method, epsilon=epsilon)
        self.title = "MAXIMUM LIKELIHOOD SPATIAL ERROR" + \
            " (METHOD = " + method + ")"
        # labels used by the summary output
        self.name_ds = USER.set_name_ds(name_ds)
        self.name_y = USER.set_name_y(name_y)
        self.name_x = USER.set_name_x(name_x, x)
        self.name_x.append('lambda')
        self.name_w = USER.set_name_w(name_w, w)
        # information criteria and formatted summary
        self.aic = DIAG.akaike(reg=self)
        self.schwarz = DIAG.schwarz(reg=self)
        SUMMARY.ML_Error(reg=self, w=w, vm=vm, spat_diag=spat_diag)
def err_c_loglik(lam, n, y, ylag, x, xlag, W):
    """Negative concentrated log-likelihood for the error model (no
    constant terms), with the log-Jacobian computed by brute force from
    the dense weights matrix W."""
    # spatially filter the variables at the candidate lambda
    y_f = y - lam * ylag
    x_f = x - lam * xlag
    # OLS on the filtered data
    yty = np.dot(y_f.T, y_f)
    xtx = np.dot(x_f.T, x_f)
    xtxi = np.linalg.inv(xtx)
    xty = np.dot(x_f.T, y_f)
    beta = np.dot(xtxi, xty)
    explained = np.dot(xty.T, beta)
    ess = yty - explained
    sig2 = ess[0][0] / n
    nlsig2 = (n / 2.0) * np.log(sig2)
    # log|I - lam W| via the determinant of the dense matrix
    a = -lam * W
    np.fill_diagonal(a, 1.0)
    jacob = np.log(np.linalg.det(a))
    # negative of the concentrated log-likelihood, for minimization
    return nlsig2 - jacob
def err_c_loglik_sp(lam, n, y, ylag, x, xlag, I, Wsp):
    """Negative concentrated log-likelihood for the error model (no
    constant terms), with the log-Jacobian computed from a sparse LU
    decomposition of (I - lam * W)."""
    # minimize_scalar can hand back a (1, 1) ndarray; unwrap it to a scalar
    if isinstance(lam, np.ndarray):
        if lam.shape == (1, 1):
            lam = lam[0][0]
    # spatially filter the variables at the candidate lambda
    y_f = y - lam * ylag
    x_f = x - lam * xlag
    # OLS on the filtered data
    yty = np.dot(y_f.T, y_f)
    xtx = np.dot(x_f.T, x_f)
    xtxi = np.linalg.inv(xtx)
    xty = np.dot(x_f.T, y_f)
    beta = np.dot(xtxi, xty)
    explained = np.dot(xty.T, beta)
    ess = yty - explained
    sig2 = ess[0][0] / n
    nlsig2 = (n / 2.0) * np.log(sig2)
    # log|I - lam W| as the sum of logs of the U factor's diagonal
    spmat = I - lam * Wsp
    lu = SuperLU(spmat.tocsc())
    jacob = np.sum(np.log(np.abs(lu.U.diagonal())))
    # negative of the concentrated log-likelihood, for minimization
    return nlsig2 - jacob
def err_c_loglik_ord(lam, n, y, ylag, x, xlag, evals):
    """Negative concentrated log-likelihood for the error model (no
    constant terms), with the log-Jacobian computed from the eigenvalues
    of the weights matrix (Ord method)."""
    # spatially filter the variables at the candidate lambda
    y_f = y - lam * ylag
    x_f = x - lam * xlag
    # OLS on the filtered data
    yty = np.dot(y_f.T, y_f)
    xtx = np.dot(x_f.T, x_f)
    xtxi = np.linalg.inv(xtx)
    xty = np.dot(x_f.T, y_f)
    beta = np.dot(xtxi, xty)
    explained = np.dot(xty.T, beta)
    ess = yty - explained
    sig2 = ess[0][0] / n
    nlsig2 = (n / 2.0) * np.log(sig2)
    # log-Jacobian: sum_i log(1 - lam * e_i)
    revals = lam * evals
    jacob = np.log(1 - revals).sum()
    if isinstance(jacob, complex):
        # complex eigenvalues (asymmetric W) can leave a residual imaginary
        # part; only the real part is meaningful here
        jacob = jacob.real
    # negative of the concentrated log-likelihood, for minimization
    return nlsig2 - jacob
def _test():
    """Run the module doctests with scientific notation suppressed."""
    import doctest
    saved_suppress = np.get_printoptions()['suppress']
    np.set_printoptions(suppress=True)
    doctest.testmod()
    # restore whatever print option was in effect before
    np.set_printoptions(suppress=saved_suppress)
|
{
"content_hash": "08c05465e90fdafacb23244d400cf596",
"timestamp": "",
"source": "github",
"line_count": 536,
"max_line_length": 129,
"avg_line_length": 36.44589552238806,
"alnum_prop": 0.5255694906577937,
"repo_name": "schmidtc/pysal",
"id": "ce26a973bdac89e66c3a4bd1f924cf9b3b84c377",
"size": "19535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pysal/spreg/ml_error.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "8941"
},
{
"name": "Makefile",
"bytes": "408"
},
{
"name": "Python",
"bytes": "2547465"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.