| text (string, lengths 4–1.02M) | meta (dict) |
|---|---|
import os.path
import subprocess
import shutil
from gii.core import *
from tools.ml2fsm import convertGraphMLToFSM
##----------------------------------------------------------------##
def _getModulePath( path ):
	return os.path.dirname( __file__ ) + '/' + path
##----------------------------------------------------------------##
class FSMSchemeAssetManager(AssetManager):
def getName(self):
return 'asset_manager.fsm_scheme'
def acceptAssetFile( self, filePath ):
if not os.path.isfile(filePath): return False
if not filePath.endswith( '.fsm.graphml' ): return False
return True
def importAsset( self, node, reload = False ):
node.setObjectFile( 'def', node.getCacheFile( 'def' ) )
convertGraphMLToFSM(
node.getAbsFilePath(), #input file
node.getAbsObjectFile( 'def' ) #output file
)
node.assetType = 'fsm_scheme'
return True
# ##----------------------------------------------------------------##
# class FSMSchemeCreator(AssetCreator):
# def getAssetType( self ):
# return 'fsm_scheme'
# def getLabel( self ):
# return 'FSMScheme'
# def createAsset( self, name, contextNode, assetType ):
# ext = '.fsm_scheme'
# filename = name + ext
# if contextNode.isType('folder'):
# nodepath = contextNode.getChildPath( filename )
# else:
# nodepath = contextNode.getSiblingPath( filename )
# fullpath = AssetLibrary.get().getAbsPath( nodepath )
# data={
# '_assetType' : 'fsm_scheme', #checksum
# 'map' :{},
# 'entities':[]
# }
# if os.path.exists(fullpath):
# raise Exception('File already exist:%s'%fullpath)
# fp = open(fullpath,'w')
# json.dump( data, fp, sort_keys=True, indent=2 )
# fp.close()
# return nodepath
# ##----------------------------------------------------------------##
# FSMSchemeCreator().register()
FSMSchemeAssetManager().register()
AssetLibrary.get().setAssetIcon( 'fsm_scheme', 'scheme' )
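##----------------------------------------------------------------##
## Editor's sketch (not part of the original file): with the manager
## registered, a source file such as 'player.fsm.graphml' (hypothetical
## name) passes acceptAssetFile() and is compiled into its cached 'def'
## object file by importAsset() when the asset library imports it.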
|
{
"content_hash": "934c1c724de2efe74534b6b8d95ed09a",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 70,
"avg_line_length": 29.09090909090909,
"alnum_prop": 0.5822916666666667,
"repo_name": "tommo/gii",
"id": "6953f02da4900ce0335a037e5cf620ea2c095158",
"size": "1920",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/mock/asset/FSMAsset.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "398"
},
{
"name": "C",
"bytes": "1118982"
},
{
"name": "C++",
"bytes": "743466"
},
{
"name": "CSS",
"bytes": "5956"
},
{
"name": "HTML",
"bytes": "126233"
},
{
"name": "JavaScript",
"bytes": "129855"
},
{
"name": "Lua",
"bytes": "1290198"
},
{
"name": "Makefile",
"bytes": "652"
},
{
"name": "Objective-C",
"bytes": "28896"
},
{
"name": "Objective-C++",
"bytes": "129214"
},
{
"name": "Python",
"bytes": "2676186"
},
{
"name": "Shell",
"bytes": "11215"
}
],
"symlink_target": ""
}
|
import random
import string
import testtools
from tempest.api.compute import base
from tempest import config
from tempest import exceptions
from tempest.test import attr
class LiveBlockMigrationV3TestJSON(base.BaseV3ComputeAdminTest):
_host_key = 'os-extended-server-attributes:host'
_interface = 'json'
CONF = config.CONF
@classmethod
def setUpClass(cls):
super(LiveBlockMigrationV3TestJSON, cls).setUpClass()
cls.admin_hosts_client = cls.hosts_admin_client
cls.admin_servers_client = cls.servers_admin_client
cls.created_server_ids = []
def _get_compute_hostnames(self):
_resp, body = self.admin_hosts_client.list_hosts()
return [
host_record['host_name']
for host_record in body
if host_record['service'] == 'compute'
]
def _get_server_details(self, server_id):
_resp, body = self.admin_servers_client.get_server(server_id)
return body
def _get_host_for_server(self, server_id):
return self._get_server_details(server_id)[self._host_key]
def _migrate_server_to(self, server_id, dest_host):
_resp, body = self.admin_servers_client.live_migrate_server(
server_id, dest_host,
            self.CONF.compute_feature_enabled.
block_migration_for_live_migration)
return body
def _get_host_other_than(self, host):
for target_host in self._get_compute_hostnames():
if host != target_host:
return target_host
def _get_non_existing_host_name(self):
random_name = ''.join(
random.choice(string.ascii_uppercase) for x in range(20))
self.assertNotIn(random_name, self._get_compute_hostnames())
return random_name
def _get_server_status(self, server_id):
return self._get_server_details(server_id)['status']
def _get_an_active_server(self):
for server_id in self.created_server_ids:
if 'ACTIVE' == self._get_server_status(server_id):
return server_id
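        # (added comment) This 'else' belongs to the 'for' loop above and only
        # runs when no created server is currently ACTIVE, in which case a
        # fresh server is booted and recorded.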
else:
_, server = self.create_test_server(wait_until="ACTIVE")
server_id = server['id']
            self.password = server['admin_password']
self.created_server_ids.append(server_id)
return server_id
def _volume_clean_up(self, server_id, volume_id):
resp, body = self.volumes_client.get_volume(volume_id)
if body['status'] == 'in-use':
self.servers_client.detach_volume(server_id, volume_id)
self.volumes_client.wait_for_volume_status(volume_id, 'available')
self.volumes_client.delete_volume(volume_id)
@testtools.skipIf(not CONF.compute_feature_enabled.live_migration,
'Live migration not available')
@attr(type='gate')
def test_live_block_migration(self):
# Live block migrate an instance to another host
if len(self._get_compute_hostnames()) < 2:
            self.skipTest(
                "Less than 2 compute nodes, skipping migration test.")
server_id = self._get_an_active_server()
actual_host = self._get_host_for_server(server_id)
target_host = self._get_host_other_than(actual_host)
self._migrate_server_to(server_id, target_host)
self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
self.assertEqual(target_host, self._get_host_for_server(server_id))
@testtools.skipIf(not CONF.compute_feature_enabled.live_migration,
'Live migration not available')
@attr(type='gate')
def test_invalid_host_for_migration(self):
# Migrating to an invalid host should not change the status
server_id = self._get_an_active_server()
target_host = self._get_non_existing_host_name()
self.assertRaises(exceptions.BadRequest, self._migrate_server_to,
server_id, target_host)
self.assertEqual('ACTIVE', self._get_server_status(server_id))
@testtools.skipIf(not CONF.compute_feature_enabled.live_migration or not
CONF.compute_feature_enabled.
block_migration_for_live_migration,
'Block Live migration not available')
@testtools.skipIf(not CONF.compute_feature_enabled.
block_migrate_cinder_iscsi,
'Block Live migration not configured for iSCSI')
@attr(type='gate')
def test_iscsi_volume(self):
# Live block migrate an instance to another host
if len(self._get_compute_hostnames()) < 2:
            self.skipTest(
                "Less than 2 compute nodes, skipping migration test.")
server_id = self._get_an_active_server()
actual_host = self._get_host_for_server(server_id)
target_host = self._get_host_other_than(actual_host)
resp, volume = self.volumes_client.create_volume(1,
display_name='test')
self.volumes_client.wait_for_volume_status(volume['id'],
'available')
self.addCleanup(self._volume_clean_up, server_id, volume['id'])
# Attach the volume to the server
self.servers_client.attach_volume(server_id, volume['id'],
device='/dev/xvdb')
self.volumes_client.wait_for_volume_status(volume['id'], 'in-use')
self._migrate_server_to(server_id, target_host)
self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
self.assertEqual(target_host, self._get_host_for_server(server_id))
@classmethod
def tearDownClass(cls):
for server_id in cls.created_server_ids:
cls.servers_client.delete_server(server_id)
super(LiveBlockMigrationV3TestJSON, cls).tearDownClass()
class LiveBlockMigrationV3TestXML(LiveBlockMigrationV3TestJSON):
_host_key = (
'{http://docs.openstack.org/compute/ext/'
'extended_server_attributes/api/v3}host')
_interface = 'xml'
|
{
"content_hash": "510734cd378b6c3ecaaa2a5ee43f7d71",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 78,
"avg_line_length": 39.46153846153846,
"alnum_prop": 0.6198830409356725,
"repo_name": "BeenzSyed/tempest",
"id": "50a5f3c607f972420f1eaff83ad1d39652398850",
"size": "6792",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/api/compute/v3/test_live_block_migration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2613370"
},
{
"name": "Shell",
"bytes": "8687"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
packages = find_packages()
namespace = packages[0]
setup(
name = namespace,
version = __import__(namespace).get_version(),
url = 'https://github.com/jimzhan/rex',
author = 'Jim Zhan',
author_email = 'jim.zhan@me.com',
    packages = packages,
description = 'A high-level Python Tools Set.',
classifiers = [
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: WSGI',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
],
entry_points = {
'console_scripts': [
]
},
install_requires=[
'clint >= 0.3.1',
'Unidecode >= 0.04.9',
],
)
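# Editor's note, a minimal sketch (assumed, not part of this repo): the
# version lookup above expects the top-level package's __init__.py to expose
# get_version(), e.g.:
#
#     VERSION = (0, 1, 0)
#
#     def get_version():
#         return '.'.join(str(part) for part in VERSION)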
|
{
"content_hash": "8afe761aad86e2e656c343e1849571ac",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 79,
"avg_line_length": 29.7,
"alnum_prop": 0.57996632996633,
"repo_name": "jimzhan/pyx",
"id": "4aa32f4cd97224545b85078361403d3f09150b78",
"size": "1212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "22481"
}
],
"symlink_target": ""
}
|
"""This module contains a collection of integration test utilities
that are intended to make writing integration tests very, very easy.
"""
from ConfigParser import ConfigParser
import httplib
import os
import re
import shutil
import signal
import subprocess
import tempfile
import time
import unittest
import requests
import tor_async_util
import tor_async_util.nose_plugins
def _delete_file(filename):
if filename:
if os.path.exists(filename):
os.unlink(filename)
return None
def _delete_directory(dir_name):
if dir_name:
shutil.rmtree(dir_name, ignore_errors=True)
return None
class ServiceConfig(object):
def __init__(self, section, ip, port):
object.__init__(self)
self.section = section
self._ip = ip
self._port = port
self.endpoint = 'http://%s:%d' % (
self._ip,
self._port,
)
cp = ConfigParser()
cp.add_section(self.section)
self.add_config_options(cp)
self.filename = tempfile.mktemp()
with open(self.filename, 'w+') as fp:
cp.write(fp)
tor_async_util.nose_plugins.FileCapture.watch(
self.filename,
type(self).__name__)
def add_config_options(self, cp):
cp.set(self.section, 'ip', self._ip)
cp.set(self.section, 'port', self._port)
cp.set(self.section, 'log_level', 'info')
cp.set(self.section, 'max_concurrent_executing_http_requests', 250)
def destroy(self):
self.filename = _delete_file(self.filename)
class Service(object):
def __init__(self, config, cmd, path):
object.__init__(self)
self.config = config
self.cmd = cmd
self.path = path
self.stdout_file = tempfile.mktemp()
tor_async_util.nose_plugins.FileCapture.watch(
self.stdout_file,
type(self).__name__)
self.process = subprocess.Popen(
self.cmd,
stdout=open(self.stdout_file, 'w+'),
stderr=subprocess.STDOUT,
preexec_fn=os.setsid,
)
url = '%s%s' % (self.config.endpoint, self.path)
for i in range(0, 10):
try:
response = requests.get(url)
if response.status_code == httplib.OK:
return
except Exception:
pass
time.sleep(0.5)
        raise Exception('Could not confirm service started @ %s' % url)
def destroy(self):
if self.process:
os.killpg(self.process.pid, signal.SIGKILL)
self.process = None
self.stdout_file = _delete_file(self.stdout_file)
class IntegrationTestEnv(object):
def __init__(self, service_config_class, service_class):
object.__init__(self)
self.endpoint = os.environ.get('CLF_ENDPOINT', None)
key = os.environ.get('CLF_KEY', None)
secret = os.environ.get('CLF_SECRET', None)
self.auth = requests.auth.HTTPBasicAuth(key, secret) if key and secret else None
if not self.endpoint:
self.service_config = service_config_class()
self.service = service_class(self.service_config)
self.endpoint = self.service_config.endpoint
else:
self.service_config = None
self.service = None
def destroy(self):
        if self.service:
            self.service.destroy()
            self.service = None
        if self.service_config:
            self.service_config.destroy()
            self.service_config = None
self.auth = None
self.secret = None
self.key = None
self.endpoint = None
class IntegrationTestCase(unittest.TestCase):
def setUp(self):
# sleep one second to avoid rate limiting
one_second = 1.0
time.sleep(one_second)
self.clf_test_env = self.clf_test_env_class()
def tearDown(self):
self.clf_test_env.destroy()
self.clf_test_env = None
def assertDebugDetail(self, response, expected_value):
"""Assert a debug failure detail HTTP header appears in
```response``` with a value equal to ```expected_value```.
"""
value = response.headers.get(
tor_async_util.debug_details_header_name,
None)
self.assertIsNotNone(value)
self.assertTrue(value.startswith("0x"))
self.assertEqual(int(value, 16), expected_value)
def assertNoDebugDetail(self, response):
"""Assert *no* debug failure detail HTTP header appears
in ```response```.
"""
value = response.headers.get(
tor_async_util.debug_details_header_name,
None)
self.assertIsNone(value)
def assertEmptyJsonResponseBody(self, response):
self.assertTrue('Content-Type' in response.headers)
json_utf8_content_type_reg_ex = re.compile(
r'^\s*application/json(;\s+charset\=utf-{0,1}8){0,1}\s*$',
re.IGNORECASE)
self.assertIsNotNone(json_utf8_content_type_reg_ex.match(response.headers['Content-Type']))
self.assertEqual(response.json(), {})
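# ----------------------------------------------------------------------
# Editor's sketch (hypothetical names, not part of the original module):
# a service's integration tests would wire these helpers together like this.
#
# class EchoServiceConfig(ServiceConfig):
#     def __init__(self):
#         ServiceConfig.__init__(self, 'echo', '127.0.0.1', 8445)
#
# class EchoService(Service):
#     def __init__(self, config):
#         cmd = ['echo_service.py', '--config=%s' % config.filename]
#         Service.__init__(self, config, cmd, '/v1.0/_health')
#
# def _echo_test_env():
#     return IntegrationTestEnv(EchoServiceConfig, EchoService)
#
# class EchoTestCase(IntegrationTestCase):
#     clf_test_env_class = staticmethod(_echo_test_env)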
|
{
"content_hash": "5df16ed66ee2e10f2907275e52ac56dd",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 99,
"avg_line_length": 27.502590673575128,
"alnum_prop": 0.591183119819141,
"repo_name": "simonsdave/cloudfeaster-services",
"id": "6becb2324607145a1bfa0d4f89672842dccde52d",
"size": "5308",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/integration/integration_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1995"
},
{
"name": "JavaScript",
"bytes": "1177"
},
{
"name": "Python",
"bytes": "602059"
},
{
"name": "RAML",
"bytes": "33777"
},
{
"name": "Shell",
"bytes": "17929"
}
],
"symlink_target": ""
}
|
params = {
# Whether to save the results in results.txt.
'save results': True,
# Whether to plot the results in an interactive window.
'show results': True,
# Whether to plot the decay rates as a function of the max angular mode order.
# The plot is made for the smallest given distance and emission parameter.
'show convergence': True,
# Maximum angular mode order used in the computations.
'n_max': 111,
}
# MATERIALS PARAMETERS
materials = {
# Permittivity of the embedding medium.
'eps_medium': 1.0,
# Material of the metal sphere.
# Data files for Olmon and Yang should be put in the Metals directory.
# Only one of the options below should be uncommented:
'metal': 'Drude',
# 'metal': 'Olmon evaporated gold',
# 'metal': 'Olmon template-stripped gold',
# 'metal': 'Olmon single-crystal gold',
# 'metal': 'Yang silver',
    # Whether to enable nonlocality.
    # Only used if the metal is 'Drude'.
'nonlocal': True,
# Permittivity contribution due to the bound response.
# Only used if the metal is 'Drude'.
'eps_inf': 1.0,
# Plasma frequency in eV.
# Only used if nonlocality is enabled or if the metal is 'Drude'.
'hbar omega_p': 8.1,
# Damping rate in eV.
'hbar gamma': 0.047,
# Fermi velocity in metres per second.
# Only used if nonlocality is enabled.
'v_F': 1.40e6,
# Diffusion constant in metres squared per second.
# Only used if nonlocality is enabled.
# Setting D to zero uses the Hydrodynamic Theory.
# Setting D to a higher value uses the Generalized Nonlocal Optical Response.
'D': 8.62e-4,
}
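# Editor's note (worked conversion, not part of the original file): a solver
# consuming these values would turn 'hbar omega_p' from eV into an angular
# frequency via omega_p = E / hbar, e.g.
#   8.1 eV * 1.602176634e-19 J/eV / 1.054571817e-34 J*s ~= 1.23e16 rad/s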
# GEOMETRY PARAMETERS
geometry = {
# Radius of the metal sphere.
'radius': 30,
# Unit in which the radius is given.
# Valid values: 'm', 'cm', 'mm', 'um', 'nm', 'A'.
'unit': 'nm',
}
# DIPOLE PARAMETERS
dipole = {
# Orientation of the dipole with respect to the sphere.
# Valid values: 'radial', 'tangential', 'averaged'.
'orientation': 'averaged',
# Quantum efficiency of the dipole in vacuum.
'q_0': 1.0,
}
# DIPOLE-SPHERE DISTANCE PARAMETERS
distance = {
# Smallest distance between the dipole and the surface of the sphere.
'min': 1.0,
# Largest distance between the dipole and the surface of the sphere.
'max': 10.0,
# Number of points between the smallest and largest distances.
# If it is 1, only the smallest distance is considered.
'n': 20,
# Unit in which the distances are given.
# Valid values: 'm', 'cm', 'mm', 'um', 'nm', 'A'.
'unit': 'nm',
}
# EMISSION PARAMETERS
emission = {
# Smallest value of the emission parameter of the dipole.
'min': 1.0,
# Largest value of the emission parameter of the dipole.
'max': 4.0,
# Number of points between the smallest and largest emission parameter values.
# If it is 1, only the smallest emission parameter value is considered.
'n': 20,
# Type of the given emission parameter.
# Valid values: 'omega', 'hbar omega (J)', 'hbar omega (eV)', 'frequency (Hz)',
# 'wavelength (m)', 'wavelength (nm)'.
'label': 'hbar omega (eV)',
}
|
{
"content_hash": "a13ea6bc8230d3cea0b58a951637d830",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 83,
"avg_line_length": 27.18487394957983,
"alnum_prop": 0.6420401854714065,
"repo_name": "rjurga/plasmon-fluorescence",
"id": "5708f1b3fdb65d75834baa4d319af9d28337551a",
"size": "3257",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parameters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35716"
}
],
"symlink_target": ""
}
|
from django import forms
from crawler import models
class AppForm(forms.ModelForm):
class Meta:
model = models.App
fields = '__all__'
class AppDescriptionForm(forms.ModelForm):
class Meta:
model = models.AppDescription
fields = '__all__'
|
{
"content_hash": "17b8f416b9ace68bee97e940fbaed962",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 42,
"avg_line_length": 18.866666666666667,
"alnum_prop": 0.6431095406360424,
"repo_name": "bkosawa/admin-recommendation",
"id": "4e2b473ae4106dced6cd574518ca13078eeaf9d1",
"size": "283",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crawler/forms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "75800"
}
],
"symlink_target": ""
}
|
__author__ = 'Sam Brooke'
__date__ = 'September 2017'
__copyright__ = '(C) 2017, Sam Brooke'
__email__ = "sbrooke@tuta.io"
import os
import fnmatch
import csv
import gdal
import georasters as gr
import re
from optparse import OptionParser
from dateutil.parser import parse
parser = OptionParser()
parser.add_option("-c", "--coords", dest="coords", help="Location of coordinates csv file", metavar="COORDS")
parser.add_option("-d", "--dir", dest="dir", help="Search directory", metavar="DIR")
parser.add_option("-o", "--output", dest="output", help="Output directory", metavar="OUT")
parser.add_option("-n", "--csv", dest="name", help="Output csv name", metavar="CSV")
(options, args) = parser.parse_args()
coord_file = False
raster_dir = False
output_dir = './'
csv_name = 'untitled'
if options.coords:
if os.path.isfile(options.coords):
coord_file = options.coords
if options.dir:
if os.path.isdir(options.dir):
raster_dir = options.dir
if options.output:
if os.path.isdir(options.output):
output_dir = options.output
if options.name:
csv_name = options.name
if not coord_file or not raster_dir:
    parser.error('a valid coordinates file (-c) and search directory (-d) are required')
print('Coord file: '+coord_file)
print('Raster directory: '+raster_dir)
print('Output directory: '+output_dir)
print('CSV Name: '+csv_name)
rasters = []
coordinates = []
with open(coord_file, 'rb') as csvfile:
firstline = True
csvr = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in csvr:
if firstline: #skip first line
firstline = False
continue
coordinates.append(row)
for root, dirnames, filenames in os.walk(raster_dir):
for filename in fnmatch.filter(filenames, '*.tif'):
rasters.append(os.path.join(root, filename))
spectral_data = {}
# Original Landsat Meta
pattern = '^(LC[0-9]+)_(L[0-9a-zA-Z]+)_([0-9]+)_(?P<datestamp>[0-9]+)_([0-9]+)_([0-9]+)_([A-Za-z0-9]+)_(?P<band>B[0-9]+).(tif|TIF)$'
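# e.g. a Collection-1 style name such as
# 'LC08_L1TP_042034_20170616_20170629_01_T1_B4.TIF' (hypothetical example)
# matches, yielding datestamp='20170616' and band='B4'.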
ids = 0
landsat_bands = {
'B1': [0.435, 0.451],
'B2': [0.452, 0.512],
'B3': [0.533, 0.590],
'B4': [0.636, 0.673],
'B5': [0.851, 0.879],
'B6': [1.566, 1.651],
'B7': [2.107, 2.294]
}
if len(rasters) > 0 and len(coordinates) > 0:
csv_name = os.path.join(output_dir,csv_name+'.csv')
with open(csv_name,'wb') as f:
csvw = csv.writer(f, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
csvw.writerow(['id', 'Label', 'Easting', 'Northing', 'Reflectance', 'Band', 'Wavelength', 'Date', 'Projection'])
for r in rasters:
raster_obj = gr.from_file(r)
print(r)
ds = gdal.Open(r, gdal.GA_Update)
original_landsat = ds.GetMetadataItem('ORIGINAL_LANDSAT')
print(original_landsat)
m = re.match(pattern,original_landsat)
d = parse(m.group('datestamp'))
datestamp = d.strftime('%Y-%m-%d')
band = m.group('band')
ndv, xsize, ysize, geot, projection, datatype = gr.get_geo_info(r) # Raster information
band_spectral_data = []
wavelengths = landsat_bands[band]
w = (wavelengths[0]+wavelengths[1])/2 # Get average
for c in coordinates:
rstar = raster_obj.map_pixel(float(c[0]),float(c[1]))
csvw.writerow([ids,c[2], float(c[0]), float(c[1]), rstar, band, w, datestamp, '"'+projection.ExportToProj4()+'"'])
ids = ids+1
|
{
"content_hash": "b80619e630bc60c5b376d268ec61aeea",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 132,
"avg_line_length": 27.05982905982906,
"alnum_prop": 0.6411876184459886,
"repo_name": "hijinks/python-bcet",
"id": "8a152612b84c40a439e1bba560c7a227ba28a8bc",
"size": "3240",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spectral-sig.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "43899"
},
{
"name": "R",
"bytes": "6689"
},
{
"name": "Shell",
"bytes": "6923"
}
],
"symlink_target": ""
}
|
import logging
import re
from tempest_lib import exceptions
import testtools
from tempest import cli
from tempest import clients
from tempest import config
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class SimpleReadOnlyCinderClientTest(cli.ClientTestBase):
"""Basic, read-only tests for Cinder CLI client.
Checks return values and output of read-only commands.
These tests do not presume any content, nor do they create
their own. They only verify the structure of output if present.
"""
@classmethod
def resource_setup(cls):
if not CONF.service_available.cinder:
msg = ("%s skipped as Cinder is not available" % cls.__name__)
raise cls.skipException(msg)
super(SimpleReadOnlyCinderClientTest, cls).resource_setup()
id_cl = clients.AdminManager().identity_client
tenant = id_cl.get_tenant_by_name(CONF.identity.admin_tenant_name)
cls.admin_tenant_id = tenant['id']
def cinder(self, *args, **kwargs):
return self.clients.cinder(*args,
endpoint_type=CONF.volume.endpoint_type,
**kwargs)
@test.idempotent_id('229bc6dc-d804-4668-b753-b590caf63061')
def test_cinder_fake_action(self):
self.assertRaises(exceptions.CommandFailed,
self.cinder,
'this-does-not-exist')
@test.idempotent_id('77140216-14db-4fc5-a246-e2a587e9e99b')
def test_cinder_absolute_limit_list(self):
roles = self.parser.listing(self.cinder('absolute-limits'))
self.assertTableStruct(roles, ['Name', 'Value'])
@test.idempotent_id('2206b9ce-1a36-4a0a-a129-e5afc7cee1dd')
def test_cinder_backup_list(self):
backup_list = self.parser.listing(self.cinder('backup-list'))
self.assertTableStruct(backup_list, ['ID', 'Volume ID', 'Status',
'Name', 'Size', 'Object Count',
'Container'])
@test.idempotent_id('c7f50346-cd99-4e0b-953f-796ff5f47295')
def test_cinder_extra_specs_list(self):
extra_specs_list = self.parser.listing(self.cinder('extra-specs-list'))
self.assertTableStruct(extra_specs_list, ['ID', 'Name', 'extra_specs'])
@test.idempotent_id('9de694cb-b40b-442c-a30c-5f9873e144f7')
def test_cinder_volumes_list(self):
list = self.parser.listing(self.cinder('list'))
self.assertTableStruct(list, ['ID', 'Status', 'Name', 'Size',
'Volume Type', 'Bootable',
'Attached to'])
self.cinder('list', params='--all-tenants 1')
self.cinder('list', params='--all-tenants 0')
self.assertRaises(exceptions.CommandFailed,
self.cinder,
'list',
params='--all-tenants bad')
@test.idempotent_id('56f7c15c-ee82-4f23-bbe8-ce99b66da493')
def test_cinder_quota_class_show(self):
"""This CLI can accept and string as param."""
roles = self.parser.listing(self.cinder('quota-class-show',
params='abc'))
self.assertTableStruct(roles, ['Property', 'Value'])
@test.idempotent_id('a919a811-b7f0-47a7-b4e5-f3eb674dd200')
def test_cinder_quota_defaults(self):
"""This CLI can accept and string as param."""
roles = self.parser.listing(self.cinder('quota-defaults',
params=self.admin_tenant_id))
self.assertTableStruct(roles, ['Property', 'Value'])
@test.idempotent_id('18166673-ffa8-4df3-b60c-6375532288bc')
def test_cinder_quota_show(self):
"""This CLI can accept and string as param."""
roles = self.parser.listing(self.cinder('quota-show',
params=self.admin_tenant_id))
self.assertTableStruct(roles, ['Property', 'Value'])
@test.idempotent_id('b2c66ed9-ca96-4dc4-94cc-8083e664e516')
def test_cinder_rate_limits(self):
rate_limits = self.parser.listing(self.cinder('rate-limits'))
self.assertTableStruct(rate_limits, ['Verb', 'URI', 'Value', 'Remain',
'Unit', 'Next_Available'])
@test.idempotent_id('7a19955b-807c-481a-a2ee-9d76733eac28')
@testtools.skipUnless(CONF.volume_feature_enabled.snapshot,
'Volume snapshot not available.')
def test_cinder_snapshot_list(self):
snapshot_list = self.parser.listing(self.cinder('snapshot-list'))
self.assertTableStruct(snapshot_list, ['ID', 'Volume ID', 'Status',
'Name', 'Size'])
@test.idempotent_id('6e54ecd9-7ba9-490d-8e3b-294b67139e73')
def test_cinder_type_list(self):
type_list = self.parser.listing(self.cinder('type-list'))
self.assertTableStruct(type_list, ['ID', 'Name'])
@test.idempotent_id('2c363583-24a0-4980-b9cb-b50c0d241e82')
def test_cinder_list_extensions(self):
roles = self.parser.listing(self.cinder('list-extensions'))
self.assertTableStruct(roles, ['Name', 'Summary', 'Alias', 'Updated'])
@test.idempotent_id('691bd6df-30ad-4be7-927b-a02d62aaa38a')
def test_cinder_credentials(self):
credentials = self.parser.listing(self.cinder('credentials'))
self.assertTableStruct(credentials, ['User Credentials', 'Value'])
@test.idempotent_id('5c6d71a3-4904-4a3a-aec9-7fd4aa830e95')
def test_cinder_availability_zone_list(self):
zone_list = self.parser.listing(self.cinder('availability-zone-list'))
self.assertTableStruct(zone_list, ['Name', 'Status'])
@test.idempotent_id('9b0fd5a6-f955-42b9-a42f-6f542a80b9a3')
def test_cinder_endpoints(self):
out = self.cinder('endpoints')
tables = self.parser.tables(out)
for table in tables:
headers = table['headers']
            self.assertEqual(2, len(headers))
self.assertEqual('Value', headers[1])
@test.idempotent_id('301b5ae1-9591-4e9f-999c-d525a9bdf822')
def test_cinder_service_list(self):
service_list = self.parser.listing(self.cinder('service-list'))
self.assertTableStruct(service_list, ['Binary', 'Host', 'Zone',
'Status', 'State', 'Updated_at'])
@test.idempotent_id('7260ae52-b462-461e-9048-36d0bccf92c6')
def test_cinder_transfer_list(self):
transfer_list = self.parser.listing(self.cinder('transfer-list'))
self.assertTableStruct(transfer_list, ['ID', 'Volume ID', 'Name'])
@test.idempotent_id('0976dea8-14f3-45a9-8495-3617fc4fbb13')
def test_cinder_bash_completion(self):
self.cinder('bash-completion')
@test.idempotent_id('b7c00361-be80-4512-8735-5f98fc54f2a9')
def test_cinder_qos_list(self):
qos_list = self.parser.listing(self.cinder('qos-list'))
self.assertTableStruct(qos_list, ['ID', 'Name', 'Consumer', 'specs'])
@test.idempotent_id('2e92dc6e-22b5-4d94-abfc-b543b0c50a89')
def test_cinder_encryption_type_list(self):
encrypt_list = self.parser.listing(self.cinder('encryption-type-list'))
self.assertTableStruct(encrypt_list, ['Volume Type ID', 'Provider',
'Cipher', 'Key Size',
'Control Location'])
@test.idempotent_id('0ee6cb4c-8de6-4811-a7be-7f4bb75b80cc')
def test_admin_help(self):
help_text = self.cinder('help')
lines = help_text.split('\n')
self.assertFirstLineStartsWith(lines, 'usage: cinder')
commands = []
cmds_start = lines.index('Positional arguments:')
cmds_end = lines.index('Optional arguments:')
        command_pattern = re.compile(r'^ {4}([a-z0-9_-]+)')
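        # (added comment) matches four-space-indented command names in the
        # help listing, e.g. a line like '    snapshot-list ...' (illustrative)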
for line in lines[cmds_start:cmds_end]:
match = command_pattern.match(line)
if match:
commands.append(match.group(1))
commands = set(commands)
wanted_commands = set(('absolute-limits', 'list', 'help',
'quota-show', 'type-list', 'snapshot-list'))
self.assertFalse(wanted_commands - commands)
# Optional arguments:
@test.idempotent_id('2fd6f530-183c-4bda-8918-1e59e36c26b9')
def test_cinder_version(self):
self.cinder('', flags='--version')
@test.idempotent_id('306bac51-c443-4426-a6cf-583a953fcd68')
def test_cinder_debug_list(self):
self.cinder('list', flags='--debug')
@test.idempotent_id('6d97fcd2-5dd1-429d-af70-030c949d86cd')
def test_cinder_retries_list(self):
self.cinder('list', flags='--retries 3')
@test.idempotent_id('95a2850c-35b4-4159-bb93-51647a5ad232')
def test_cinder_region_list(self):
region = CONF.volume.region
if not region:
region = CONF.identity.region
self.cinder('list', flags='--os-region-name ' + region)
|
{
"content_hash": "bc51a797567d45cac2286f2162c30e20",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 79,
"avg_line_length": 44.40975609756097,
"alnum_prop": 0.6124780316344464,
"repo_name": "hpcloud-mon/tempest",
"id": "cb29cc89d3d4546d21a09e6e5682f4605f0c922a",
"size": "9740",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tempest/cli/simple_read_only/volume/test_cinder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2804899"
},
{
"name": "Shell",
"bytes": "8560"
}
],
"symlink_target": ""
}
|
import os, sys
import glob
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-r', '--root', dest='root', help='root of your dataset')
parser.add_argument('-y', '--year', dest='year', help='the year for your dataset')
args = parser.parse_args()
if not os.path.exists(args.root):
print("The path doesn't exist\n")
sys.exit(1)
dest_dir = os.path.join(args.root, 'VOCdevkit')
train_data_f = 'train_data_'+args.year+'.txt'
train_anno_f = 'train_anno_'+args.year+'.txt'
val_data_f = 'val_data_'+args.year+'.txt'
val_anno_f = 'val_anno_'+args.year+'.txt'
imlst = glob.glob(os.path.join(args.root, 'VOCdevkit', 'VOC'+args.year, 'ImageSets', '*.jpg'))
lablst = glob.glob(os.path.join(args.root, 'VOCdevkit', 'VOC'+args.year, 'labels', '*.txt'))
with open(train_data_f, 'w') as df:
    for im in sorted(imlst):
        df.write("%s\n" % im)
|
{
"content_hash": "6cafdfc71008b938bab3bba15f33b333",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 95,
"avg_line_length": 27.64516129032258,
"alnum_prop": 0.6604434072345391,
"repo_name": "CUFCTL/DLBD",
"id": "a9d2f0fb69aafc6b1b52beba63c5b0fdde50fd7c",
"size": "857",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Fall2017/create_train_files.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "244643"
},
{
"name": "Python",
"bytes": "2520279"
},
{
"name": "Shell",
"bytes": "3463"
},
{
"name": "TeX",
"bytes": "6468"
}
],
"symlink_target": ""
}
|
import kindred
def test_pubannotation():
corpus = kindred.pubannotation.load('bionlp-st-gro-2013-development')
assert isinstance(corpus,kindred.Corpus)
fileCount = len(corpus.documents)
entityCount = sum([ len(d.entities) for d in corpus.documents ])
relationCount = sum([ len(d.relations) for d in corpus.documents ])
assert fileCount > 0
assert relationCount > 0
assert entityCount > 0
if __name__ == '__main__':
test_pubannotation()
|
{
"content_hash": "2c5cbba9acc75752b54d5868e31e1aba",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 70,
"avg_line_length": 26.529411764705884,
"alnum_prop": 0.729490022172949,
"repo_name": "jakelever/kindred",
"id": "c6369a91e8bc0345a2d1dd07dc859a26bc4cb682",
"size": "452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_pubannotation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "263958"
},
{
"name": "Shell",
"bytes": "309"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from ideas.models import Category
@admin.register(Category)
class IdeaModelAdmin(admin.ModelAdmin):
list_display = admin.ModelAdmin.list_display + (
'name',
'slug',
'created_at',
'updated_at',
)
|
{
"content_hash": "c1d3f59e27ce310fba277681ff64375b",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 52,
"avg_line_length": 18.2,
"alnum_prop": 0.6410256410256411,
"repo_name": "paulsoh/moxie",
"id": "18afcf8157b5c494ad24c69b7371547f985719c3",
"size": "273",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moxie/ideas/admin/category.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12850"
},
{
"name": "HTML",
"bytes": "58007"
},
{
"name": "JavaScript",
"bytes": "23689"
},
{
"name": "Makefile",
"bytes": "91"
},
{
"name": "Python",
"bytes": "719646"
},
{
"name": "Shell",
"bytes": "67"
}
],
"symlink_target": ""
}
|
from django.forms.utils import ErrorList
from django.template import Context, Template, TemplateSyntaxError
# from django.template.loader import render_to_string
# from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
# from froala_editor.fields import FroalaField
#from pennyblack import settings
#from ..models.link import check_if_redirect_url, is_link
# from feincms.content.richtext.models import RichTextContentAdminForm, RichTextContent
from feincms.content.richtext.models import RichTextContent
# from feincms.module.medialibrary.models import MediaFile
import re
# import os
# from PIL import Image
#import exceptions
HREF_RE = re.compile(r'href\="((\{\{[^}]+\}\}|[^"><])+)"')
# class FroalaContent(models.Model):
# content = FroalaField()
#
# class Meta:
# abstract = True
# #app_label = 'wienfluss'
#
# def render(self, **kwargs):
# request = kwargs.get('request')
# return render_to_string('content/markupmirror/default.html', {
# 'content': self,
# 'request': request
# })
# class NewsletterSectionAdminForm(RichTextContentAdminForm):
# def clean(self):
# cleaned_data = super(NewsletterSectionAdminForm, self).clean()
# try:
# t = Template(cleaned_data['text'])
# except TemplateSyntaxError as e:
# self._errors["text"] = ErrorList([e])
# except KeyError:
# pass
# return cleaned_data
# class Meta:
# exclude = ('image_thumb', 'image_width', 'image_height', 'image_url_replaced')
# def __init__(self, *args, **kwargs):
# super(NewsletterSectionAdminForm, self).__init__(*args, **kwargs)
# self.fields.insert(0, 'title', self.fields.pop('title'))
class TextOnlyDripContent(RichTextContent):
#form = NewsletterSectionAdminForm
#feincms_item_editor_form = NewsletterSectionAdminForm
# feincms_item_editor_includes = {
# 'head': [ settings.TINYMCE_CONFIG_URL ],
# }
# baselayout = "content/text_only/section.html"
class Meta:
abstract = True
app_label = 'driprichtext'
verbose_name = _('text only content')
verbose_name_plural = _('text only contents')
|
{
"content_hash": "fe2d88ef618a7606f76c3741c99690cc",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 88,
"avg_line_length": 32.58571428571429,
"alnum_prop": 0.655414291977203,
"repo_name": "rorito/django-squeezemail",
"id": "7521642d52ce0315ac538e369c2db8a31fd8cac6",
"size": "2391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "squeezemail/content/richtext.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "301"
},
{
"name": "HTML",
"bytes": "3246"
},
{
"name": "JavaScript",
"bytes": "1320"
},
{
"name": "Python",
"bytes": "119084"
}
],
"symlink_target": ""
}
|
"""Univariate features selection."""
# Authors: V. Michel, B. Thirion, G. Varoquaux, A. Gramfort, E. Duchesnay.
# L. Buitinck, A. Joly
# License: BSD 3 clause
import numpy as np
import warnings
from scipy import special, stats
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..preprocessing import LabelBinarizer
from ..utils import (as_float_array, check_array, check_X_y, safe_sqr,
safe_mask)
from ..utils.extmath import safe_sparse_dot, row_norms
from ..utils.validation import check_is_fitted
from .base import SelectorMixin
def _clean_nans(scores):
"""
Fixes Issue #1240: NaNs can't be properly compared, so change them to the
smallest value of scores's dtype. -inf seems to be unreliable.
"""
# XXX where should this function be called? fit? scoring functions
# themselves?
scores = as_float_array(scores, copy=True)
scores[np.isnan(scores)] = np.finfo(scores.dtype).min
return scores
######################################################################
# Scoring functions
# The following function is a rewriting of scipy.stats.f_oneway
# Contrary to the scipy.stats.f_oneway implementation it does not
# copy the data while keeping the inputs unchanged.
def f_oneway(*args):
"""Performs a 1-way ANOVA.
The one-way ANOVA tests the null hypothesis that 2 or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
sample1, sample2, ... : array_like, sparse matrices
The sample measurements should be given as arguments.
Returns
-------
F-value : float
The computed F-value of the test.
p-value : float
The associated p-value from the F-distribution.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent
2. Each sample is from a normally distributed population
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still be
possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`_) although
with some loss of power.
The algorithm is from Heiman[2], pp.394-7.
See ``scipy.stats.f_oneway`` that should give the same results while
being less efficient.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 14.
http://faculty.vassar.edu/lowry/ch14pt1.html
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
"""
n_classes = len(args)
args = [as_float_array(a) for a in args]
n_samples_per_class = np.array([a.shape[0] for a in args])
n_samples = np.sum(n_samples_per_class)
ss_alldata = sum(safe_sqr(a).sum(axis=0) for a in args)
sums_args = [np.asarray(a.sum(axis=0)) for a in args]
square_of_sums_alldata = sum(sums_args) ** 2
square_of_sums_args = [s ** 2 for s in sums_args]
sstot = ss_alldata - square_of_sums_alldata / float(n_samples)
ssbn = 0.
for k, _ in enumerate(args):
ssbn += square_of_sums_args[k] / n_samples_per_class[k]
ssbn -= square_of_sums_alldata / float(n_samples)
sswn = sstot - ssbn
dfbn = n_classes - 1
dfwn = n_samples - n_classes
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
constant_features_idx = np.where(msw == 0.)[0]
if (np.nonzero(msb)[0].size != msb.size and constant_features_idx.size):
warnings.warn("Features %s are constant." % constant_features_idx,
UserWarning)
f = msb / msw
# flatten matrix to vector in sparse case
f = np.asarray(f).ravel()
prob = special.fdtrc(dfbn, dfwn, f)
return f, prob
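# A minimal usage sketch (editor's addition; values illustrative):
#
#     >>> import numpy as np
#     >>> a = np.array([[1.0], [2.0], [3.0]])
#     >>> b = np.array([[5.0], [6.0], [7.0]])
#     >>> F, p = f_oneway(a, b)   # one F-value and p-value per feature column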
def f_classif(X, y):
"""Compute the ANOVA F-value for the provided sample.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix} shape = [n_samples, n_features]
The set of regressors that will be tested sequentially.
y : array of shape(n_samples)
The data matrix.
Returns
-------
F : array, shape = [n_features,]
The set of F values.
pval : array, shape = [n_features,]
The set of p-values.
See also
--------
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'])
args = [X[safe_mask(X, y == k)] for k in np.unique(y)]
return f_oneway(*args)
def _chisquare(f_obs, f_exp):
"""Fast replacement for scipy.stats.chisquare.
Version from https://github.com/scipy/scipy/pull/2525 with additional
optimizations.
"""
f_obs = np.asarray(f_obs, dtype=np.float64)
k = len(f_obs)
# Reuse f_obs for chi-squared statistics
chisq = f_obs
chisq -= f_exp
chisq **= 2
with np.errstate(invalid="ignore"):
chisq /= f_exp
chisq = chisq.sum(axis=0)
return chisq, special.chdtrc(k - 1, chisq)
def chi2(X, y):
"""Compute chi-squared stats between each non-negative feature and class.
This score can be used to select the n_features features with the
highest values for the test chi-squared statistic from X, which must
contain only non-negative features such as booleans or frequencies
(e.g., term counts in document classification), relative to the classes.
Recall that the chi-square test measures dependence between stochastic
variables, so using this function "weeds out" the features that are the
most likely to be independent of class and therefore irrelevant for
classification.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features_in)
Sample vectors.
y : array-like, shape = (n_samples,)
Target vector (class labels).
Returns
-------
chi2 : array, shape = (n_features,)
chi2 statistics of each feature.
pval : array, shape = (n_features,)
p-values of each feature.
Notes
-----
Complexity of this algorithm is O(n_classes * n_features).
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
f_regression: F-value between label/feature for regression tasks.
"""
# XXX: we might want to do some of the following in logspace instead for
# numerical stability.
X = check_array(X, accept_sparse='csr')
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative.")
Y = LabelBinarizer().fit_transform(y)
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
observed = safe_sparse_dot(Y.T, X) # n_classes * n_features
feature_count = X.sum(axis=0).reshape(1, -1)
class_prob = Y.mean(axis=0).reshape(1, -1)
expected = np.dot(class_prob.T, feature_count)
return _chisquare(observed, expected)
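# A minimal usage sketch (editor's addition; counts illustrative):
#
#     >>> import numpy as np
#     >>> X = np.array([[1, 0], [2, 1], [0, 3], [1, 4]])   # non-negative counts
#     >>> y = np.array([0, 0, 1, 1])
#     >>> chi2_stats, p_values = chi2(X, y)   # one statistic per feature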
def f_regression(X, y, center=True):
"""Univariate linear regression tests.
Linear model for testing the individual effect of each of many regressors.
This is a scoring function to be used in a feature selection procedure, not
a free standing feature selection procedure.
This is done in 2 steps:
1. The correlation between each regressor and the target is computed,
that is, ((X[:, i] - mean(X[:, i])) * (y - mean_y)) / (std(X[:, i]) *
std(y)).
2. It is converted to an F score then to a p-value.
For more on usage see the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix} shape = (n_samples, n_features)
The set of regressors that will be tested sequentially.
y : array of shape(n_samples).
The data matrix
center : True, bool,
If true, X and y will be centered.
Returns
-------
F : array, shape=(n_features,)
F values of features.
pval : array, shape=(n_features,)
p-values of F-scores.
See also
--------
mutual_info_regression: Mutual information for a continuous target.
f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
SelectPercentile: Select features based on percentile of the highest
scores.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64)
n_samples = X.shape[0]
# compute centered values
# note that E[(x - mean(x))*(y - mean(y))] = E[x*(y - mean(y))], so we
# need not center X
if center:
y = y - np.mean(y)
if issparse(X):
X_means = X.mean(axis=0).getA1()
else:
X_means = X.mean(axis=0)
# compute the scaled standard deviations via moments
X_norms = np.sqrt(row_norms(X.T, squared=True) -
n_samples * X_means ** 2)
else:
X_norms = row_norms(X.T)
# compute the correlation
corr = safe_sparse_dot(y, X)
corr /= X_norms
corr /= np.linalg.norm(y)
# convert to p-value
degrees_of_freedom = y.size - (2 if center else 1)
F = corr ** 2 / (1 - corr ** 2) * degrees_of_freedom
pv = stats.f.sf(F, 1, degrees_of_freedom)
return F, pv
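# Editor's recap of the two steps above (notation only, no new behavior): for
# each feature x_i, corr_i = E[(x_i - mean(x_i)) (y - mean(y))] /
# (std(x_i) std(y)), then F_i = corr_i**2 / (1 - corr_i**2) * d with
# d = n_samples - 2 degrees of freedom when centering, and the p-value is the
# F(1, d) survival function.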
######################################################################
# Base classes
class _BaseFilter(BaseEstimator, SelectorMixin):
"""Initialize the univariate feature selection.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues) or a single array with scores.
"""
def __init__(self, score_func):
self.score_func = score_func
def fit(self, X, y):
"""Run score function on (X, y) and get the appropriate features.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
Returns
-------
self : object
"""
X, y = check_X_y(X, y, ['csr', 'csc'], multi_output=True)
if not callable(self.score_func):
raise TypeError("The score function should be a callable, %s (%s) "
"was passed."
% (self.score_func, type(self.score_func)))
self._check_params(X, y)
score_func_ret = self.score_func(X, y)
if isinstance(score_func_ret, (list, tuple)):
self.scores_, self.pvalues_ = score_func_ret
self.pvalues_ = np.asarray(self.pvalues_)
else:
self.scores_ = score_func_ret
self.pvalues_ = None
self.scores_ = np.asarray(self.scores_)
return self
def _check_params(self, X, y):
pass
######################################################################
# Specific filters
######################################################################
class SelectPercentile(_BaseFilter):
"""Select features according to a percentile of the highest scores.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues) or a single array with scores.
Default is f_classif (see below "See also"). The default function only
works with classification tasks.
percentile : int, optional, default=10
Percent of features to keep.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores, None if `score_func` returned only scores.
Notes
-----
Ties between features with equal scores will be broken in an unspecified
way.
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
mutual_info_classif: Mutual information for a discrete target.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
mutual_info_regression: Mutual information for a continuous target.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, percentile=10):
super(SelectPercentile, self).__init__(score_func)
self.percentile = percentile
def _check_params(self, X, y):
if not 0 <= self.percentile <= 100:
raise ValueError("percentile should be >=0, <=100; got %r"
% self.percentile)
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
# Cater for NaNs
if self.percentile == 100:
return np.ones(len(self.scores_), dtype=np.bool)
elif self.percentile == 0:
return np.zeros(len(self.scores_), dtype=np.bool)
scores = _clean_nans(self.scores_)
threshold = stats.scoreatpercentile(scores,
100 - self.percentile)
mask = scores > threshold
ties = np.where(scores == threshold)[0]
if len(ties):
max_feats = int(len(scores) * self.percentile / 100)
kept_ties = ties[:max_feats - mask.sum()]
mask[kept_ties] = True
return mask
class SelectKBest(_BaseFilter):
"""Select features according to the k highest scores.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues) or a single array with scores.
Default is f_classif (see below "See also"). The default function only
works with classification tasks.
k : int or "all", optional, default=10
Number of top features to select.
The "all" option bypasses selection, for use in a parameter search.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores, None if `score_func` returned only scores.
Notes
-----
Ties between features with equal scores will be broken in an unspecified
way.
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
mutual_info_classif: Mutual information for a discrete target.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
mutual_info_regression: Mutual information for a continuous target.
SelectPercentile: Select features based on percentile of the highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, k=10):
super(SelectKBest, self).__init__(score_func)
self.k = k
def _check_params(self, X, y):
if not (self.k == "all" or 0 <= self.k <= X.shape[1]):
raise ValueError("k should be >=0, <= n_features = %d; got %r. "
"Use k='all' to return all features."
% (X.shape[1], self.k))
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
if self.k == 'all':
return np.ones(self.scores_.shape, dtype=bool)
elif self.k == 0:
return np.zeros(self.scores_.shape, dtype=bool)
else:
scores = _clean_nans(self.scores_)
mask = np.zeros(scores.shape, dtype=bool)
# Request a stable sort. Mergesort takes more memory (~40MB per
# megafeature on x86-64).
mask[np.argsort(scores, kind="mergesort")[-self.k:]] = 1
return mask
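# A minimal usage sketch for the filter classes (editor's addition):
#
#     >>> from sklearn.datasets import load_iris
#     >>> X, y = load_iris(return_X_y=True)
#     >>> X_new = SelectKBest(f_classif, k=2).fit_transform(X, y)
#     >>> X_new.shape
#     (150, 2)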
class SelectFpr(_BaseFilter):
"""Filter: Select the pvalues below alpha based on a FPR test.
FPR test stands for False Positive Rate test. It controls the total
amount of false detections.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
Default is f_classif (see below "See also"). The default function only
works with classification tasks.
alpha : float, optional
The highest p-value for features to be kept.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
    mutual_info_classif: Mutual information for a discrete target.
f_regression: F-value between label/feature for regression tasks.
mutual_info_regression: Mutual information between features and the target.
SelectPercentile: Select features based on percentile of the highest scores.
SelectKBest: Select features based on the k highest scores.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, alpha=5e-2):
super(SelectFpr, self).__init__(score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
return self.pvalues_ < self.alpha
class SelectFdr(_BaseFilter):
"""Filter: Select the p-values for an estimated false discovery rate
This uses the Benjamini-Hochberg procedure. ``alpha`` is an upper bound
on the expected false discovery rate.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
Default is f_classif (see below "See also"). The default function only
works with classification tasks.
alpha : float, optional
The highest uncorrected p-value for features to keep.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
References
----------
https://en.wikipedia.org/wiki/False_discovery_rate
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
mutual_info_classif: Mutual information for a discrete target.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
    mutual_info_regression: Mutual information for a continuous target.
SelectPercentile: Select features based on percentile of the highest scores.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFwe: Select features based on family-wise error rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, alpha=5e-2):
super(SelectFdr, self).__init__(score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
n_features = len(self.pvalues_)
sv = np.sort(self.pvalues_)
selected = sv[sv <= float(self.alpha) / n_features *
np.arange(1, n_features + 1)]
if selected.size == 0:
return np.zeros_like(self.pvalues_, dtype=bool)
return self.pvalues_ <= selected.max()
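# Editor's note on _get_support_mask above: it implements the
# Benjamini-Hochberg step-up rule. With sorted p-values p_(1) <= ... <= p_(m),
# every feature whose p-value is at most the largest p_(i) satisfying
# p_(i) <= alpha * i / m is kept.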
class SelectFwe(_BaseFilter):
"""Filter: Select the p-values corresponding to Family-wise error rate
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
Default is f_classif (see below "See also"). The default function only
works with classification tasks.
alpha : float, optional
The highest uncorrected p-value for features to keep.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores.
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
SelectPercentile: Select features based on percentile of the highest scores.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
GenericUnivariateSelect: Univariate feature selector with configurable mode.
"""
def __init__(self, score_func=f_classif, alpha=5e-2):
super(SelectFwe, self).__init__(score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
return (self.pvalues_ < self.alpha / len(self.pvalues_))
######################################################################
# Generic filter
######################################################################
# TODO this class should fit on either p-values or scores,
# depending on the mode.
class GenericUnivariateSelect(_BaseFilter):
"""Univariate feature selector with configurable strategy.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
        (scores, pvalues). For modes 'percentile' or 'k_best' it can return
        a single array scores.
mode : {'percentile', 'k_best', 'fpr', 'fdr', 'fwe'}
Feature selection mode.
param : float or int depending on the feature selection mode
Parameter of the corresponding mode.
Attributes
----------
scores_ : array-like, shape=(n_features,)
Scores of features.
pvalues_ : array-like, shape=(n_features,)
p-values of feature scores, None if `score_func` returned scores only.
See also
--------
f_classif: ANOVA F-value between label/feature for classification tasks.
mutual_info_classif: Mutual information for a discrete target.
chi2: Chi-squared stats of non-negative features for classification tasks.
f_regression: F-value between label/feature for regression tasks.
mutual_info_regression: Mutual information for a continuous target.
SelectPercentile: Select features based on percentile of the highest scores.
SelectKBest: Select features based on the k highest scores.
SelectFpr: Select features based on a false positive rate test.
SelectFdr: Select features based on an estimated false discovery rate.
SelectFwe: Select features based on family-wise error rate.
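    Examples
    --------
    A usage sketch selecting the 20 best features by chi-squared score:

    >>> from sklearn.datasets import load_breast_cancer
    >>> from sklearn.feature_selection import GenericUnivariateSelect, chi2
    >>> X, y = load_breast_cancer(return_X_y=True)
    >>> X.shape
    (569, 30)
    >>> transformer = GenericUnivariateSelect(chi2, 'k_best', param=20)
    >>> X_new = transformer.fit_transform(X, y)
    >>> X_new.shape
    (569, 20)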
"""
_selection_modes = {'percentile': SelectPercentile,
'k_best': SelectKBest,
'fpr': SelectFpr,
'fdr': SelectFdr,
'fwe': SelectFwe}
def __init__(self, score_func=f_classif, mode='percentile', param=1e-5):
super(GenericUnivariateSelect, self).__init__(score_func)
self.mode = mode
self.param = param
def _make_selector(self):
selector = self._selection_modes[self.mode](score_func=self.score_func)
# Now perform some acrobatics to set the right named parameter in
# the selector
possible_params = selector._get_param_names()
possible_params.remove('score_func')
selector.set_params(**{possible_params[0]: self.param})
return selector
def _check_params(self, X, y):
if self.mode not in self._selection_modes:
raise ValueError("The mode passed should be one of %s, %r,"
" (type %s) was passed."
% (self._selection_modes.keys(), self.mode,
type(self.mode)))
self._make_selector()._check_params(X, y)
def _get_support_mask(self):
check_is_fitted(self, 'scores_')
selector = self._make_selector()
selector.pvalues_ = self.pvalues_
selector.scores_ = self.scores_
return selector._get_support_mask()
|
{
"content_hash": "bbf7134f375ffd14f9e806c301cdda64",
"timestamp": "",
"source": "github",
"line_count": 753,
"max_line_length": 80,
"avg_line_length": 34.92828685258964,
"alnum_prop": 0.6300140679061632,
"repo_name": "BiaDarkia/scikit-learn",
"id": "612f61028e2a477f401485dc20e2e4d237f400f7",
"size": "26301",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "sklearn/feature_selection/univariate_selection.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "394787"
},
{
"name": "C++",
"bytes": "140225"
},
{
"name": "Makefile",
"bytes": "1588"
},
{
"name": "PowerShell",
"bytes": "17312"
},
{
"name": "Python",
"bytes": "6330849"
},
{
"name": "Shell",
"bytes": "6748"
}
],
"symlink_target": ""
}
|
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_workspaceMergerDialog(object):
def setupUi(self, workspaceMergerDialog):
workspaceMergerDialog.setObjectName(_fromUtf8("workspaceMergerDialog"))
workspaceMergerDialog.resize(400, 344)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/icons/merger.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
workspaceMergerDialog.setWindowIcon(icon)
self.gridLayout_3 = QtGui.QGridLayout(workspaceMergerDialog)
self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
self.groupBox = QtGui.QGroupBox(workspaceMergerDialog)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.gridLayout_2 = QtGui.QGridLayout(self.groupBox)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.inputLineEdit = QtGui.QLineEdit(self.groupBox)
self.inputLineEdit.setObjectName(_fromUtf8("inputLineEdit"))
self.gridLayout_2.addWidget(self.inputLineEdit, 1, 0, 1, 3)
self.addButton = QtGui.QPushButton(self.groupBox)
self.addButton.setObjectName(_fromUtf8("addButton"))
self.gridLayout_2.addWidget(self.addButton, 2, 0, 1, 1)
self.inputToolButton = QtGui.QToolButton(self.groupBox)
self.inputToolButton.setObjectName(_fromUtf8("inputToolButton"))
self.gridLayout_2.addWidget(self.inputToolButton, 1, 3, 1, 1)
self.removeButton = QtGui.QPushButton(self.groupBox)
self.removeButton.setObjectName(_fromUtf8("removeButton"))
self.gridLayout_2.addWidget(self.removeButton, 2, 1, 1, 1)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout_2.addItem(spacerItem, 2, 2, 1, 1)
self.inputListView = QtGui.QListView(self.groupBox)
self.inputListView.setObjectName(_fromUtf8("inputListView"))
self.gridLayout_2.addWidget(self.inputListView, 0, 0, 1, 4)
self.gridLayout_3.addWidget(self.groupBox, 0, 0, 2, 2)
self.groupBox_2 = QtGui.QGroupBox(workspaceMergerDialog)
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.gridLayout = QtGui.QGridLayout(self.groupBox_2)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.outputToolButton = QtGui.QToolButton(self.groupBox_2)
self.outputToolButton.setObjectName(_fromUtf8("outputToolButton"))
self.gridLayout.addWidget(self.outputToolButton, 0, 1, 1, 1)
self.outputLineEdit = QtGui.QLineEdit(self.groupBox_2)
self.outputLineEdit.setObjectName(_fromUtf8("outputLineEdit"))
self.gridLayout.addWidget(self.outputLineEdit, 0, 0, 1, 1)
self.gridLayout_3.addWidget(self.groupBox_2, 2, 0, 1, 2)
self.buttonBox = QtGui.QDialogButtonBox(workspaceMergerDialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.gridLayout_3.addWidget(self.buttonBox, 3, 0, 1, 1)
self.retranslateUi(workspaceMergerDialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), workspaceMergerDialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), workspaceMergerDialog.reject)
QtCore.QMetaObject.connectSlotsByName(workspaceMergerDialog)
def retranslateUi(self, workspaceMergerDialog):
workspaceMergerDialog.setWindowTitle(_translate("workspaceMergerDialog", "Workspace Merger", None))
self.groupBox.setTitle(_translate("workspaceMergerDialog", "Input workspaces", None))
self.addButton.setText(_translate("workspaceMergerDialog", "Add", None))
self.inputToolButton.setText(_translate("workspaceMergerDialog", "...", None))
self.removeButton.setText(_translate("workspaceMergerDialog", "Remove", None))
self.groupBox_2.setTitle(_translate("workspaceMergerDialog", "Output workspace", None))
self.outputToolButton.setText(_translate("workspaceMergerDialog", "...", None))
import resources_rc
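
if __name__ == "__main__":
    # Minimal manual test (a sketch, not part of the pyuic-generated output):
    # build the dialog and show it inside a QApplication event loop.
    import sys
    app = QtGui.QApplication(sys.argv)
    dialog = QtGui.QDialog()
    ui = Ui_workspaceMergerDialog()
    ui.setupUi(dialog)
    dialog.show()
    sys.exit(app.exec_())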
|
{
"content_hash": "2a203f42c0585dadbc45aeb00ae06941",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 116,
"avg_line_length": 58.278481012658226,
"alnum_prop": 0.720894874022589,
"repo_name": "xcgspring/XSTAF",
"id": "749e732315a0f6690af109040fd381eeeb07b40d",
"size": "4842",
"binary": false,
"copies": "1",
"ref": "refs/heads/ver0.1",
"path": "XSTAF/tools/workspace_merger/ui/ui_workspace_merger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "7227"
},
{
"name": "Python",
"bytes": "980326"
}
],
"symlink_target": ""
}
|
"""Class containing info about a full trace."""
import collections
import sys
from common import endpoint_cmp
from connection_info import ConnectionInfo
class TraceInfo(object):
"""A class containing a summary about a full packet trace."""
ANALYSIS_TYPES = ['flow', 'packet']
def __init__(self, f, analysis_type, debug=0):
self._f = f
assert analysis_type in self.ANALYSIS_TYPES
self._analysis_type = analysis_type
self._debug = debug
self._conn = collections.OrderedDict()
self._f.write(ConnectionInfo.header(self._analysis_type) + '\n')
def __del__(self):
# print connection data to out file
for connhash in self._conn.keys():
self._conn[connhash].print_connection_info()
@classmethod
def get_hash(cls, packet):
return (('%s:%s-%s:%s-%s' % (packet.ip_src, packet.sport, packet.ip_dst,
packet.dport, packet.ip_proto))
if (endpoint_cmp(packet.ip_src, packet.sport, packet.ip_dst,
packet.dport) <= 0) else
('%s:%s-%s:%s-%s' % (packet.ip_dst, packet.dport, packet.ip_src,
packet.sport, packet.ip_proto)))
def process_packet(self, packet):
"""Process a packet."""
# get a 4-tuple hash
connhash = self.get_hash(packet)
if self._debug > 0:
sys.stderr.write('%s %s %s %s %s %s %s\n' % (
connhash, packet.ip_src, packet.ip_dst, packet.sport, packet.dport,
packet.timestamp, packet.ip_len))
# only process tcp, udp, and sctp packets
if (packet.ip_proto != 6 and packet.ip_proto != 17 and
packet.ip_proto != 132):
return
# process the packet
if connhash not in self._conn:
self._conn[connhash] = ConnectionInfo(self._analysis_type,
connhash, self._f, self._debug)
self._conn[connhash].process_packet(packet)
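
if __name__ == '__main__':
  # Smoke test (a sketch, not part of the original module): get_hash()
  # should canonicalize both directions of a flow to the same key, assuming
  # endpoint_cmp() orders endpoints consistently by IP and port.
  # FakePacket is a hypothetical stand-in carrying only the fields that
  # get_hash() reads.
  FakePacket = collections.namedtuple(
      'FakePacket', ['ip_src', 'ip_dst', 'sport', 'dport', 'ip_proto'])
  fwd = FakePacket('10.0.0.1', '10.0.0.2', 12345, 80, 6)
  rev = FakePacket('10.0.0.2', '10.0.0.1', 80, 12345, 6)
  assert TraceInfo.get_hash(fwd) == TraceInfo.get_hash(rev)
  print(TraceInfo.get_hash(fwd))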
|
{
"content_hash": "58d84b8fb64cef2775a191fe31218d67",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 77,
"avg_line_length": 35.370370370370374,
"alnum_prop": 0.6010471204188481,
"repo_name": "google/rttcp",
"id": "2d32ea3deb121a975f84c9a4872e4f07b7576175",
"size": "2528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trace_info.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "70823"
}
],
"symlink_target": ""
}
|
import numpy as np
import sys
import json
import pandas as pd
import re
import glob as glob
import os
from collections import defaultdict
from xml.etree import cElementTree as ET
from io import StringIO
#from metatlas.datastructures import metatlas_objects as metob
#from metatlas.io import metatlas_get_data_helper_fun as ma_data
#from metatlas.plots import dill2plots as dp
from metatlas.io.update_lcmsfiles_in_lims import EXTENSIONS
# imports for the xml to dictionary round trip
from collections import Mapping
import six
from pathlib2 import PurePath
import time
BATCH_FILE_PATH = '/global/common/software/m2650/mzmine_parameters/batch_files/'
BINARY_PATH = '/global/common/software/m2650/mzmine_parameters/MZmine'
# new stuff:
import pathlib
import argparse
from subprocess import call, check_output
# you need this!
# https://github.com/LabKey/labkey-api-python
#
# sys.path.insert(0,'/Users/bpb/repos/labkey-api-python/')
# sys.path.insert(0,'/global/homes/b/bpb/repos/labkey-api-python/')
# import labkey as lk
from labkey.api_wrapper import APIWrapper
import hashlib
import requests
from datetime import datetime, time as dtime
import math
# sys.path.insert(0,'/global/homes/b/bpb/repos/metatlas')
# from metatlas.untargeted import mzmine_batch_tools_adap as mzm
#from metatlas.plots import dill2plots as dp
import collections
from ast import literal_eval
from copy import deepcopy
import xmltodict
import zipfile
from rdkit import Chem
def get_monoisotopic_mass(formula):
parts = re.findall("[A-Z][a-z]?|[0-9]+", formula)
mass = 0
for index in range(len(parts)):
if parts[index].isnumeric():
continue
multiplier = int(parts[index + 1]) if len(parts) > index + 1 and parts[index + 1].isnumeric() else 1
isotopeMass = Chem.PeriodicTable.GetMostCommonIsotopeMass(Chem.GetPeriodicTable(), parts[index])
mass += isotopeMass * multiplier
return mass
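
# Sanity check (illustrative): the monoisotopic mass of water computed from
# RDKit's most-common-isotope masses should come out near 18.0106 Da.
# >>> round(get_monoisotopic_mass('H2O'), 4)
# 18.0106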
def get_google_sheet(notebook_name = "Sheet name",
token='/global/cfs/cdirs/metatlas/projects/google_sheets_auth/ipython to sheets demo-9140f8697062.json',
sheet_name = 'Sheet1',
literal_cols=None):
"""
Returns a pandas data frame from the google sheet.
Assumes header row is first row.
To use the token hard coded in the token field,
the sheet must be shared with:
metatlas-ipython-nersc@ipython-to-sheets-demo.iam.gserviceaccount.com
Unique sheet names are a requirement of this approach.
"""
import gspread
# from oauth2client.client import SignedJwtAssertionCredentials
from oauth2client.service_account import ServiceAccountCredentials
# scope = ['https://spreadsheets.google.com/feeds']
# scope = ['https://www.googleapis.com/auth/spreadsheets']
scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
#this is deprecated as of january, but we have pinned the version of oauth2.
#see https://github.com/google/oauth2client/issues/401
# json_key = json.load(open(token))
# credentials = SignedJwtAssertionCredentials(json_key['client_email'], json_key['private_key'].encode(), scope)
credentials = ServiceAccountCredentials.from_json_keyfile_name(token, scope)
#here is the new way incase the version pin is removed
#credentials = ServiceAccountCredentials.from_json_keyfile_name(token, scope)
gc = gspread.authorize(credentials)
wks = gc.open(notebook_name)
istd_qc_data = wks.worksheet(sheet_name).get_all_values()
headers = istd_qc_data.pop(0)
df = pd.DataFrame(istd_qc_data,columns=headers)
# Use round trip through read_csv to infer dtypes
s = StringIO()
df.to_csv(s)
df2 = pd.read_csv(StringIO(s.getvalue()))
if 'Unnamed: 0' in df2.columns:
df2.drop(columns=['Unnamed: 0'],inplace=True)
#turn list elements into lists instead of strings
if literal_cols is not None:
for col in literal_cols:
df2[col] = df2[col].apply(literal_eval)
df2 = df2.fillna('')
return df2
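
# Usage sketch (hypothetical sheet name; the sheet must be shared with the
# service account listed in the docstring above):
# df_params = get_google_sheet(notebook_name='my mzmine params', sheet_name='Sheet1')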
key_file = '/global/cfs/cdirs/metatlas/labkey_user.txt'
with open(key_file,'r') as fid:
api_key = fid.read().strip()
labkey_server='metatlas.nersc.gov'
project_name='LIMS/'
api = APIWrapper(labkey_server, project_name, use_ssl=True,api_key=api_key)
def get_parent_folders_from_lcmsruns(get_groups=False):
# SELECT DISTINCT parent_dir FROM lcmsrun_plus
sql = """SELECT DISTINCT parent_dir FROM lcmsrun_plus"""
if get_groups==True:
sql = """SELECT
lcmsrun.mzml_file.filename AS mzml_file,
regexp_replace(lcmsrun.name, '.*/([^/]*)/[^/]*$', '\1') AS parent_dir,
split_part(regexp_replace(lcmsrun.name, '.*/[^/]*/([^/]*)$', '\1'), '_', 13) AS file_name_field_12
FROM lcmsrun"""
schema = 'lists'
sql_result = api.query.execute_sql(schema, sql,max_rows=1e6)
if sql_result is None:
        print('execute_sql: Failed to load results from schema %s' % schema)
return None
else:
df = pd.DataFrame(sql_result['rows'])
df = df[[c for c in df.columns if not c.startswith('_')]]
return df
def get_files_from_disk(directory,extension):
"""
Get on disk with date
"""
get_with_date = ''.join(['find %s -iname "*%s"' % (directory,extension),' -printf "%Ts SplitThat%p\n"'])
files = check_output(get_with_date, shell=True)
files = files.decode('utf-8').splitlines()
files = [f.split('SplitThat') for f in files]
dates = [int(f[0].strip()) for f in files]
files = [f[1].strip() for f in files]
return dates,files
def complex_name_splitter(filename,
extensions=set(['raw', 'tab', 'gz', 'pactolus', 'mzML', 'd','h5']),
strippath='/global/project/projectdirs/metatlas/raw_data'):
#Get the filename
basename = os.path.basename(filename)
#Path is everything not filename
pathname = filename.replace(basename,'')
#Don't store the basepath since files will likely move
pathname = pathname.replace(strippath,'')
pathname = pathname.strip('/')
    #remove extension, but keep any internal . separated content
pieces = set(basename.split('.')) - extensions
name = '.'.join(pieces)
name = name.replace('_spectral-hits','')
#this will be a basename that has typically two folders
#ths should not have an extension
new_name = os.path.join(pathname,name)
return new_name
def hash_bytestr_iter(bytesiter, hasher, ashexstr=False):
for block in bytesiter:
hasher.update(block)
return hasher.hexdigest() if ashexstr else hasher.digest()
def make_sha256(afile, blocksize=65536):
sha = hashlib.sha256()
with open(afile, 'rb') as f:
while True:
data = f.read(blocksize)
if not data:
break
sha.update(data)
return format(sha.hexdigest())
# def make_sha256(fname):
# return hash_bytestr_iter(file_as_blockiter(open(fname, 'rb')), hashlib.sha256())
def get_acqtime_from_mzml(mzml_file):
    startTimeStamp = None
    with open(mzml_file) as mzml:
        for line in mzml:
            if 'startTimeStamp' in line:
                startTimeStamp = line.split('startTimeStamp="')[1].split('"')[0].replace('T',' ').rstrip('Z')
                break
    # Guard against files with no startTimeStamp attribute at all, as well as
    # the '-infinity' placeholder some converters emit.
    if startTimeStamp is not None and '-infinity' not in startTimeStamp:
        date_object = datetime.strptime(startTimeStamp, '%Y-%m-%d %H:%M:%S')
        utc_timestamp = int(time.mktime(date_object.timetuple()))
    else:
        utc_timestamp = int(0)
    return utc_timestamp
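
# Illustrative mzML fragment this function scans for (the <run> element of an
# mzML file carries the acquisition start time as an ISO-8601 attribute):
# <run id="..." startTimeStamp="2021-03-04T12:34:56Z" ...>
# For that fragment the function returns the corresponding Unix epoch seconds.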
def get_table_from_lims(table,columns=None,max_rows=1e6):
if columns is None:
sql = """SELECT * FROM %s;"""%table
else:
sql = """SELECT %s FROM %s;"""%(','.join(columns),table)
# base execute_sql
schema = 'lists'
sql_result = api.query.execute_sql(schema, sql,max_rows=max_rows)
if sql_result is None:
print(('execute_sql: Failed to load results from ' + schema + '.' + table))
return None
else:
df = pd.DataFrame(sql_result['rows'])
df = df[[c for c in df.columns if not c.startswith('_')]]
return df
def update_table_in_lims(df,table,method='update',max_size=1000):
"""
Note: Do ~1000 rows at a time. Any more and you get a 504 error. Maybe increasing the timeout would help.
In the header, timeout is a variable that gets set. Check to see what its set to. Maybe increasing it would let
more rows be updated at a time
Use it like this:
update_table_in_lims(df_lims,'mzml_files')
whatever is in 'name' or 'Key' will replace whatever used to be there with the other columns
"""
# if columns is None:
# cols = df.columns
# else:
# cols = pd.unique([index_column] + columns)
# One of the cols needs to be the index column (almost always: Key or Name)
N = math.ceil(float(df.shape[0]) / max_size)
for sub_df in np.array_split(df, N):
payload = sub_df.to_dict('records')
if method=='update':
api.query.update_rows('lists', table, payload,timeout=10000)
elif method=='insert':
api.query.insert_rows('lists', table, payload,timeout=10000)
elif method=='delete':
api.query.delete_rows('lists', table, payload,timeout=10000)
else:
print(('ERROR: Nothing to do. Method %s is not programmed'%method))
print('updated %d rows in %s'%(df.shape[0],table))
def get_union_of_all_lcms_names(tables=['mzml_file','hdf5_file','pactolus_file','raw_file','spectralhits_file']):
# sort out the lcmsrun table
sql = ['select name from %s'%t for t in tables]
sql = ' union '.join(sql)
# con = lk.utils.create_server_context(labkey_server, project_name, use_ssl=True,)
# base execute_sql
schema = 'lists'
sql_result = api.query.execute_sql(schema, sql,max_rows=1e6)
if sql_result is None:
        print('execute_sql: Failed to load results from %s tables: %s' % (schema, ', '.join(tables)))
else:
return [r['name'] for r in sql_result['rows']]
def update_lcmsrun_names(tables=['mzml_file','hdf5_file','pactolus_file','raw_file','spectralhits_file']):
#get all the names in the various raw data tables
names = get_union_of_all_lcms_names(tables)
#get all the names in lcmsrun (rawdata relationship) table
lcmsruns = get_table_from_lims('lcmsrun',columns=['name'])
lcmsruns = lcmsruns['name'].tolist()
# this is likeley a recently uploaded file that was just created
missing_from_lcmsruns = list(set(names) - set(lcmsruns))
#hopefully there aren't any of these, but always good to check
extra_in_lcmsruns = list(set(lcmsruns) - set(names))
#add missing ones
if len(missing_from_lcmsruns)>0:
temp = pd.DataFrame()
temp['name'] = missing_from_lcmsruns
update_table_in_lims(temp,'lcmsrun',method='insert')
#remove extra ones
if len(extra_in_lcmsruns)>0:
sql = """SELECT Key FROM lcmsrun where name IN (%s);"""%','.join(['\'%s\''%e for e in extra_in_lcmsruns])
# print(sql)
schema = 'lists'
sql_result = api.query.execute_sql(schema, sql,max_rows=1e6)
if sql_result is None:
            print('execute_sql: Failed to load results from %s.lcmsrun' % schema)
# return None
else:
temp = pd.DataFrame(sql_result['rows'])
temp = temp[[c for c in temp.columns if not c.startswith('_')]]
# return df
if temp.shape[0]>0:
update_table_in_lims(temp,'lcmsrun',method='delete')
return missing_from_lcmsruns,extra_in_lcmsruns
def update_lcmsrun_matrix(file_type):
lcmsruns = get_table_from_lims('lcmsrun',columns=['Key','name',file_type])
lcmsruns.fillna(-1,inplace=True) #replace None indices so absolute value below has something to work on
lcmsruns.rename(columns={file_type:'%s_existing'%file_type},inplace=True)
data = get_table_from_lims(file_type,columns=['Key','name'])
df = pd.merge(lcmsruns,data,on='name',how='inner')
df.rename(columns={'Key_x':'Key','Key_y':file_type},inplace=True)
df = df[abs(df['%s_existing'%file_type]-df[file_type])>0]
df.drop(columns=['name','%s_existing'%file_type],inplace=True)
print((df.shape))
if df.shape[0]>0:
update_table_in_lims(df,'lcmsrun',method='update')#,index_column='Key',columns=None,labkey_server='metatlas-dev.nersc.gov',project_name='/LIMS'):
print('done updating')
def get_lcmsrun_matrix():#labkey_server='metatlas-dev.nersc.gov',project_name='/LIMS'):
sql = 'select '
for f in ['mzml','hdf5','raw','spectralhits','pactolus']:
sql = '%s %s_file.filename as %s_filename,'%(sql,f,f)
sql = '%s from lcmsrun'%sql
# base execute_sql
schema = 'lists'
sql_result = api.query.execute_sql(schema, sql,max_rows=1e8)
if sql_result is None:
        print('execute_sql: Failed to load results from %s.lcmsrun' % schema)
return None
else:
lcmsruns = pd.DataFrame(sql_result['rows'])
return lcmsruns
def update_file_conversion_tasks(task,lcmsruns=None,file_conversion_tasks=None):#,labkey_server='metatlas-dev.nersc.gov',project_name='/LIMS'):
"""
gets current tasks and current files and determines if new tasks need to be made:
task will be:['mzml_to_hdf5','raw_to_mzml','mzml_to_spectralhits','mzml_to_pactolus']
"""
input_type = task.split('_')[0]
output_type = task.split('_')[-1]
if file_conversion_tasks is None:
file_conversion_tasks = get_table_from_lims('file_conversion_task',columns=['Key','input_file','output_file','task','status'])
# task_idx = file_conversion_tasks['task']==task
if lcmsruns is None:
lcmsruns = get_lcmsrun_matrix()
done_input_files = lcmsruns.loc[pd.notna(lcmsruns['%s_filename'%input_type]),'%s_filename'%input_type]
done_output_files = lcmsruns.loc[pd.notna(lcmsruns['%s_filename'%output_type]),'%s_filename'%output_type]
task_idx = file_conversion_tasks['task']==task
inputfile_idx = file_conversion_tasks['input_file'].isin(done_input_files)
outputfile_idx = file_conversion_tasks['output_file'].isin(done_output_files)
# This finds where output file exists
done_tasks_idx = (task_idx) & (outputfile_idx)
if sum(done_tasks_idx)>0:
update_table_in_lims(file_conversion_tasks.loc[done_tasks_idx,['Key']],'file_conversion_task',method='delete')#,labkey_server=labkey_server,project_name=project_name)
print(('%s: There are %d tasks where output file exist and will be removed'%(task,file_conversion_tasks[done_tasks_idx].shape[0])))
# This finds where input file is missing
done_tasks_idx = (task_idx) & (~inputfile_idx)
if sum(done_tasks_idx)>0:
update_table_in_lims(file_conversion_tasks.loc[done_tasks_idx,['Key']],'file_conversion_task',method='delete')#,labkey_server=labkey_server,project_name=project_name)
print(('%s: There are %d tasks where input file is missing and will be removed'%(task,file_conversion_tasks[done_tasks_idx].shape[0])))
right_now_str = datetime.now().strftime("%Y%m%d %H:%M:%S")
idx = (pd.notna(lcmsruns['%s_filename'%input_type])) & (pd.isna(lcmsruns['%s_filename'%output_type]))
temp = pd.DataFrame()
temp['input_file'] = lcmsruns.loc[idx,'%s_filename'%input_type]
temp['output_file'] = temp['input_file'].apply(lambda x: re.sub('\%s$'%EXTENSIONS[input_type],'%s'%EXTENSIONS[output_type],x))
temp['task'] = task
temp['status'] = STATUS['initiation']
temp['log'] = 'detected: %s'%right_now_str
temp.reset_index(drop=True,inplace=True)
cols = temp.columns
temp = pd.merge(temp,file_conversion_tasks.add_suffix('_task'),left_on=['input_file','output_file'],right_on=['input_file_task','output_file_task'],how='outer',indicator=True)
new_tasks = temp[temp['_merge']=='left_only'].copy()
new_tasks = new_tasks[cols]
new_tasks.reset_index(drop=True,inplace=True)
print(("There are %d new tasks"%new_tasks.shape[0]))
if new_tasks.shape[0]>0:
update_table_in_lims(new_tasks,'file_conversion_task',method='insert')
def update_file_table(file_table):
file_type = file_table.split('_')[0]
v = GETTER_SPEC[file_type]
print(('Getting %s files from disk'%(file_type)))
dates,files = get_files_from_disk(PROJECT_DIRECTORY,v['extension'])
if len(files)>0:
df = pd.DataFrame(data={'filename':files,'file_type':file_type,'timeepoch':dates})
df['basename'] = df['filename'].apply(os.path.basename)
df['name'] = df['filename'].apply(complex_name_splitter) #make a name for grouping associated content
else:
df = pd.DataFrame()
df['filename'] = 'None'
df['file_type'] = file_type
df['timeepoch'] = 0
df['basename'] = 'None'
df['name'] = 'None'
print(('\tThere were %d files on disk'%len(files)))
cols = ['filename','name','Key']
df_lims = get_table_from_lims(v['lims_table'],columns=cols)
print(('\tThere were %d files from LIMS table %s'%(df_lims.shape[0],v['lims_table'])))
diff_df = pd.merge(df, df_lims,on=['filename','name'], how='outer', indicator='Exist')
diff_df = diff_df.loc[diff_df['Exist'] != 'both'] #(left_only, right_only, or both)
print(('\tThere are %d different'%diff_df.shape[0]))
print('')
# diff_df.fillna('',inplace=True)
diff_df['parameters'] = 1
cols = ['file_type','filename','timeepoch','basename','name']
temp = diff_df.loc[diff_df['Exist']=='left_only',cols]
if temp.shape[0]>0:
update_table_in_lims(temp,file_table,method='insert')#,index_column='Key',columns=None,labkey_server='metatlas-dev.nersc.gov',project_name='/LIMS'):
cols = ['Key','filename']
temp = diff_df.loc[diff_df['Exist']=='right_only',cols]
temp['Key'] = temp['Key'].astype(int)
if temp.shape[0]>0:
update_table_in_lims(temp,file_table,method='delete')#,index_column='Key',columns=None,labkey_server='metatlas-dev.nersc.gov',project_name='/LIMS'):
# df.to_csv('/global/homes/b/bpb/Downloads/%s_files.tab'%k,index=None,sep='\t')
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
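
# Typical wiring (illustrative): lets a command-line flag accept yes/no forms.
# parser = argparse.ArgumentParser()
# parser.add_argument('--override', type=str2bool, nargs='?', const=True, default=False)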
def build_untargeted_filename(output_dir,parent_dir,polarity,file_type):
"""
file_spec = {'peak-area-mzmine':'peak-area.csv',
'mzmine-runner':'mzmine.sh',
'msms-mzmine':'_MSMS.mgf',
'peak-height-mzmine':'_peak-height.csv',
'gnps-uuid-fbmn':'_gnps-uuid.txt',
'fbmn-runner':'fbmn.sh',
'fbmn-sbatch':'fbmn-sbatch.sbatch',
'mzmine-outlog':'-mzmine.out',
'batch-params-mzmine':'_batch-params.xml',
'quant-fbmn':'quant.csv',
'gnps-fbmn-network':'_gnps-fbmn-network.graphml',
'mzmine-sbatch':'mzmine-sbatch.sbatch',
'mzmine-errlog':'-mzmine.err',
'metadata':'_metadata.tab',
'fbmn-errlog':'fbmn.err',
'fbmn-outlog':'fbmn.out',
'gnps-download':'_gnps-download.zip'}
"""
file_spec = {'peak-area-mzmine':'peak-area.csv',
'mzmine-runner':'_mzmine.sh',
'msms-mzmine':'_MSMS.mgf',
'peak-height-mzmine':'_peak-height.csv',
'gnps-uuid-fbmn':'_gnps-uuid.txt',
'fbmn-runner':'fbmn.sh',
'fbmn-sbatch':'fbmn-sbatch.sbatch',
'mzmine-outlog':'-mzmine.out',
'batch-params-mzmine':'_batch-params.xml',
'quant-fbmn':'quant.csv',
'gnps-fbmn-network':'_gnps-fbmn-network.graphml',
'mzmine-sbatch':'_mzmine-sbatch.sbatch',
'mzmine-errlog':'-mzmine.err',
'metadata':'_metadata.tab',
'fbmn-errlog':'fbmn.err',
'fbmn-outlog':'fbmn.out',
'gnps-download':'_gnps-download.zip',
'blink-hits':'_blinkhits.csv.gz',
'blink-network-hits':'_blinknetworkhits.csv.gz',
'simile-hits':'_similehits.csv.gz'}
pathname = os.path.join(output_dir,'%s_%s'%(parent_dir,polarity))
filename = '%s_%s%s'%(parent_dir,polarity,file_spec[file_type])
filename = os.path.join(pathname,filename)
return filename
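
# Example (hypothetical arguments):
# build_untargeted_filename('/out', 'proj1', 'positive', 'peak-height-mzmine')
# returns '/out/proj1_positive/proj1_positive_peak-height.csv'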
def check_gnps_status(taskid):
url = 'https://gnps.ucsd.edu/ProteoSAFe/status_json.jsp?task=%s'%taskid
d = requests.get(url)
try:
d = json.loads(d.text)
if 'status' in d:
return d['status'], float(d['workflow_version'].split('_')[-1])
else:
return 'N/A',0.0
except:
return 'N/A',0.0
def download_gnps_graphml(taskid,outfile):
url = "https://gnps.ucsd.edu/ProteoSAFe/DownloadResultFile?task=%s&block=main&file=gnps_molecular_network_graphml/" % (taskid)
d = requests.get(url)
with open(outfile, "w") as fid:
fid.write(d.text)
def submit_mzmine_jobs(polarity='positive',polarity_short='pos'):
"""
finds initiated mzmine tasks
submit them
changes to running
"""
tasktype='mzmine'
df = get_table_from_lims('untargeted_tasks')
update_df = []
for i,row in df[df['%s_%s_status'%(tasktype,polarity_short)]=='01 initiation'].iterrows():
pathname = os.path.join(row['output_dir'],'%s_%s'%(row['parent_dir'],polarity))
runner_filename = os.path.join(pathname,'%s_%s_mzmine.sh'%(row['parent_dir'],polarity))
if os.path.isfile(runner_filename)==True:
with open(runner_filename,'r') as fid:
task = call(fid.read(),shell=True)
print(task)
df.loc[i,'%s_%s_status'%(tasktype,polarity_short)] = '04 running'
update_df.append(i)
if len(update_df)>0:
cols = ['Key',
'%s_%s_status'%(tasktype,polarity_short)]
update_table_in_lims(df.loc[df.index.isin(update_df),cols],'untargeted_tasks',method='update')
def update_mzmine_status_in_untargeted_tasks(polarity='positive',polarity_short='pos'):
"""
finds running mzmine tasks
checks if they have output
changes to complete if yes
sets fbmn to initiation
"""
tasktype='mzmine'
df = get_table_from_lims('untargeted_tasks')
update_df = []
c1 = df['%s_%s_status'%(tasktype,polarity_short)]=='04 running'
c2 = df['%s_%s_status'%(tasktype,polarity_short)]=='01 initiation'
for i,row in df[(c1) | (c2)].iterrows():
pathname = os.path.join(row['output_dir'],'%s_%s'%(row['parent_dir'],polarity))
peakheight_filename = os.path.join(pathname,'%s_%s_peak-height.csv'%(row['parent_dir'],polarity))
if os.path.isfile(peakheight_filename)==True:
#the job was a success
df.loc[i,'%s_%s_status'%(tasktype,polarity_short)] = '07 complete'
df.loc[i,'%s_%s_status'%('fbmn',polarity_short)] = '01 initiation'
update_df.append(i)
if len(update_df)>0:
cols = ['Key',
'%s_%s_status'%(tasktype,polarity_short),
'%s_%s_status'%('fbmn',polarity_short)]
update_table_in_lims(df.loc[df.index.isin(update_df),cols],'untargeted_tasks',method='update')
def update_fbmn_status_in_untargeted_tasks(polarity='positive',polarity_short='pos',latest_version=28.2):
"""
finds running fbmn tasks
checks if they have uuid
checks their status
changes to complete if yes
sets spectral hits to initiation
"""
tasktype='fbmn'
df = get_table_from_lims('untargeted_tasks')
update_df = []
c1 = df['%s_%s_status'%(tasktype,polarity_short)]=='04 running'
c2 = df['%s_%s_status'%(tasktype,polarity_short)]=='01 initiation'
c3 = df['%s_%s_status'%(tasktype,polarity_short)]=='08 hold'
for i,row in df[(c1) | (c2) | (c3)].iterrows():
pathname = os.path.join(row['output_dir'],'%s_%s'%(row['parent_dir'],polarity))
fbmn_filename = os.path.join(pathname,'%s_%s_gnps-uuid.txt'%(row['parent_dir'],polarity))
graphml_filename = os.path.join(pathname,'%s_%s_gnps-fbmn-network.graphml'%(row['parent_dir'],polarity))
if os.path.isfile(fbmn_filename)==True:
#the job was submitted
with open(fbmn_filename,'r') as fid:
my_text = fid.read().strip()
taskid = my_text.split('=')[-1]
status,version = check_gnps_status(taskid)
print('%s %.2f for %s'%(status,version,fbmn_filename))
if version<latest_version:
df.loc[i,'%s_%s_status'%(tasktype,polarity_short)] = '01 initiation'
# df.loc[i,'%s_%s_status'%('gnps_msms_hits',polarity_short)] = '01 initiation'
download_gnps_graphml(taskid,graphml_filename)
update_df.append(i)
elif status=='DONE':
df.loc[i,'%s_%s_status'%(tasktype,polarity_short)] = '07 complete'
# df.loc[i,'%s_%s_status'%('gnps_msms_hits',polarity_short)] = '01 initiation'
download_gnps_graphml(taskid,graphml_filename)
update_df.append(i)
elif status=='FAILED':
df.loc[i,'%s_%s_status'%(tasktype,polarity_short)] = '09 error'
# df.loc[i,'%s_%s_status'%('gnps_msms_hits',polarity_short)] = '08 hold'
update_df.append(i)
elif status=='RUNNING':
df.loc[i,'%s_%s_status'%(tasktype,polarity_short)] = '04 running'
# df.loc[i,'%s_%s_status'%('gnps_msms_hits',polarity_short)] = '08 hold'
update_df.append(i)
else:
df.loc[i,'%s_%s_status'%(tasktype,polarity_short)] = '01 initiation'
# df.loc[i,'%s_%s_status'%('gnps_msms_hits',polarity_short)] = '08 hold'
update_df.append(i)
# else:
# print('%s is not a file'%fbmn_filename)
if len(update_df)>0:
cols = ['Key',
'%s_%s_status'%(tasktype,polarity_short)] # , '%s_%s_status'%('gnps_msms_hits',polarity_short)
update_table_in_lims(df.loc[df.index.isin(update_df),cols],'untargeted_tasks',method='update')
def write_new_mzmine_params(gsheet_params,my_polarity,files,basepath,parent_dir):
"""
takes the generic mzmine parameters
changes it to the correct polarity
adds in the files
saves the xml file
"""
params = deepcopy(gsheet_params)
# new_basename = os.path.join(m['basedir'],'output')
#replace the polarity in the crop filter module
for k,v in params.items():
if 'polarity' in k:
params[k] = my_polarity.upper()
# #rename all the output files
for k,v in params.items():
try:
if 'placeholder_filename' in v:
if 'gnps-job' in v:
params[k] = v.replace('placeholder_filename',parent_dir)
else:
params[k] = v.replace('placeholder_filename',os.path.join(basepath,parent_dir))
except TypeError:
pass
# #This is a good place to make the values strings. The xml maker needs strings later on so might as well do it here
str_d = {}
for k,v in params.items():
str_d[k] = str(v)
# # #unflatten it
param_dict_unflat = unflatten(str_d)
files_filename = '%s_filelist.txt'%os.path.join(basepath,parent_dir)
file_list = [f for f in files.tolist() if f is not None]
if len(file_list)>0:
with open(files_filename,'w') as fid:
fid.write('%s'%'\n'.join(file_list))
new_raw_data = {'@method': 'net.sf.mzmine.modules.rawdatamethods.rawdataimport.RawDataImportModule',
'parameter': {'@name': 'Raw data file names',
'file': file_list}}
# new_raw_data['parameter']['file'] = mzmine_things[i]['file_list']
param_dict_unflat['batch']['batchstep'].insert(0,new_raw_data)# str_d.keys()
xml_string = xmltodict.unparse(param_dict_unflat)
batch_filename = '%s_batch-params.xml'%os.path.join(basepath,parent_dir)
with open(batch_filename,'w') as fid:
fid.write('%s'%xml_string)
return batch_filename
def submit_fbmn_jobs(polarity='positive',polarity_short='pos',N=15):
"""
finds initiated mzmine tasks
submit them
changes to running
"""
tasktype='fbmn'
df = get_table_from_lims('untargeted_tasks')
# df = df[(df['parent_dir'].str.contains('202'))] # & (df['parent_dir'].str.contains('AK'))
update_df = []
count = 0
for i,row in df[df['%s_%s_status'%(tasktype,polarity_short)]=='01 initiation'].iterrows():
pathname = os.path.join(row['output_dir'],'%s_%s'%(row['parent_dir'],polarity))
runner_filename = os.path.join(pathname,'%s_%s_fbmn.sh'%(row['parent_dir'],polarity))
if os.path.isfile(runner_filename)==True:
with open(runner_filename,'r') as fid:
task = call(fid.read(),shell=True)
time.sleep(5)
print(task)
df.loc[i,'%s_%s_status'%(tasktype,polarity_short)] = '04 running'
update_df.append(i)
count += 1
if count==N:
break
if len(update_df)>0:
cols = ['Key',
'%s_%s_status'%(tasktype,polarity_short)]
update_table_in_lims(df.loc[df.index.isin(update_df),cols],'untargeted_tasks',method='update')
def get_mzmine_param_dict(gdrive_file='params20190719_v2p39_IsotopeFilter_ADAP_DeDup',param_id=2):
gsheet_params = get_google_sheet(notebook_name=gdrive_file,sheet_name='Sheet1')
new_cols = []
for c in gsheet_params.columns:
if c.startswith('('):
new_cols.append(literal_eval(c))
else:
new_cols.append(c)
gsheet_params.columns=new_cols
gsheet_params = gsheet_params[gsheet_params['param_id']==param_id]
gsheet_params = gsheet_params.to_dict(orient='records')[-1]
gsheet_params.pop('param_id',None)
gsheet_params = collections.OrderedDict(gsheet_params)
return gsheet_params
gsheet_params = get_mzmine_param_dict()
gsheet_params_idx = get_mzmine_param_dict(param_id=5)
def write_mzmine_sbatch_and_runner(basepath,batch_filename,parent_dir,num_files):
mzmine_launcher = get_latest_mzmine_binary(version='MZmine-2.39')
sbatch_filename = '%s_mzmine-sbatch.sbatch'%os.path.join(basepath,parent_dir)
runner_filename = '%s_mzmine.sh'%os.path.join(basepath,parent_dir)
s = '%s %s'%(mzmine_launcher,batch_filename)
with open(sbatch_filename,'w') as fid:
if num_files<51:
fid.write('%s\n%s\n'%(SLURM_HEADER.replace('slurm','%s-%s'%(os.path.join(basepath,parent_dir),'mzmine')),s))
        else:
            print('Using big-memory queue for %d input files'%num_files)
            fid.write('%s\n%s\n'%(SLURM_BIGMEM_HEADER.replace('slurm','%s-%s'%(os.path.join(basepath,parent_dir),'mzmine')),s))
with open(runner_filename,'w') as fid:
if num_files<51:
fid.write('sbatch %s'%sbatch_filename)
else:
fid.write('module load esslurm\n')
fid.write('sbatch %s\n'%sbatch_filename)
fid.write('module unload esslurm\n')
def write_fbmn_sbatch_and_runner(basepath,parent_dir):
runner_filename = '%s_fbmn.sh'%os.path.join(basepath,parent_dir)
python_binary = '/global/common/software/m2650/python3-metatlas-cori/bin/python'
python_file = '/global/homes/b/bpb/repos/metatlas/metatlas/untargeted/send_to_gnps.py'
python_args = '--basedir %s --basename %s --override True'%(basepath,parent_dir)
with open(runner_filename,'w') as fid:
fid.write('%s %s %s\n'%(python_binary,python_file,python_args))
def update_num_features():
import subprocess
df_tasks = get_table_from_lims('untargeted_tasks')
# Get num features
found = 0
keep_rows = []
for i,row in df_tasks.iterrows():
for polarity in ['positive','negative']:
peakheight = build_untargeted_filename(row['output_dir'],row['parent_dir'],polarity,'peak-height-mzmine')
if os.path.isfile(peakheight):
cmd = ['wc','-l','%s'%peakheight]
result = subprocess.run(cmd, stdout=subprocess.PIPE)
# n = int(subprocess.check_output().split()[0])
num = result.stdout.split()[0]
if len(num)>0:
num = int(int(num) - 1)
if num != row['num_%s_features'%polarity[:3]]:
df_tasks.loc[i,'num_%s_features'%polarity[:3]] = num
keep_rows.append(i)
print(num,row['num_%s_features'%polarity[:3]],result.stderr,result.stdout)
print('')
cols = [c for c in df_tasks.columns if (c.endswith('_features')) & (c.startswith('num_'))]
cols = cols + ['Key']
print(cols)
if len(keep_rows)>0:
temp = df_tasks.loc[df_tasks.index.isin(keep_rows),cols].copy()
temp.fillna(0,inplace=True)
update_table_in_lims(temp,'untargeted_tasks',method='update')
from pyteomics import mgf
import numpy as np
import scipy.sparse as sp
def read_mgf(filename):
df = []
with mgf.MGF(filename) as reader:
for spectrum in reader:
# count += 1
d = spectrum['params']
d['spectrum'] = np.array([spectrum['m/z array'],spectrum['intensity array']])
d['pepmass'] = d['pepmass'][0]
df.append(d)
ref_df = pd.DataFrame(df)
return ref_df
def update_num_msms():
import subprocess
df_tasks = get_table_from_lims('untargeted_tasks')
# Get num features
found = 0
keep_rows = []
for i,row in df_tasks.iterrows():
for polarity in ['positive','negative']:
filename = build_untargeted_filename(row['output_dir'],row['parent_dir'],polarity,'msms-mzmine')
if os.path.isfile(filename):
cmd = 'cat %s | grep FEATURE_ID| wc -l'%filename
result = subprocess.run(cmd,stdout=subprocess.PIPE, shell=True)
num = result.stdout.strip()
if len(num)>0:
num = int(num)
if num != row['num_%s_msms'%polarity[:3]]:
df_tasks.loc[i,'num_%s_msms'%polarity[:3]] = num
keep_rows.append(i)
print(num,row['num_%s_msms'%polarity[:3]],result.stderr,result.stdout)
# print('')
cols = [c for c in df_tasks.columns if (c.endswith('_msms')) & (c.startswith('num_'))]
cols = cols + ['Key']
print(cols)
if len(keep_rows)>0:
temp = df_tasks.loc[df_tasks.index.isin(keep_rows),cols].copy()
temp.fillna(0,inplace=True)
update_table_in_lims(temp,'untargeted_tasks',method='update')
def update_new_untargeted_tasks(update_lims=True):
"""
given all directories that are in the table for potential untargeted tasks
and all the directories in the raw data folders
return all the directories in the raw data folders
that are not in the untargeted tasks
The strip command is because there is one folder that ends in a
space and labkey doesn't allow this
"""
df = get_table_from_lims('lcmsrun_plus')
df = df[pd.notna(df['mzml_file'])]
df.drop(columns=['mzml_file_container'],inplace=True)
df.replace('',np.nan,inplace=True)
#Check that files have been sitting around for at least 3 hours (note time zones may vary)
time_check = df.groupby('parent_dir')['timeepoch'].max() < (time.time()-3*60*60)
time_check_folders = time_check[time_check==True].index.tolist()
df_untargeted = get_table_from_lims('untargeted_tasks')
all_folders = df.loc[df['polarity'].isin(['POS','NEG']),'parent_dir'].unique()
all_folders = [a.strip() for a in all_folders]
if df_untargeted.shape[0]>0:
folders_in_tasks = df_untargeted['parent_dir']
folders_in_tasks = [a.strip() for a in folders_in_tasks]
else:
folders_in_tasks = []
print(len(all_folders),len(folders_in_tasks))
new_folders = np.setdiff1d(all_folders,folders_in_tasks,)
print(len(new_folders),len(all_folders),len(folders_in_tasks))
new_folders = list(set(new_folders) & set(time_check_folders))
print(len(new_folders),len(all_folders),len(folders_in_tasks))
# get file counts
pos_count = df[df['polarity']=='POS'].groupby('parent_dir')['mzml_file'].count()
neg_count = df[df['polarity']=='NEG'].groupby('parent_dir')['mzml_file'].count()
missing = np.setdiff1d(new_folders,pos_count.index.tolist())
for m in missing:
pos_count[m] = 0
missing = np.setdiff1d(new_folders,neg_count.index.tolist())
for m in missing:
neg_count[m] = 0
outdir = '/project/projectdirs/metatlas/projects/untargeted_tasks'
# idx1 = ~df_untargeted['mzmine_pos_status'].str.contains('complete')
# idx2 = ~df_untargeted['mzmine_pos_status'].str.contains('not relevant')
# idx3 = ~df_untargeted['mzmine_neg_status'].str.contains('complete')
# idx4 = ~df_untargeted['mzmine_neg_status'].str.contains('not relevant')
# df_untargeted_new = df_untargeted[(idx1) | (idx2)]
# df_untargeted_new = df_untargeted[(idx3) | (idx4)]
# print(df.shape[0])
# df = df[df['parent_dir'].isin(df_untargeted_new['parent_dir'])]
# print(df.shape[0])
print('making metadata pos')
# make the metadata sheets
files = df[df['polarity']=='POS'].groupby('parent_dir')
files = [(d,g) for d,g in files]
pos_metadata_files = {}
pos_filelist = {}
for block in files:
my_polarity = 'positive'
polarity_short = 'pos'
parent_dir = '%s_%s'%(block[0],my_polarity)
basepath = os.path.join(outdir,parent_dir)
if not os.path.isdir(basepath):
os.mkdir(basepath)
metadata_filename = '%s_%s.tab'%(parent_dir,'metadata')
metadata_filename = os.path.join(basepath,metadata_filename)
temp = block[1][['mzml_file','sample_group']].copy()
temp['mzml_file'] = temp['mzml_file'].fillna('')
temp['sample_group'] = temp['sample_group'].fillna('')
temp['sample_group'] = temp['sample_group'].apply(lambda x: x.lower())
ugroups = temp['sample_group'].unique()
ugroups = [g for g in ugroups if 'exctrl' in g]
temp.rename(columns={'mzml_file':'filename','sample_group':'ATTRIBUTE_sampletype'},inplace=True)
temp['filename'] = temp['filename'].apply(lambda x: os.path.basename(x))
cols = ['CONTROL','CASE','ATTRIBUTE_media']
for i,g in enumerate(ugroups):
if i<(len(cols)):
temp[cols[i]] = g
# else:
# print('too many controls!!! %s'%g)
temp.to_csv(metadata_filename,sep='\t',index=False)
pos_metadata_files[block[0]] = metadata_filename
pos_filelist[block[0]] = block[1]['mzml_file']
print('making metadata neg')
# make the metadata sheets
files = df[df['polarity']=='NEG'].groupby('parent_dir')
files = [(d,g) for d,g in files]
neg_metadata_files = {}
neg_filelist = {}
for block in files:
my_polarity = 'negative'
polarity_short = 'neg'
parent_dir = '%s_%s'%(block[0],my_polarity)
basepath = os.path.join(outdir,parent_dir)
if not os.path.isdir(basepath):
os.mkdir(basepath)
metadata_filename = '%s_%s.tab'%(parent_dir,'metadata')
metadata_filename = os.path.join(basepath,metadata_filename)
temp = block[1][['mzml_file','sample_group']].copy()
temp['mzml_file'] = temp['mzml_file'].fillna('')
temp['sample_group'] = temp['sample_group'].fillna('')
temp['sample_group'] = temp['sample_group'].apply(lambda x: x.lower())
ugroups = temp['sample_group'].unique()
ugroups = [g for g in ugroups if 'exctrl' in g]
temp.rename(columns={'mzml_file':'filename','sample_group':'ATTRIBUTE_sampletype'},inplace=True)
temp['filename'] = temp['filename'].apply(lambda x: os.path.basename(x))
cols = ['CONTROL','CASE','ATTRIBUTE_media']
for i,g in enumerate(ugroups):
if i<(len(cols)):
temp[cols[i]] = g
# else:
# print('too many controls!!! %s'%g)
temp.to_csv(metadata_filename,sep='\t',index=False)
neg_metadata_files[block[0]] = metadata_filename
neg_filelist[block[0]] = block[1]['mzml_file']
print('There are %d new_folders'%len(new_folders))
if len(new_folders)>0:
new_folders = pd.DataFrame(data={'parent_dir':new_folders,'num_pos_files':pos_count[new_folders],'num_neg_files':neg_count[new_folders]})
new_folders['pos_metadata_file'] = ''
new_folders['neg_metadata_file'] = ''
for i,row in new_folders.iterrows():
if row['parent_dir'] in pos_metadata_files.keys():
new_folders.loc[i,'pos_metadata_file'] = pos_metadata_files[row['parent_dir']]
basepath = os.path.join(outdir,'%s_%s'%(row['parent_dir'],'positive'))
parent_dir = '%s_%s'%(row['parent_dir'],'positive')
if '_idx_' in parent_dir.lower():
batch_filename = write_new_mzmine_params(gsheet_params_idx,'positive',pos_filelist[row['parent_dir']],basepath,parent_dir)
else:
batch_filename = write_new_mzmine_params(gsheet_params,'positive',pos_filelist[row['parent_dir']],basepath,parent_dir)
write_mzmine_sbatch_and_runner(basepath,batch_filename,parent_dir,pos_filelist[row['parent_dir']].shape[0])
write_fbmn_sbatch_and_runner(basepath,parent_dir)
if row['parent_dir'] in neg_metadata_files.keys():
new_folders.loc[i,'neg_metadata_file'] = neg_metadata_files[row['parent_dir']]
basepath = os.path.join(outdir,'%s_%s'%(row['parent_dir'],'negative'))
parent_dir = '%s_%s'%(row['parent_dir'],'negative')
if '_idx_' in parent_dir.lower():
batch_filename = write_new_mzmine_params(gsheet_params_idx,'negative',neg_filelist[row['parent_dir']],basepath,parent_dir)
else:
batch_filename = write_new_mzmine_params(gsheet_params,'negative',neg_filelist[row['parent_dir']],basepath,parent_dir)
write_mzmine_sbatch_and_runner(basepath,batch_filename,parent_dir,neg_filelist[row['parent_dir']].shape[0])
write_fbmn_sbatch_and_runner(basepath,parent_dir)
new_folders['file_conversion_complete'] = False
new_folders['conforming_filenames'] = False
new_folders['mzmine_pos_status'] = '01 initiation'
new_folders['mzmine_neg_status'] = '01 initiation'
new_folders['fbmn_pos_status'] = '13 waiting'
new_folders['fbmn_neg_status'] = '13 waiting'
new_folders['gnps_msms_hits_pos_status'] = '08 hold'
new_folders['gnps_msms_hits_neg_status'] = '08 hold'
new_folders['output_dir'] = outdir
new_folders['mzmine_parameter_sheet'] = 'params20190719_v2p39_IsotopeFilter_ADAP_DeDup'
        # Choose the parameter row per project: IDX runs use param_id 5,
        # everything else uses param_id 2 (mirrors write_new_mzmine_params above).
        new_folders['mzmine_parameter_row'] = new_folders['parent_dir'].apply(
            lambda d: 5 if '_idx_' in d.lower() else 2)
new_folders['conforming_filenames'] = True
new_folders['file_conversion_complete'] = True
cols = [c for c in new_folders.columns if c.endswith('_pos_status')]
new_folders.loc[new_folders['num_pos_files']==0,cols] = '12 not relevant'
cols = [c for c in new_folders.columns if c.endswith('_neg_status')]
new_folders.loc[new_folders['num_neg_files']==0,cols] = '12 not relevant'
if update_lims==True:
update_table_in_lims(new_folders,'untargeted_tasks',method='insert',max_size=1000)
return new_folders
def count_scans_by_class(x):
d = {}
for i in [0.4,0.5,0.7,0.9]:
d['MQScore_gte_%.1f'%i] = len(x.loc[x['MQScore']>=i,'#Scan#'].unique())
for i in [3,6,9,12,15]:
d['SharedPeaks_gte_%d'%i] = len(x.loc[x['SharedPeaks']>=i,'#Scan#'].unique())
return pd.Series(d, index=d.keys())
def get_gnps_hits(parent_dir,output_dir,polarity,status,override=False):
gnps_uuid_file = build_untargeted_filename(output_dir,
parent_dir,
polarity,
'gnps-uuid-fbmn')
gnps_zip_output_file = os.path.join(os.path.dirname(gnps_uuid_file),'%s_%s_gnps-download.zip'%(parent_dir,polarity))
if (os.path.isfile(gnps_uuid_file)) & ('complete' in status):
with open(gnps_uuid_file,'r') as fid:
url = fid.read()
gnps_uuid = url.split('=')[-1]
        # Reading the hits requires the downloaded zip to be present on disk.
        if os.path.isfile(gnps_zip_output_file):
with zipfile.ZipFile(gnps_zip_output_file, 'r') as archive:
hits_file = [f for f in archive.namelist() if 'DB_result' in f]
if len(hits_file)>0:
hits_file = hits_file[-1]
hits_fid = archive.open(hits_file)
df = pd.read_csv(hits_fid,sep='\t')
return df
return None
def download_file_from_server_endpoint(server_endpoint,local_file_path):
response=requests.post(server_endpoint)
if response.status_code==200:
# Write the file contents in the response to a file specified by local_file_path
with open(local_file_path,'wb') as local_file:
for chunk in response.iter_content(chunk_size=128):
local_file.write(chunk)
else:
        print('Download failed (HTTP %d): %s'%(response.status_code,os.path.basename(local_file_path)))
def get_gnps_zipfile(parent_dir,output_dir,polarity,status,override=False):
gnps_uuid_file = build_untargeted_filename(output_dir,
parent_dir,
polarity,
'gnps-uuid-fbmn')
# print(gnps_uuid_file)
# print(polarity)
gnps_zip_output_file = os.path.join(os.path.dirname(gnps_uuid_file),'%s_%s_gnps-download.zip'%(parent_dir,polarity))
if (os.path.isfile(gnps_uuid_file)) & ('complete' in status):
if (override==True) | (not os.path.isfile(gnps_zip_output_file)):
with open(gnps_uuid_file,'r') as fid:
url = fid.read()
my_uuid = url.split('=')[-1]
gnps_url = "https://gnps.ucsd.edu/ProteoSAFe/DownloadResult?task=%s&view=download_cytoscape_data&show=true"%my_uuid
print(gnps_url)
print(gnps_zip_output_file)
download_file_from_server_endpoint(gnps_url, gnps_zip_output_file)
print('')
else:
print('FAIL',parent_dir,status)
# <batchstep method="net.sf.mzmine.modules.peaklistmethods.orderpeaklists.OrderPeakListsModule">
# <parameter name="Peak lists" type="BATCH_LAST_PEAKLISTS"/>
# </batchstep>
def melt_dataframe(ph,md):
# df.drop(columns=['filename','sample'],inplace=True)
# df.fillna(0.0,inplace=True)
feature_cols = ['feature_id','mz','rt']
var_cols = [c for c in ph.columns if c.endswith('mzML')]
df = ph.melt(id_vars=feature_cols,value_vars=var_cols)#,var_name='value')
# df = df[~pd.isna(df['value'])]
df['value'].fillna(0.0,inplace=True)
df['value'] = df['value'].astype(float)
df = pd.merge(df,md,left_on='variable',right_on='filename')
df.drop(columns=['variable','filename'],inplace=True)
df.reset_index(inplace=True,drop=True)
return df
def calc_background(df,background='exctrl',background_ratio=3.0):
exctrl = df[df['sampletype'].str.contains(background.lower())].groupby(['feature_id','sampletype'])['value'].max().reset_index()
sample = df[~df['sampletype'].str.contains(background.lower())].groupby(['feature_id','sampletype'])['value'].max().reset_index()
ratio_df = pd.merge(exctrl.add_suffix('_exctrl'),sample.add_suffix('_sample'),left_on='feature_id_exctrl',right_on='feature_id_sample')
ratio_df['ratio'] = ratio_df['value_sample']/(1+ratio_df['value_exctrl'])
good_features = ratio_df.loc[ratio_df['ratio']>background_ratio,'feature_id_sample'].unique()
all_features = ratio_df['feature_id_sample'].unique()
bad_features = list(set(all_features) - set(good_features))
rm_count = len(all_features) - len(good_features)
print('Please remove %d features out of %d'%(rm_count,len(all_features)))
return good_features,bad_features
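
# Example (illustrative numbers): a feature whose max intensity is 9e5 in the
# samples and 2e5 in the exctrl blanks has ratio 9e5/(1+2e5) ~= 4.5, so it
# survives the default background_ratio=3.0 cutoff; a feature at 5e5 vs 2e5
# (ratio ~2.5) would be flagged as background.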
#copy files here to keep I/O off low-performance filesystems
DATA_PATH = '/global/cscratch1/sd/bpb/raw_data'
#we don't need to request haswell on genepool partition of Cori
#remove this line
#SBATCH -C haswell
# /////////////////////////////////////////////////////////////////////
# /////////////////// REALTIME QUEUE SBATCH PARAMS ////////////////////
# # /////////////////////////////////////////////////////////////////////
SLURM_HEADER = """#!/bin/bash
#SBATCH -t 04:00:00
#SBATCH -C haswell
#SBATCH -N 1
#SBATCH --error="slurm.err"
#SBATCH --output="slurm.out"
#SBATCH -q realtime
#SBATCH -A m1541
#SBATCH --exclusive
module load java
"""
# /////////////////////////////////////////////////////////////////////
# # /////////////////// CORI BIGMEME QUEUE SBATCH PARAMS ////////////////////
# # /////////////////////////////////////////////////////////////////////
# SLURM_HEADER = """#!/bin/bash
# #SBATCH -N 1
# #SBATCH -A m1541
# #SBATCH -t 00:30:00
# #SBATCH --clusters=escori
# #SBATCH --qos=bigmem
# #SBATCH --job-name=my_big_job
# #SBATCH --mem=550GB
# """
# /////////////////////////////////////////////////////////////////////
# /////////////////// CORI REGULAR SBATCH PARAMS //////////////////////
# /////////////////////////////////////////////////////////////////////
# SLURM_HEADER = """#!/bin/bash
# #SBATCH -N 1 -c 64
# #SBATCH --exclusive
# #SBATCH --error="slurm.err"
# #SBATCH --output="slurm.out"
# #SBATCH --qos=genepool
# #SBATCH -A pkscell
# #SBATCH -t 24:00:00
# #SBATCH -L project
# """
#Alicia Clum: The best nodes we have right now are ExVivo, they are 1.5 Tb nodes and very fast you can submit there by changing to --qos=jgi_shared and adding -C skylake. Prior to submitting you must type "module load esslurm" since these nodes are controlled by a different scheduler.
# Set the python to this one:
#/global/common/software/m2650/mzmine_parameters/MZmine/MZmine-2.39/startMZmine_NERSC_Headless_Cori_exvivo.sh
# /////////////////////////////////////////////////////////////////////
# /////////////////// SKYLAKE 1.5TB QUEUE SBATCH PARAMS ///////////////
# /////////////////////////////////////////////////////////////////////
SLURM_BIGMEM_HEADER = """#!/bin/bash
#SBATCH -N 1
#SBATCH --exclusive
#SBATCH --error="slurm.err"
#SBATCH --output="slurm.out"
#SBATCH --qos=jgi_shared
#SBATCH -A pkscell
#SBATCH -C skylake
#SBATCH -t 8:00:00
#SBATCH -L project
"""
def calc_hit_vector(n,df):
"""
for 0,1,2 n will be 3
for 0,1,2,3 n will be 4
df is the count from true_positives
this function makes it a percentation of hits will sum n or more hits in last element
"""
m = np.zeros((n))
s = df['count']/df['count'].sum()
nf = df['num_features']
for i in s.index:
if (i>(len(m)-1)) & (len(m) >1):
m_idx = len(m)-1
else:
m_idx = nf[i]
m[m_idx] = m[m_idx] + s[i]
return m
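
# Worked example (illustrative, assuming a count exists for every
# num_features value so df's index matches num_features): with n=3 and
#   num_features: [0, 1, 2, 3], count: [2, 4, 2, 2]  (10 total)
# the result is [0.2, 0.4, 0.4] -- the 2-hit and 3-hit fractions
# (0.2 + 0.2) are folded into the last element.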
def summarize_results(n,true_pos_filename,base_path,project_path,feature_table_extension,rt_column,mz_column,headerrows,sep,mz_tolerance,rt_tolerance):
"""
"""
path = os.path.join(base_path,project_path)
feature_file = glob.glob(os.path.join(path,'*%s'%feature_table_extension))
if len(feature_file)>0:
feature_file = feature_file[-1]
else:
return np.zeros(n),np.zeros(n), 0
new_path = os.path.join(path,'true_pos_results')
if not os.path.isdir(new_path):
os.mkdir(new_path)
new_basename = os.path.basename(feature_file).replace(feature_table_extension,'height.xlsx')
output_filename = os.path.join(new_path,new_basename)
if os.path.isfile(feature_file): #has the file been made already?
with open(feature_file,'r') as fid:
s = fid.read()
if len(s)>0: #does the file have anything in it?
df_experimental = pd.read_csv(feature_file,sep=sep,skiprows=headerrows)
if df_experimental.shape[0] > 0: #are there any rows?
if 'hilic' in feature_file.lower():
sheetname = 'HILIC_POS'
df_true_pos = pd.read_excel(true_pos_filename,sheet_name=sheetname)
else:
sheetname = 'CSH_POS'
df_true_pos = pd.read_excel(true_pos_filename,sheet_name=sheetname)
istd_count,bio_count,df_grouped,df_hits,total_count = prepare_true_positive_and_export(output_filename,df_experimental,df_true_pos,rt_column=rt_column,mz_column=mz_column,mz_tolerance=mz_tolerance,rt_tolerance=rt_tolerance)
return calc_hit_vector(n,istd_count), calc_hit_vector(n,bio_count), total_count.loc[0,'total']
return np.zeros(n),np.zeros(n), 0
def make_count_of_knowns(df_hits,df_true_pos):
df_grouped = df_hits[['true_pos_index','CompoundName_truepos','experimental_feature_idx']]
df_grouped.set_index(['CompoundName_truepos'],inplace=True)
df_grouped = df_grouped.groupby(['true_pos_index']).count()
df_grouped = pd.merge(df_grouped,df_true_pos,left_index=True,right_index=True,how='outer')
df_grouped.rename(columns={'experimental_feature_idx':'num_features'},inplace=True)
return df_grouped
def map_features_to_known(df_experimental,df_true_pos,rt_column='row retention time',mz_column='row m/z',mz_tolerance=0.01,rt_tolerance=0.1):
feature_array = df_experimental[[mz_column,rt_column]].values
reference_array = df_true_pos[['MZ','RT']].values
idx = np.isclose(feature_array[:,None,:], reference_array, rtol=0.0, atol=[mz_tolerance,rt_tolerance]).all(axis=2) #order is m/z, rt, polarity
feature_idx, reference_idx = np.where(idx)
df_hits = df_true_pos.loc[reference_idx].copy()
df_hits['experimental_feature_idx'] = feature_idx
df_hits = pd.merge(df_hits,df_true_pos,left_index=True,right_index=True,how='outer',suffixes=['_x','_truepos'])
df_hits.drop(columns=['%s_x'%c for c in df_true_pos.columns],inplace=True)
df_hits = pd.merge(df_hits,df_experimental,how='left',left_on='experimental_feature_idx',right_index=True,suffixes=['_truepos','_experimental'])
df_hits.index.name = 'true_pos_index'
df_hits.reset_index(inplace=True)
return df_hits
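# The matching above hinges on numpy broadcasting: np.isclose compares every
# (m/z, RT) feature row against every reference row in a single call. A toy
# sketch with made-up values:
#
#   feature_array   = np.array([[100.001, 1.02], [200.5, 5.0]])
#   reference_array = np.array([[100.000, 1.00]])
#   hits = np.isclose(feature_array[:, None, :], reference_array,
#                     rtol=0.0, atol=[0.01, 0.1]).all(axis=2)
#   # hits -> [[ True], [False]]; np.where(hits) yields the paired indices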
def summarize_count_per_type(df_grouped,cpd_type='ISTD'):
"""
cpd_type is either 'ISTD' or 'TargetCPD'
"""
cpd_count = df_grouped[df_grouped['Type']==cpd_type][['num_features','MZ']].groupby('num_features').count()
cpd_count.reset_index(inplace=True)
cpd_count.rename(columns={'MZ':'count'},inplace=True)
return cpd_count
def prepare_true_positive_and_export(output_filename,df_experimental,df_true_pos,rt_column='row retention time',mz_column='row m/z',mz_tolerance=0.01,rt_tolerance=0.1):
    df_hits = map_features_to_known(df_experimental,df_true_pos,rt_column=rt_column,mz_column=mz_column,mz_tolerance=mz_tolerance,rt_tolerance=rt_tolerance)
df_grouped = make_count_of_knowns(df_hits,df_true_pos)
istd_count = summarize_count_per_type(df_grouped,cpd_type='ISTD')
bio_count = summarize_count_per_type(df_grouped,cpd_type='TargetCPD')
params = pd.DataFrame(columns=['mz_tolerance','rt_tolerance'],data=[[mz_tolerance,rt_tolerance]])
total_count = pd.DataFrame(columns=['total'],data=[[df_experimental.shape[0]]])
    # Create a Pandas Excel writer using XlsxWriter as the engine.
    writer = pd.ExcelWriter(output_filename, engine='xlsxwriter')
    # Write each dataframe to a different worksheet.
    params.to_excel(writer, sheet_name='params')
    istd_count.to_excel(writer, sheet_name='istd_count')
    bio_count.to_excel(writer, sheet_name='bio_count')
    df_grouped.to_excel(writer, sheet_name='df_grouped')
    df_hits.to_excel(writer, sheet_name='df_hits')
    total_count.to_excel(writer, sheet_name='total')
    # Close the Pandas Excel writer and write out the Excel file.
    writer.save()
    return istd_count,bio_count,df_grouped,df_hits,total_count
def mzmine_xml_to_csv(xml_file,csv_file=None,pop_input_files=True,return_df=True):
"""
given an xml file, turn it into a csv
optionally return either a dict of the steps or a dataframe of the steps
"""
with open(xml_file,'r') as fid:
xml_str = fid.read()
d = xml_to_dict(xml_str)
# t = dict_to_etree(d)
# indent_tree(t)
# s1 = tree_to_xml(t)
# pop out the files
    if pop_input_files:
raw_data_import = d['batch']['batchstep'].pop(0)
original_file_list = raw_data_import['parameter']['file']
# This is a dict representation of all the steps
dflat = flatten(d,enumerate_types=(list,))
# This is a tabular representation of all the steps
df = pd.DataFrame([(k,v) for (k,v) in dflat.items()],columns=['parameter','value']).sort_values('parameter').set_index('parameter',drop=True)
if csv_file is not None:
df.to_csv(csv_file)
    if return_df:
return df #return the dataframe of the steps
else:
return dflat #return the dict of the steps
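# Hedged usage sketch (the file paths are placeholders, not real files):
#
#   df = mzmine_xml_to_csv('batch_params.xml', csv_file='batch_params.csv')
#   # df is indexed by the flattened parameter path with a single 'value'
#   # column, so two batch files can be compared with a DataFrame merge.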
def make_task_and_job(params):#basedir,basename,polarity,files):
if not os.path.exists(params['basedir']):
os.mkdir(params['basedir'])
xml_str = get_batch_file_template()
d = xml_to_dict(xml_str)
    # Initialize the task and give it values from the user supplied form
task = metob.MZMineTask()
task.polarity = params['polarity']
task.lcmsruns = params['files']
task.min_peak_duration = params['min_peak_duration']
task.max_peak_duration = params['max_peak_duration']
task.rt_tol_perfile = params['rt_tol_perfile']
task.rt_tol_multifile = params['rt_tol_multifile']
task.min_num_scans = params['min_num_scans']
task.smoothing_scans = params['smoothing_scans']
task.group_intensity_threshold = params['group_intensity_threshold']
task.min_peak_height = params['min_peak_height']
task.ms1_noise_level = params['ms1_noise_level']
task.ms2_noise_level = params['ms2_noise_level']
task.mz_tolerance = params['mz_tolerance']
task.peak_to_valley_ratio = params['peak_to_valley_ratio']
task.min_rt = params['min_rt']
task.max_rt = params['max_rt']
task.representative_isotope = params['representative_isotope']
task.remove_isotopes = params['remove_isotopes']
task.min_peaks_in_row = params['min_peaks_in_row']
task.peak_with_msms = params['peak_with_msms']
task.chromatographic_threshold = params['chromatographic_threshold']
task.search_for_minimum_rt_range = params['search_for_minimum_rt_range']
task.minimum_relative_height = params['minimum_relative_height']
task.mz_range_scan_pairing = params['mz_range_scan_pairing']
task.rt_range_scan_pairing = params['rt_range_scan_pairing']
task.gapfill_intensity_tolerance = params['gapfill_intensity_tolerance']
task.output_csv_height = os.path.join(params['basedir'],'%s_%s_peak_height.csv'%(params['basename'],task.polarity))
task.output_csv_area = os.path.join(params['basedir'],'%s_%s_peak_area.csv'%(params['basename'],task.polarity))
task.output_workspace = os.path.join(params['basedir'],'%s_%s.mzmine'%(params['basename'],task.polarity))
task.output_mgf = os.path.join(params['basedir'],'%s_%s.mgf'%(params['basename'],task.polarity))
task.input_xml = os.path.join(params['basedir'],'logs','%s_%s.xml'%(params['basename'],task.polarity))
task.mzmine_launcher = get_latest_mzmine_binary(version=params['mzmine_version'])
new_d = replace_files(d,params['files'])
new_d = configure_crop_filter(new_d,task.polarity,params['files'],min_rt=task.min_rt,max_rt=task.max_rt)
new_d = configure_mass_detection(new_d,task.ms1_noise_level,task.ms2_noise_level)
new_d = configure_chromatogram_builder(new_d,task.min_num_scans,task.group_intensity_threshold,task.min_peak_height,task.mz_tolerance)
new_d = configure_smoothing(new_d,task.smoothing_scans)
new_d = configure_peak_deconvolution(new_d,
task.min_peak_height,
task.minimum_relative_height,
task.search_for_minimum_rt_range,
task.chromatographic_threshold,
task.peak_to_valley_ratio,
task.min_peak_duration,
task.max_peak_duration)
new_d = configure_isotope_search(new_d,
task.mz_tolerance,
task.rt_tol_perfile,
task.representative_isotope,
task.remove_isotopes,
task.polarity)
new_d = configure_join_aligner(new_d,task.mz_tolerance,task.rt_tol_multifile)
new_d = configure_gap_filling(new_d,task.mz_tolerance,task.rt_tol_multifile,task.gapfill_intensity_tolerance)
new_d = configure_rows_filter(new_d,task.min_peaks_in_row,task.peak_with_msms)
new_d = configure_output(new_d,
task.output_csv_height,
task.output_csv_area,
task.output_workspace,
task.output_mgf)
t = dict_to_etree(new_d)
indent_tree(t)
xml_batch_str = tree_to_xml(t,filename=task.input_xml)
job_runner = '%s %s'%(task.mzmine_launcher,task.input_xml)
return job_runner
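# A minimal sketch of the params dict this function expects; the values below
# are illustrative placeholders only (see the task assignments above for the
# complete key list):
#
#   params = {'basedir': '/tmp/my_project', 'basename': 'run1',
#             'polarity': 'positive', 'files': ['a.mzML', 'b.mzML'],
#             'mzmine_version': 'most_recent'}
#   # ...plus the remaining keys assigned to the task above
#   job_runner = make_task_and_job(params)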
def create_job_script(m):
"""
This is the first function that runs when a user initializes a new untargeted workflow
"""
#setup directories
if not os.path.isdir(m['basedir']):
os.mkdir(m['basedir'])
dirs_to_make = ['job_scripts','logs','intermediate_results','%s_%s'%(m['basename'],m['polarity'])]
for d in dirs_to_make:
if not os.path.isdir(os.path.join(m['basedir'],d)):
os.mkdir(os.path.join(m['basedir'],d))
job_cmd = make_task_and_job(m)#['basedir'],m['basename'],m['polarity'],m['files'])
sbatch_file_name = os.path.join(m['basedir'],'job_scripts','%s_%s.sbatch'%(m['basename'],m['polarity']))
denovo_sbatch_file_name = os.path.join(m['basedir'],'job_scripts','%s_%s_denovo.sbatch'%(m['basename'],m['polarity']))
err_file_name = os.path.join(m['basedir'],'logs','%s_%s.err'%(m['basename'],m['polarity']))
out_file_name = os.path.join(m['basedir'],'logs','%s_%s.out'%(m['basename'],m['polarity']))
# job_cmd_filtered = make_targeted_mzmine_job(m['basedir'],m['basename'],m['polarity'],m['files'])
params_filename = os.path.join(m['basedir'],'logs','%s_%s_params.json'%(m['basename'],m['polarity']))
new_params_filename = os.path.join(m['basedir'],'logs','%s_%s_params-used.json'%(m['basename'],m['polarity']))
copy_params_command = "cp '%s' '%s'"%(params_filename,new_params_filename)
with open(sbatch_file_name,'w') as fid:
fid.write('%s\n'%SLURM_HEADER.replace('slurm.err',err_file_name).replace('slurm.out',out_file_name))
fid.write('%s\n'%copy_params_command)
fid.write('%s\n'%job_cmd)
# bad_words = ['qos', '-p','-C','-L','-t','-N']
# bad_time = '#SBATCH -t 24:00:00'
# good_time = '#SBATCH -t 24:00:00\n'
# bad_node = '-N 1 -c 64'
# good_node = '#SBATCH -N 1 -c 64\n'
# with open(sbatch_file_name) as oldfile, open(denovo_sbatch_file_name, 'w') as newfile:
# for line in oldfile:
# if not any(bad_word in line for bad_word in bad_words):
# newfile.write(line)
# if bad_time in line:
# newfile.write(good_time)
# if bad_node in line:
# newfile.write(good_node)
# newfile.write('#SBATCH --mem=494G\n')
return sbatch_file_name
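# Typical follow-up (hypothetical): submit the generated script to the
# scheduler, e.g.:
#
#   sbatch_file = create_job_script(m)
#   subprocess.check_call(['sbatch', sbatch_file])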
#####################################################
#####################################################
######## mzmine setup scripts ########
#####################################################
#####################################################
def remove_duplicate_files(files):
file_names = []
unique_files = []
for f in files:
        if f.name not in file_names:
unique_files.append(f.mzml_file)
file_names.append(f.name)
return unique_files
def get_files(groups,filename_substring,file_filters,keep_strings,is_group=False,return_mzml=True):
"""
if is_group is False, gets files from the experiment/folder name and filters with file_filters
if is_group is True, gets files from the metatlas group name and filters with file_filters
"""
for i,g in enumerate(groups):
        if is_group:
# get files as a metatlas group
groups = dp.select_groups_for_analysis(name = g,do_print=False,
most_recent = True,
remove_empty = True,
include_list = [], exclude_list = file_filters)#['QC','Blank'])
new_files = []
for each_g in groups:
for f in each_g.items:
new_files.append(f)
else:
new_files = metob.retrieve('Lcmsruns',experiment=g,name=filename_substring,username='*')
if i == 0:
all_files = new_files
else:
all_files.extend(new_files)
if len(new_files) == 0:
print('##### %s has ZERO files!'%g)
# only keep files that don't have substrings in list
if len(file_filters) > 0:
for i,ff in enumerate(file_filters):
if i == 0:
files = [f for f in all_files if not ff in f.name]
else:
files = [f for f in files if not ff in f.name]
else:
files = all_files
    # kick out any files that don't match at least one of the keep_strings
keep_this = []
filter_used = [] #good to keep track if a filter isn't used. likely a typo
if len(keep_strings) > 0:
        for i,ff in enumerate(files):
            keep_this.append(any(f in ff.name for f in keep_strings))
        for i,ff in enumerate(keep_strings):
            filter_used.append(any(ff in f.name for f in files))
if not all(filter_used):
for i,f in enumerate(filter_used):
if f==False:
print('%s keep string is not used'%keep_strings[i])
        files = [files[i] for i,j in enumerate(keep_this) if j]
files = remove_duplicate_files(files)
return files
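# Hedged usage sketch with made-up experiment and filter names:
#
#   files = get_files(['20190101_MyExperiment'], 'Run', ['QC', 'Blank'],
#                     ['POS'], is_group=False)
#   # returns unique mzML paths whose names contain 'POS', excluding any
#   # run whose name contains 'QC' or 'Blank'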
def make_targeted_mzmine_job(basedir,basename,polarity,files):
if not os.path.exists(basedir):
os.mkdir(basedir)
xml_str = get_targeted_batch_file_template()
d = xml_to_dict(xml_str)
task = metob.MZMineTask()
task.polarity = polarity
task.lcmsruns = files
new_d = replace_files(d,files)
project_name = '%s_%s'%(basename,task.polarity)
task.output_workspace = os.path.join(basedir,project_name,'%s_%s.mzmine'%(basename,task.polarity))
task.input_xml = os.path.join(basedir,'logs','%s_%s_filtered.xml'%(basename,task.polarity))
task.mzmine_launcher = get_latest_mzmine_binary()
# new_d = configure_crop_filter(new_d,task.polarity,files)
# new_d = configure_targeted_peak_detection(new_d,peak_list_filename,intensity_tolerance=1e-4,noise_level=1e4,mz_tolerance=20,rt_tolerance=0.5)
new_d = configure_workspace_output(new_d,task.output_workspace)
t = dict_to_etree(new_d)
indent_tree(t)
xml_batch_str = tree_to_xml(t,filename=task.input_xml)
job_runner = '%s %s'%(task.mzmine_launcher,task.input_xml)
return job_runner
def configure_targeted_peak_detection(new_d,peak_list_filename,intensity_tolerance=1e-4,noise_level=1e4,mz_tolerance=20,rt_tolerance=0.5):
"""
Name suffix: Suffix to be added to the peak list name.
Peak list file: Path of the csv file containing the list of peaks to be detected. The csv file should have three columns.
The first column should contain the expected M/Z, the second column the expected RT and the third the peak name. Each peak should be in a different row.
Field separator: Character(s) used to separate fields in the peak list file.
Ignore first line: Check to ignore the first line of peak list file.
Intensity tolerance: This value sets the maximum allowed deviation from expected shape of a peak in chromatographic direction.
Noise level: The minimum intensity level for a data point to be considered part of a chromatogram. All data points below this intensity level are ignored.
MZ Tolerance: Maximum allowed m/z difference to find the peak
RT tolerance: Maximum allowed retention time difference to find the peak
"""
    # Find the targeted peak detection step and set its parameters
idx = [i for i,d in enumerate(new_d['batch']['batchstep']) if 'TargetedPeakDetectionModule' in d['@method']][0]
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter']) if 'Peak list file' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['#text'] = '%s'%peak_list_filename
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter']) if 'm/z tolerance' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['ppmtolerance'] = '%.3f'%(mz_tolerance)
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter']) if 'Intensity tolerance' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['#text'] = '%.6f'%(intensity_tolerance)
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter']) if 'Noise level' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['#text'] = '%.6f'%(noise_level)
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter']) if 'Retention time tolerance' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['#text'] = '%.6f'%(rt_tolerance)
return new_d
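# Per the docstring above, the peak list csv holds one peak per row as
# m/z, RT, name; illustrative rows:
#
#   104.1070,1.05,choline
#   118.0862,1.21,betaine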
def configure_crop_filter(new_d,polarity,files,min_rt=0.01,max_rt=100,fps_string='FPS'):
"""
"""
# identify the element for this change
idx = [i for i,d in enumerate(new_d['batch']['batchstep']) if 'CropFilterModule' in d['@method']][0]
# Set the filter string
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter']) if 'Raw data files' in d['@name']][0]
if any([fps_string in f for f in files]):
new_d['batch']['batchstep'][idx]['parameter'][idx2]['name_pattern'] = '*FPS*'
else:
new_d['batch']['batchstep'][idx]['parameter'][idx2]['name_pattern'] = '*'
# Set the polarity
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter']) if 'Scans' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['polarity'] = polarity.upper()
    # set the rt min and rt max (reuses the same idx2 as polarity)
new_d['batch']['batchstep'][idx]['parameter'][idx2]['retention_time'] = {'max':'%.4f'%max_rt,'min':'%.4f'%min_rt}
# new_d['batch']['batchstep'][idx]['parameter'][idx2]['ms_level'] = '1-2'
return new_d
def configure_mass_detection(new_d,ms1_noise_level=1e4,ms2_noise_level=1e2):
"""
"""
# Find the module
idx = [i for i,d in enumerate(new_d['batch']['batchstep']) if 'MassDetectionModule' in d['@method']]
#The first idx will be for MS1 and the second will be for MS2
# Set the MS1 attributes
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx[0]]['parameter']) if 'Mass detector' in d['@name']][0]
idx3 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx[0]]['parameter'][idx2]['module']) if 'Centroid' in d['@name']][0]
new_d['batch']['batchstep'][idx[0]]['parameter'][idx2]['module'][idx3]['parameter']['#text'] = '%.2f'%(ms1_noise_level)
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx[0]]['parameter']) if 'Scans' in d['@name']][0]
new_d['batch']['batchstep'][idx[0]]['parameter'][idx2]['ms_level'] = '1'
# Set the MS2 attributes
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx[1]]['parameter']) if 'Mass detector' in d['@name']][0]
idx3 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx[1]]['parameter'][idx2]['module']) if 'Centroid' in d['@name']][0]
new_d['batch']['batchstep'][idx[1]]['parameter'][idx2]['module'][idx3]['parameter']['#text'] = '%.2f'%(ms2_noise_level)
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx[1]]['parameter']) if 'Scans' in d['@name']][0]
new_d['batch']['batchstep'][idx[1]]['parameter'][idx2]['ms_level'] = '2'
return new_d
def configure_smoothing(new_d,smoothing_scans):
"""
# <batchstep method="net.sf.mzmine.modules.peaklistmethods.peakpicking.smoothing.SmoothingModule">
# <parameter name="Peak lists" type="BATCH_LAST_PEAKLISTS"/>
# <parameter name="Filename suffix">smoothed</parameter>
# <parameter name="Filter width">9</parameter>
# <parameter name="Remove original peak list">false</parameter>
# </batchstep>
"""
idx = [i for i,d in enumerate(new_d['batch']['batchstep']) if 'SmoothingModule' in d['@method']][0]
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter']) if 'Filter width' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['#text'] = '%.3f'%(smoothing_scans)
return new_d
def configure_chromatogram_builder(new_d,min_num_scans,group_intensity_threshold,min_peak_height,mz_tolerance):
"""
new_d = configure_chromatogram_builder(new_d,task.min_num_scans,task.group_intensity_threshold,task.min_peak_height,task.mz_tolerance)
# <batchstep method="net.sf.mzmine.modules.masslistmethods.ADAPchromatogrambuilder.ADAPChromatogramBuilderModule">
# <parameter name="Raw data files" type="ALL_FILES"/>
# <parameter name="Scans">
# <ms_level>1</ms_level>
# </parameter>
# <parameter name="Mass list">masses</parameter>
# <parameter name="Min group size in # of scans">5</parameter>
# <parameter name="Group intensity threshold">1000000.0</parameter>
# <parameter name="Min highest intensity">80000.0</parameter>
# <parameter name="m/z tolerance">
# <absolutetolerance>0.002</absolutetolerance>
# <ppmtolerance>7.0</ppmtolerance>
# </parameter>
# <parameter name="Suffix">chromatograms</parameter>
# </batchstep>
"""
idx = [i for i,d in enumerate(new_d['batch']['batchstep']) if 'ADAPChromatogramBuilderModule' in d['@method']][0]
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter']) if 'Min group size' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['#text'] = '%.3f'%(min_num_scans)
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter']) if 'Group intensity threshold' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['#text'] = '%.3f'%(group_intensity_threshold)
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter']) if 'Min highest intensity' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['#text'] = '%.3f'%(min_peak_height)
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter']) if 'm/z tolerance' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['ppmtolerance'] = '%.3f'%(mz_tolerance)
return new_d
def configure_adap_peak_deconvolution(new_d,min_peak_height,minimum_relative_height,search_for_minimum_rt_range,chromatographic_threshold,min_sn_ratio,min_peak_duration,max_peak_duration):
"""
<parameter name="Algorithm" selected="Wavelets (ADAP)">
<module name="Wavelets (ADAP)">
<parameter name="S/N threshold">3.0</parameter>
<parameter name="S/N estimator" selected="Intensity window SN">
<module name="Intensity window SN"/>
<module name="Wavelet Coeff. SN">
<parameter name="Peak width mult.">1.0</parameter>
<parameter name="abs(wavelet coeffs.)">true</parameter>
</module>
</parameter>
<parameter name="min feature height">4500.0</parameter>
<parameter name="coefficient/area threshold">60.0</parameter>
<parameter name="Peak duration range">
<min>0.0</min>
<max>0.5</max>
</parameter>
<parameter name="RT wavelet range">
<min>0.0</min>
<max>0.1</max>
</parameter>
</module>
"""
idx = [i for i,d in enumerate(new_d['batch']['batchstep']) if 'DeconvolutionModule' in d['@method']][0]
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter']) if 'Algorithm' in d['@name']][0]
idx3 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter'][idx2]['module']) if 'Local minimum search' in d['@name']][0]
idx4 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter'][idx2]['module'][idx3]['parameter']) if 'Chromatographic threshold' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['module'][idx3]['parameter'][idx4]['#text'] = '%.3f'%chromatographic_threshold
idx4 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter'][idx2]['module'][idx3]['parameter']) if 'Search minimum in RT range (min)' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['module'][idx3]['parameter'][idx4]['#text'] = '%.3f'%search_for_minimum_rt_range
idx4 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter'][idx2]['module'][idx3]['parameter']) if 'Minimum relative height' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['module'][idx3]['parameter'][idx4]['#text'] = '%.3f'%minimum_relative_height
idx4 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter'][idx2]['module'][idx3]['parameter']) if 'Minimum absolute height' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['module'][idx3]['parameter'][idx4]['#text'] = '%.3f'%min_peak_height
idx4 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter'][idx2]['module'][idx3]['parameter']) if 'Min ratio of peak top/edge' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['module'][idx3]['parameter'][idx4]['#text'] = '%.3f'%min_sn_ratio
idx4 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter'][idx2]['module'][idx3]['parameter']) if 'Peak duration range (min)' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['module'][idx3]['parameter'][idx4]['min'] = '%.3f'%min_peak_duration
new_d['batch']['batchstep'][idx]['parameter'][idx2]['module'][idx3]['parameter'][idx4]['max'] = '%.3f'%max_peak_duration
return new_d
def configure_lms_peak_deconvolution(new_d,min_peak_height,minimum_relative_height,search_for_minimum_rt_range,chromatographic_threshold,min_sn_ratio,min_peak_duration,max_peak_duration):
"""
<parameter name="Algorithm" selected="Local minimum search">
<module name="Local minimum search">
<parameter name="Chromatographic threshold">0.75</parameter>
<parameter name="Search minimum in RT range (min)">0.02</parameter>
<parameter name="Minimum relative height">0.002</parameter>
<parameter name="Minimum absolute height">90000.0</parameter>
<parameter name="Min ratio of peak top/edge">1.03</parameter>
<parameter name="Peak duration range (min)">
<min>0.03</min>
<max>1.0</max>
</parameter>
</module>
"""
idx = [i for i,d in enumerate(new_d['batch']['batchstep']) if 'DeconvolutionModule' in d['@method']][0]
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter']) if 'Algorithm' in d['@name']][0]
idx3 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter'][idx2]['module']) if 'Local minimum search' in d['@name']][0]
idx4 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter'][idx2]['module'][idx3]['parameter']) if 'Chromatographic threshold' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['module'][idx3]['parameter'][idx4]['#text'] = '%.3f'%chromatographic_threshold
idx4 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter'][idx2]['module'][idx3]['parameter']) if 'Search minimum in RT range (min)' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['module'][idx3]['parameter'][idx4]['#text'] = '%.3f'%search_for_minimum_rt_range
idx4 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter'][idx2]['module'][idx3]['parameter']) if 'Minimum relative height' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['module'][idx3]['parameter'][idx4]['#text'] = '%.3f'%minimum_relative_height
idx4 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter'][idx2]['module'][idx3]['parameter']) if 'Minimum absolute height' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['module'][idx3]['parameter'][idx4]['#text'] = '%.3f'%min_peak_height
idx4 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter'][idx2]['module'][idx3]['parameter']) if 'Min ratio of peak top/edge' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['module'][idx3]['parameter'][idx4]['#text'] = '%.3f'%min_sn_ratio
idx4 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter'][idx2]['module'][idx3]['parameter']) if 'Peak duration range (min)' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['module'][idx3]['parameter'][idx4]['min'] = '%.3f'%min_peak_duration
new_d['batch']['batchstep'][idx]['parameter'][idx2]['module'][idx3]['parameter'][idx4]['max'] = '%.3f'%max_peak_duration
return new_d
def configure_isotope_search(new_d,mz_tolerance,rt_tol_perfile,representative_isotope,remove_isotopes,polarity):
"""
"""
idx = [i for i,d in enumerate(new_d['batch']['batchstep']) if 'Isotope' in d['@method']][0]
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter']) if 'm/z tolerance' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['ppmtolerance'] = '%.3f'%(mz_tolerance)
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter']) if 'Retention time tolerance' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['#text'] = '%.3f'%(rt_tol_perfile)
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter']) if 'Representative isotope' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['#text'] = '%s'%(representative_isotope)
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter']) if 'Remove original peaklist' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['#text'] = '%s'%(str(remove_isotopes).lower())
idx = [i for i,d in enumerate(new_d['batch']['batchstep']) if 'Adduct' in d['@method']][0]
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter']) if 'm/z tolerance' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['ppmtolerance'] = '%.3f'%(mz_tolerance)
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter']) if 'RT tolerance' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['#text'] = '%.3f'%(rt_tol_perfile)
if polarity == 'negative':
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter']) if 'Adducts' in d['@name']][0]
#the default is setup for positive mode adducts.
#only change them if you are in negative mode
for i,a in enumerate(new_d['batch']['batchstep'][idx]['parameter'][idx2]['adduct']):
if a['@selected'] == 'true':
new_d['batch']['batchstep'][idx]['parameter'][idx2]['adduct'][i]['@selected'] = 'false'
else:
new_d['batch']['batchstep'][idx]['parameter'][idx2]['adduct'][i]['@selected'] = 'true'
return new_d
def configure_join_aligner(new_d,mz_tolerance,rt_tol_multifile):
"""
# Join aligner has these scores:
# <parameter name="Minimum absolute intensity">3000.0</parameter>
# <parameter name="Minimum score">0.6</parameter>
"""
idx = [i for i,d in enumerate(new_d['batch']['batchstep']) if 'JoinAlignerModule' in d['@method']][0]
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter']) if 'm/z tolerance' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['ppmtolerance'] = '%.3f'%(mz_tolerance)
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter']) if 'Retention time tolerance' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['#text'] = '%.3f'%(rt_tol_multifile)
# idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter']) if 'Minimum absolute intensity' in d['@name']][0]
# new_d['batch']['batchstep'][idx]['parameter'][idx2]['#text'] = 3000#'%.3f'%(mz_tolerance)
# idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter']) if 'Minimum score' in d['@name']][0]
# new_d['batch']['batchstep'][idx]['parameter'][idx2]['#text'] = 0.6#'%.3f'%(rt_tol_multifile)
return new_d
def configure_rows_filter(new_d,min_peaks_in_row,peak_with_msms):
"""
"""
idx = [i for i,d in enumerate(new_d['batch']['batchstep']) if 'RowsFilterModule' in d['@method']][0]
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter']) if 'Minimum peaks in a row' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['#text'] = '%d'%min_peaks_in_row
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter']) if 'Minimum peaks in an isotope pattern' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['#text'] = '%d'%min_peaks_in_row
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter']) if 'Keep only peaks with MS2 scan (GNPS)' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['#text'] = '%d'%peak_with_msms
return new_d
def configure_duplicate_filter(new_d,mz_tolerance,rt_tol_perfile):
"""
"""
idx = [i for i,d in enumerate(new_d['batch']['batchstep']) if 'DuplicateFilterModule' in d['@method']][0]
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter']) if 'm/z tolerance' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['ppmtolerance'] = '%.3f'%(mz_tolerance)
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter']) if 'RT tolerance' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['#text'] = '%.3f'%(rt_tol_perfile)
return new_d
def configure_gap_filling(new_d,mz_tolerance,rt_tol_multifile,gapfill_intensity_tolerance):
"""
# <batchstep method="net.sf.mzmine.modules.peaklistmethods.gapfilling.peakfinder.multithreaded.MultiThreadPeakFinderModule">
# <parameter name="Peak lists" type="BATCH_LAST_PEAKLISTS"/>
# <parameter name="Name suffix">gap-filled</parameter>
# <parameter name="Intensity tolerance">0.05</parameter>
# <parameter name="m/z tolerance">
# <absolutetolerance>0.001</absolutetolerance>
# <ppmtolerance>5.0</ppmtolerance>
# </parameter>
# <parameter name="Retention time tolerance" type="absolute">0.03</parameter>
# <parameter name="Remove original peak list">false</parameter>
# </batchstep>
"""
idx = [i for i,d in enumerate(new_d['batch']['batchstep']) if 'gapfilling.peakfinder' in d['@method']][0]
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter']) if 'Intensity tolerance' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['#text'] = '%.3f'%(gapfill_intensity_tolerance)
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter']) if 'Retention time tolerance' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['#text'] = '%.3f'%(rt_tol_multifile)
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter']) if 'm/z tolerance' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['ppmtolerance'] = '%.3f'%(mz_tolerance)
return new_d
def configure_output(new_d,output_csv_height,output_csv_area,output_workspace,output_mgf):
"""
"""
idx = [i for i,d in enumerate(new_d['batch']['batchstep']) if 'CSVExportModule' in d['@method']]
#the first will be height the second will be area
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx[0]]['parameter']) if 'Filename' in d['@name']][0]
new_d['batch']['batchstep'][idx[0]]['parameter'][idx2]['#text'] = output_csv_height
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx[1]]['parameter']) if 'Filename' in d['@name']][0]
new_d['batch']['batchstep'][idx[1]]['parameter'][idx2]['#text'] = output_csv_area
idx = [i for i,d in enumerate(new_d['batch']['batchstep']) if 'GNPSExportModule' in d['@method']][0]
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter']) if 'Filename' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['#text'] = output_mgf
idx = [i for i,d in enumerate(new_d['batch']['batchstep']) if 'ProjectSaveAsModule' in d['@method']][0]
new_d['batch']['batchstep'][idx]['parameter']['#text'] = output_workspace
return new_d
def configure_csv_output(new_d,output_csv):
"""
"""
idx = [i for i,d in enumerate(new_d['batch']['batchstep']) if 'CSVExportModule' in d['@method']][0]
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter']) if 'Filename' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['#text'] = output_csv
return new_d
def indent_tree(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent_tree(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
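# indent_tree mutates the tree in place so tree_to_xml emits readable output:
#
#   t = ET.XML('<a><b>1</b></a>')
#   indent_tree(t)
#   # ET.tostring(t) -> b'<a>\n  <b>1</b>\n</a>\n'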
def get_targeted_batch_file_template(loc='do_not_change_batch_file_targeted_peak_list.xml'):
"""
return string of text from the template batch file
"""
with open(os.path.join(BATCH_FILE_PATH,loc),'r') as fid:
file_text = fid.read()
return file_text
def get_batch_file_template(loc='bootcamp_adap_template.xml'):
"""
return string of text from the template batch file
"""
with open(os.path.join(BATCH_FILE_PATH,loc),'r') as fid:
file_text = fid.read()
return file_text
def get_latest_mzmine_binary(system='Cori',version='most_recent'):
"""
Returns the path to the mzmine launch script.
    Default is most recent. Alternatively, specify the folder containing the version you want,
for example:
version='MZmine-2.23'
will use the launch script in that folder
wget $(curl -s https://api.github.com/repos/mzmine/mzmine2/releases/v2.33 | grep 'browser_' | cut -d\" -f4) -O mzmine_latest.zip
# To setup the most recent mzmine binary, follow these steps
cd /project/projectdirs/metatlas/projects/mzmine_parameters/MZmine
wget $(curl -s https://api.github.com/repos/mzmine/mzmine2/releases/latest | grep 'browser_' | cut -d\" -f4) -O mzmine_latest.zip
unzip mzmine_latest.zip
# change directories into latest mzmine download
# cd MZmine-XXXX
cp ../MZmine-2.24/startMZmine_NERSC_* .
cd /project/projectdirs/metatlas/projects/
chgrp -R metatlas mzmine_parameters
chmod -R 770 mzmine_parameters
"""
mzmine_versions = glob.glob(os.path.join(BINARY_PATH,'*' + os.path.sep))
if version == 'most_recent':
most_recent = sorted([os.path.basename(m) for m in mzmine_versions if 'MZmine-' in m])[-1]
else:
most_recent = [m.split(os.path.sep)[-2] for m in mzmine_versions if version in m][-1]
launch_script = os.path.join(os.path.join(BINARY_PATH,most_recent),'startMZmine_NERSC_Headless_%s.sh'%system)
if os.path.isfile(launch_script):
return launch_script
else:
print('See the docstring, the launch script seems to be missing.')
def replace_files(d,file_list):
"""
Replace files for mzmine task
Inputs:
d: an xml derived dictionary of batch commands
file_list: a list of full paths to mzML files
Outputs:
d: an xml derived dict with new files in it
"""
for i,step in enumerate(d['batch']['batchstep']):
if 'RawDataImportModule' in step['@method']:
d['batch']['batchstep'][i]['parameter']['file'] = file_list
return d
def tree_to_xml(t,filename=None):
    """
    Serialize an ElementTree element to an xml string and optionally write it
    to a file.
    """
    xml_str = ET.tostring(t)
    if isinstance(xml_str, bytes): # ET.tostring returns bytes on python3
        xml_str = xml_str.decode('utf-8')
    if filename:
        with open(filename,'w') as fid:
            fid.write(xml_str)
    return xml_str
def dict_to_etree(d):
"""
Convert a python dictionary to an xml str
http://stackoverflow.com/questions/7684333/converting-xml-to-dictionary-using-elementtree
Example:
from collections import defaultdict
from xml.etree import cElementTree as ET
try:
basestring
except NameError: # python3
basestring = str
#d is a python dictionary
ET.tostring(dict_to_etree(d))
"""
    def _to_etree(d, root):
        if not d:
            pass
        if type(d) is {}.values().__class__: # convert dict_values to a list
            d = list(d)
if isinstance(d, str):
root.text = d
elif isinstance(d, dict):
for k,v in d.items():
assert isinstance(k, str)
if k.startswith('#'):
assert k == '#text' and isinstance(v, str)
root.text = v
elif k.startswith('@'):
assert isinstance(v, str)
root.set(k[1:], v)
elif isinstance(v, list):
for e in v:
_to_etree(e, ET.SubElement(root, k))
else:
_to_etree(v, ET.SubElement(root, k))
# elif isinstance(d,dict_values):
# d = [d]
# _to_etree(d,ET.SubElement(root, k))
else: assert d == 'invalid type', (type(d), d)
assert isinstance(d, dict) and len(d) == 1
tag, body = next(iter(d.items()))
node = ET.Element(tag)
_to_etree(body, node)
return node
def xml_to_dict(xml_str):
"""
Convert an xml file into a python dictionary.
http://stackoverflow.com/questions/7684333/converting-xml-to-dictionary-using-elementtree
Example:
from xml.etree import cElementTree as ET
filename = '/global/homes/b/bpb/batch_params/xmlfile.xml'
with open(filename,'r') as fid:
xml_str = fid.read()
d = xml_to_dict(xml_str)
"""
t = ET.XML(xml_str)
d = etree_to_dict(t)
return d
def etree_to_dict(t):
"""
Convert an xml tree into a python dictionary.
http://stackoverflow.com/questions/7684333/converting-xml-to-dictionary-using-elementtree
"""
d = {t.tag: {} if t.attrib else None}
children = list(t)
if children:
dd = defaultdict(list)
for dc in map(etree_to_dict, children):
for k, v in six.iteritems(dc):
dd[k].append(v)
d = {t.tag: {k:v[0] if len(v) == 1 else v for k, v in six.iteritems(dd)}}
if t.attrib:
d[t.tag].update(('@' + k, v) for k, v in six.iteritems(t.attrib))
if t.text:
text = t.text.strip()
if children or t.attrib:
if text:
d[t.tag]['#text'] = text
else:
d[t.tag] = text
return d
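# The dict convention used throughout this module: element attributes become
# '@'-prefixed keys and text content becomes '#text' (or a plain string when
# the element carries no attributes):
#
#   etree_to_dict(ET.XML('<p name="x">hi</p>'))
#   # -> {'p': {'@name': 'x', '#text': 'hi'}}
#   etree_to_dict(ET.XML('<p>hi</p>'))
#   # -> {'p': 'hi'}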
##########################################################
#### From Here ###########################################
### https://github.com/ianlini/flatten-dict ##############
##########################################################
##########################################################
def tuple_reducer(k1, k2):
if k1 is None:
return (k2,)
else:
return k1 + (k2,)
def path_reducer(k1, k2):
import os.path
if k1 is None:
return k2
else:
return os.path.join(k1, k2)
def tuple_splitter(flat_key):
return flat_key
def path_splitter(flat_key):
keys = PurePath(flat_key).parts
return keys
REDUCER_DICT = {
'tuple': tuple_reducer,
'path': path_reducer,
}
SPLITTER_DICT = {
'tuple': tuple_splitter,
'path': path_splitter,
}
def flatten(d, reducer='tuple', inverse=False, enumerate_types=()):
"""Flatten `Mapping` object.
Parameters
----------
d : dict-like object
The dict that will be flattened.
reducer : {'tuple', 'path', Callable}
The key joining method. If a `Callable` is given, the `Callable` will be
used to reduce.
'tuple': The resulting key will be tuple of the original keys.
'path': Use `os.path.join` to join keys.
inverse : bool
        Whether you want to invert the resulting key and value.
enumerate_types : Sequence[type]
Flatten these types using `enumerate`.
For example, if we set `enumerate_types` to ``(list,)``,
`list` indices become keys: ``{'a': ['b', 'c']}`` -> ``{('a', 0): 'b', ('a', 1): 'c'}``.
Returns
-------
flat_dict : dict
"""
enumerate_types = tuple(enumerate_types)
flattenable_types = (Mapping,) + enumerate_types
if not isinstance(d, flattenable_types):
raise ValueError("argument type %s is not in the flattenalbe types %s"
% (type(d), flattenable_types))
if isinstance(reducer, str):
reducer = REDUCER_DICT[reducer]
flat_dict = {}
def _flatten(d, parent=None):
key_value_iterable = enumerate(d) if isinstance(d, enumerate_types) else six.viewitems(d)
for key, value in key_value_iterable:
flat_key = reducer(parent, key)
if isinstance(value, flattenable_types):
_flatten(value, flat_key)
else:
if inverse:
flat_key, value = value, flat_key
if flat_key in flat_dict:
raise ValueError("duplicated key '{}'".format(flat_key))
flat_dict[flat_key] = value
_flatten(d)
return flat_dict
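# Quick sketch of the default tuple reducer with list enumeration enabled:
#
#   flatten({'a': {'b': 1, 'c': [2, 3]}}, enumerate_types=(list,))
#   # -> {('a', 'b'): 1, ('a', 'c', 0): 2, ('a', 'c', 1): 3}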
# def nested_set_dict(d, keys, value):
# """Set a value to a sequence of nested keys
# Parameters
# ----------
# d : Mapping
# keys : Sequence[str]
# value : Any
# """
# assert keys
# key = keys[0]
# if len(keys) == 1:
# if key in d:
# raise ValueError("duplicated key '{}'".format(key))
# d[key] = value
# return
# d = d.setdefault(key, {})
# nested_set_dict(d, keys[1:], value)
def nested_set_dict(d, keys, value):
"""Set a value to a sequence of nested keys
Parameters
----------
d : Mapping
keys : Sequence[str]
value : Any
"""
assert keys
key = keys[0]
if len(keys) == 1:
if type(d) == list:
d.append(value)
else:
d[key] = value
return
# the type is a string so make a dict if none exists
if type(keys[1]) == int:
if key in d:
pass
else:
d[key] = []
d = d[key]
elif type(key)==int:
if (key+1) > len(d):
d.append({})
d = d[key]
else:
d = d.setdefault(key, {})
nested_set_dict(d, keys[1:], value)
def unflatten(d, splitter='tuple', inverse=False):
"""Unflatten dict-like object.
Parameters
----------
d : dict-like object
The dict that will be unflattened.
splitter : {'tuple', 'path', Callable}
The key splitting method. If a Callable is given, the Callable will be
used to split.
'tuple': Use each element in the tuple key as the key of the unflattened dict.
'path': Use `pathlib.Path.parts` to split keys.
    Example:
d1 = {'a':{'b':[{'c1':'nested1!','d1':[{'e1':'so_nested1!!!'}]},
{'c2':'nested2!','d2':[{'e2':'so_nested2!!!'}]},
{'c3':'nested3!','d3':[{'e3':'so_nested3!!!'}]},
{'c4':'nested4!','d4':[{'e4':'so_nested4a!!!'},
{'e4':'so_nested4b!!!'},
{'e4':'so_nested4c!!!'},
{'e4':'so_nested4d!!!'},
{'e4':'so_nested4e!!!'}]}]}}
Returns
-------
unflattened_dict : dict
"""
if isinstance(splitter, str):
splitter = SPLITTER_DICT[splitter]
    kv = sorted(d.items())
unflattened_dict = {}
for kkvv in kv:
key_tuple = kkvv[0]
value = kkvv[1]
nested_set_dict(unflattened_dict, key_tuple, value)
return unflattened_dict
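# flatten and unflatten are inverses for tuple keys, including list indices:
#
#   flat = flatten({'a': {'b': [10, 20]}}, enumerate_types=(list,))
#   # -> {('a', 'b', 0): 10, ('a', 'b', 1): 20}
#   unflatten(flat)
#   # -> {'a': {'b': [10, 20]}}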
|
{
"content_hash": "37452b4816c07880745ddceec4fbb5c6",
"timestamp": "",
"source": "github",
"line_count": 2327,
"max_line_length": 285,
"avg_line_length": 44.19982810485604,
"alnum_prop": 0.6042312815377286,
"repo_name": "biorack/metatlas",
"id": "be8bf43cc11997438859497c06e56daa92214eff",
"size": "102866",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "metatlas/untargeted/tools.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "4850"
},
{
"name": "Jupyter Notebook",
"bytes": "1233246"
},
{
"name": "Python",
"bytes": "1501450"
},
{
"name": "Shell",
"bytes": "66479"
},
{
"name": "wdl",
"bytes": "18796"
}
],
"symlink_target": ""
}
|
import csv, os
os.makedirs('headerRemoved', exist_ok=True)
# loop through every file in the current working directory.
for csvfile in os.listdir('.'):
if not csvfile.endswith('.csv'):
continue
print('Removing header from ' + csvfile + '...')
# Read the csv file in (skipping the first row)
csvRows = []
csvFileObj = open(csvfile)
reader = csv.reader(csvFileObj)
for row in reader:
if reader.line_num == 1:
continue
csvRows.append(row)
csvFileObj.close()
# Write out the csv file
csvFileObj = open(os.path.join('headerRemoved', csvfile), 'w', newline='')
writer = csv.writer(csvFileObj)
for row in csvRows:
writer.writerow(row)
csvFileObj.close()
|
{
"content_hash": "292b5470205c5c1ee8b0b39f3d63432b",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 78,
"avg_line_length": 26.379310344827587,
"alnum_prop": 0.6196078431372549,
"repo_name": "spencerpomme/coconuts-on-fire",
"id": "33722ff74f954ca141575219ff79679beebc2bfb",
"size": "873",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "removeCSVHeaer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "43654"
},
{
"name": "C++",
"bytes": "2027"
},
{
"name": "CSS",
"bytes": "79"
},
{
"name": "HTML",
"bytes": "2955"
},
{
"name": "JavaScript",
"bytes": "1262"
},
{
"name": "Python",
"bytes": "164695"
},
{
"name": "R",
"bytes": "7601"
}
],
"symlink_target": ""
}
|
from netfilterqueue import NetfilterQueue
from scapy.all import *
import subprocess
import sys
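# Note: this script assumes an iptables rule is already steering packets into
# NFQUEUE number 1, e.g. (illustrative): iptables -A INPUT -j NFQUEUE --queue-num 1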
def pkt_filter_callback(packet):
    print(packet)
packet.accept()
def packet_filter():
nfq = NetfilterQueue()
nfq.bind(1, pkt_filter_callback)
try:
        print('Controller packet filtering mode')
nfq.run()
except KeyboardInterrupt:
        print('Quitting packet filter')
nfq.unbind()
raise KeyboardInterrupt
|
{
"content_hash": "314459e6ebb19de8af06581361ab6366",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 48,
"avg_line_length": 18.32,
"alnum_prop": 0.6746724890829694,
"repo_name": "OpenWinCon/OpenWinNet",
"id": "f8dbd9a3f7c273acd328ee66bd4b4db119c74338",
"size": "483",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "agent/ACconnector/connect_module/Controller_module/connection_module.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "159532"
},
{
"name": "CSS",
"bytes": "81951"
},
{
"name": "HTML",
"bytes": "153375"
},
{
"name": "Java",
"bytes": "42290"
},
{
"name": "JavaScript",
"bytes": "140705"
},
{
"name": "Makefile",
"bytes": "4410"
},
{
"name": "Objective-C",
"bytes": "3394"
},
{
"name": "Python",
"bytes": "7012310"
},
{
"name": "Shell",
"bytes": "10671"
}
],
"symlink_target": ""
}
|
import chainer
import chainer.backends
from chainer.backends.cuda import cupy
import chainer.links as L
import chainer.testing
import chainermn
import numpy as np
import pytest
class Param(object):
def __init__(self, param):
self.dtype = None
self.__dict__.update(param)
params = [Param(p) for p in [
{
'dtype': np.float16,
}, {
'dtype': np.float32,
}, {
'dtype': chainer.mixed16
}]]
class Cycle0SubA(chainer.Chain):
def __init__(self, size):
super(Cycle0SubA, self).__init__()
with self.init_scope():
self.f = L.Linear(size, size)
def __call__(self, x):
return self.f(x)
class Cycle0SubB(chainer.Chain):
def __init__(self, size):
super(Cycle0SubB, self).__init__(
f=L.Linear(size, 2))
def __call__(self, h):
return self.f(h)
class Cycle0(chainermn.MultiNodeChainList):
def __init__(self, size, comm, rank_prev, rank_next):
super(Cycle0, self).__init__(comm=comm)
self.add_link(Cycle0SubA(size), rank_in=None, rank_out=rank_next)
self.add_link(Cycle0SubB(size), rank_in=rank_prev, rank_out=None)
class Cycle1Sub(chainer.Chain):
def __init__(self, size):
super(Cycle1Sub, self).__init__(
f=L.Linear(size, size))
def __call__(self, h):
return self.f(h)
class Cycle1(chainermn.MultiNodeChainList):
def __init__(self, size, comm, rank_prev, rank_next):
super(Cycle1, self).__init__(comm=comm)
self.add_link(Cycle1Sub(size), rank_in=rank_prev, rank_out=rank_next)
class Cross0(chainermn.MultiNodeChainList):
def __init__(self, size, comm, rank_prev, rank_next):
super(Cross0, self).__init__(comm=comm)
self.add_link(Cycle0SubA(size), rank_in=None, rank_out=rank_next)
self.add_link(Cycle0SubB(size), rank_in=rank_prev, rank_out=None)
class Cross1(chainermn.MultiNodeChainList):
def __init__(self, size, comm, rank_prev, rank_next):
super(Cross1, self).__init__(comm=comm)
self.add_link(Cycle0SubB(size), rank_in=rank_prev, rank_out=None)
self.add_link(Cycle0SubA(size), rank_in=None, rank_out=rank_next)
class BranchSubA(chainer.Chain):
def __init__(self, size):
super(BranchSubA, self).__init__(
f=L.Linear(size, size))
def __call__(self, x):
return self.f(x)
class BranchSubB(chainer.Chain):
def __init__(self, size):
super(BranchSubB, self).__init__(
f=L.Linear(size, size))
def __call__(self, *xs):
x = xs[0]
for _x in xs[1:]:
x = x + _x
return self.f(x)
class BranchParent1(chainermn.MultiNodeChainList):
def __init__(self, size, comm, rank_children):
super(BranchParent1, self).__init__(comm=comm)
self.add_link(BranchSubA(size), rank_in=None, rank_out=rank_children)
self.add_link(BranchSubB(size), rank_in=rank_children, rank_out=None)
class BranchParent2(chainermn.MultiNodeChainList):
def __init__(self, size, comm, rank_children):
super(BranchParent2, self).__init__(comm=comm)
ranks = [comm.rank] + rank_children
self.add_link(BranchSubA(size), rank_in=None, rank_out=ranks)
self.add_link(BranchSubA(size), rank_in=comm.rank, rank_out=comm.rank)
self.add_link(BranchSubB(size), rank_in=ranks, rank_out=None)
class BranchParent3(chainermn.MultiNodeChainList):
def __init__(self, size, comm, rank_children):
super(BranchParent3, self).__init__(comm=comm)
ranks = rank_children + [comm.rank]
self.add_link(BranchSubA(size), rank_in=None, rank_out=ranks)
self.add_link(BranchSubA(size), rank_in=comm.rank, rank_out=comm.rank)
self.add_link(BranchSubB(size), rank_in=ranks, rank_out=None)
class BranchParent4(chainermn.MultiNodeChainList):
def __init__(self, size, comm, rank_children):
super(BranchParent4, self).__init__(comm=comm)
ranks = rank_children + [comm.rank]
ranks = ranks[1:] + ranks[0:1]
self.add_link(BranchSubA(size), rank_in=None, rank_out=ranks)
self.add_link(BranchSubA(size), rank_in=comm.rank, rank_out=comm.rank)
self.add_link(BranchSubB(size), rank_in=ranks, rank_out=None)
class BranchChild(chainermn.MultiNodeChainList):
def __init__(self, size, comm, rank_parent):
super(BranchChild, self).__init__(comm=comm)
self.add_link(
BranchSubA(size),
rank_in=rank_parent,
rank_out=rank_parent)
class TwistFirst(chainermn.MultiNodeChainList):
def __init__(self, size, comm, rank_next):
super(TwistFirst, self).__init__(comm=comm)
self.add_link(BranchSubA(size), rank_in=None, rank_out=rank_next)
self.add_link(BranchSubA(size), rank_in=rank_next, rank_out=None)
class Twist(chainermn.MultiNodeChainList):
def __init__(self, size, comm, rank_prev, rank_next):
super(Twist, self).__init__(comm=comm)
self.add_link(BranchSubA(size), rank_in=rank_prev, rank_out=comm.rank)
self.add_link(BranchSubA(size), rank_in=None, rank_out=rank_prev)
self.add_link(BranchSubA(size), rank_in=None, rank_out=rank_next)
self.add_link(BranchSubA(size), rank_in=rank_next, rank_out=comm.rank)
self.add_link(
BranchSubB(size),
rank_in=[comm.rank, comm.rank],
rank_out=None)
class TwistLast(chainermn.MultiNodeChainList):
def __init__(self, size, comm, rank_prev):
super(TwistLast, self).__init__(comm=comm)
self.add_link(BranchSubA(size), rank_in=rank_prev, rank_out=None)
self.add_link(BranchSubA(size), rank_in=None, rank_out=rank_prev)
class TupleDataSubA(chainer.Chain):
def __init__(self, size):
super(TupleDataSubA, self).__init__(
f0=L.Linear(size, size),
f1=L.Linear(size, size))
def __call__(self, x):
y0 = self.f0(x)
y1 = self.f1(x)
return y0, y1
class TupleDataSubB(chainer.Chain):
def __init__(self, size):
super(TupleDataSubB, self).__init__(
f0=L.Linear(size, size),
f1=L.Linear(size, size))
def __call__(self, x):
        # TupleDataSubB receives a two-element tuple from TupleDataSubA.
x0, x1 = x
y0 = self.f0(x0)
y1 = self.f1(x1)
return y0 + y1
class TupleDataSubC(chainer.Chain):
def __init__(self, size):
super(TupleDataSubC, self).__init__(
f=L.Linear(size, size))
def __call__(self, x):
return self.f(x)
class TupleDataParent(chainermn.MultiNodeChainList):
def __init__(self, comm, size, rank_child):
super(TupleDataParent, self).__init__(comm=comm)
self.add_link(TupleDataSubA(size), rank_in=None, rank_out=rank_child)
self.add_link(TupleDataSubC(size), rank_in=rank_child, rank_out=None)
class TupleDataChild(chainermn.MultiNodeChainList):
def __init__(self, comm, size, rank_parent):
super(TupleDataChild, self).__init__(comm=comm)
self.add_link(
TupleDataSubB(size), rank_in=rank_parent, rank_out=rank_parent)
def create_communicator(gpu):
if gpu:
communicator = chainermn.create_communicator('flat')
chainer.backends.cuda.get_device_from_id(communicator.intra_rank).use()
else:
communicator = chainermn.create_communicator('naive')
if communicator.size < 2:
pytest.skip('This test is for multinode only')
rank_next = (communicator.rank + 1) % communicator.size
rank_prev = (communicator.rank - 1) % communicator.size
return communicator, rank_next, rank_prev
def check_cycle_model(gpu, param):
communicator, rank_next, rank_prev = create_communicator(gpu)
n, d = 100, 10
with chainer.using_config('dtype', param.dtype):
if communicator.rank == 0:
X = np.random.randn(n, d).astype(param.dtype)
Y = (np.random.rand(n) * 2).astype(np.int32)
model = L.Classifier(
Cycle0(d, communicator, rank_next, rank_prev))
if gpu:
model.to_device(cupy.cuda.Device())
X = chainer.backends.cuda.to_gpu(X)
Y = chainer.backends.cuda.to_gpu(Y)
for i in range(n):
err = model(X[i:i + 1], Y[i:i + 1])
err.backward()
else:
model = Cycle1(
d, communicator, rank_next, rank_prev)
if gpu:
model.to_device(cupy.cuda.Device())
for i in range(n):
err = model()
err.backward()
@pytest.mark.parametrize('param', params)
def test_cycle_model_cpu(param):
check_cycle_model(False, param)
@pytest.mark.parametrize('param', params)
@chainer.testing.attr.gpu
def test_cycle_model_gpu(param):
check_cycle_model(True, param)
def check_crossing_model(gpu, param):
communicator, rank_next, rank_prev = create_communicator(gpu)
n, d = 100, 10
X = np.random.randn(n, d).astype(param.dtype)
Y = (np.random.rand(n) * 2).astype(np.int32)
with chainer.using_config('dtype', param.dtype):
if communicator.rank == 0:
model = L.Classifier(Cross0(
d, communicator, rank_next, rank_prev))
else:
model = L.Classifier(Cross1(
d, communicator, rank_next, rank_prev))
if gpu:
model.to_device(cupy.cuda.Device())
X = chainer.backends.cuda.to_gpu(X)
Y = chainer.backends.cuda.to_gpu(Y)
for i in range(n):
err = model(X[i:i + 1], Y[i:i + 1])
err.backward()
@pytest.mark.parametrize('param', params)
def test_crossing_model_cpu(param):
check_crossing_model(False, param)
@pytest.mark.parametrize('param', params)
@chainer.testing.attr.gpu
def test_crossing_model_gpu(param):
check_crossing_model(True, param)
def check_branching_model(gpu, communicator, rank_next, rank_prev,
parent_model, param):
n, d = 100, 10
X = np.random.randn(n, d).astype(param.dtype)
Y = (np.random.rand(n) * 2).astype(np.int32)
with chainer.using_config('dtype', param.dtype):
if communicator.rank == 0:
rank_children = [rank for rank in range(1, communicator.size)]
model = L.Classifier(parent_model(
d, communicator, rank_children))
if gpu:
model.to_device(cupy.cuda.Device())
X = chainer.backends.cuda.to_gpu(X)
Y = chainer.backends.cuda.to_gpu(Y)
for i in range(n):
err = model(X[i:i + 1], Y[i:i + 1])
err.backward()
else:
model = BranchChild(d, communicator, 0)
if gpu:
model.to_device(cupy.cuda.Device())
for i in range(n):
err = model()
err.backward()
def check_branching_models(gpu, param):
communicator, rank_next, rank_prev = create_communicator(gpu)
check_branching_model(gpu, communicator, rank_next, rank_prev,
BranchParent1, param)
check_branching_model(gpu, communicator, rank_next, rank_prev,
BranchParent2, param)
check_branching_model(gpu, communicator, rank_next, rank_prev,
BranchParent3, param)
check_branching_model(gpu, communicator, rank_next, rank_prev,
BranchParent4, param)
@pytest.mark.parametrize('param', params)
def test_branching_models_cpu(param):
check_branching_models(False, param)
@pytest.mark.parametrize('param', params)
@chainer.testing.attr.gpu
def test_branching_models_gpu(param):
check_branching_models(True, param)
def check_twisting_model(gpu, param):
communicator, rank_next, rank_prev = create_communicator(gpu)
n, d = 100, 10
X = np.random.randn(n, d).astype(param.dtype)
Y = (np.random.rand(n) * 2).astype(np.int32)
with chainer.using_config('dtype', param.dtype):
if communicator.rank == 0:
model = L.Classifier(
TwistFirst(d, communicator, rank_next))
elif communicator.rank == communicator.size - 1:
model = L.Classifier(
TwistLast(d, communicator, rank_prev))
else:
model = L.Classifier(Twist(
d, communicator, rank_prev, rank_next))
if gpu:
model.to_device(cupy.cuda.Device())
X = chainer.backends.cuda.to_gpu(X)
Y = chainer.backends.cuda.to_gpu(Y)
for i in range(n):
err = model(X[i:i + 1], Y[i:i + 1])
err.backward()
@pytest.mark.parametrize('param', params)
def test_twisting_model_cpu(param):
check_twisting_model(False, param)
@pytest.mark.parametrize('param', params)
@chainer.testing.attr.gpu
def test_twisting_model_gpu(param):
check_twisting_model(True, param)
def check_tuple_data_model(gpu, param):
# This test only uses pairs (0, 1), (2, 3), ... (2m, 2m+1)
communicator, rank_next, rank_prev = create_communicator(gpu)
n, d = 100, 10
X = np.random.randn(n, d).astype(param.dtype)
Y = (np.random.rand(n) * 2).astype(np.int32)
with chainer.using_config('dtype', param.dtype):
if communicator.rank % 2 == 0:
if communicator.rank == communicator.size - 1:
                # in case 2m is the right end with an odd number of nodes
return
model = L.Classifier(
TupleDataParent(communicator, d, rank_next))
elif communicator.rank % 2 == 1:
model = TupleDataChild(communicator, d, rank_prev)
assert model is not None
if gpu:
model.to_device(cupy.cuda.Device())
X = chainer.backends.cuda.to_gpu(X)
Y = chainer.backends.cuda.to_gpu(Y)
for i in range(n):
if communicator.rank % 2 == 0:
err = model(X[i:i + 1], Y[i:i + 1])
elif communicator.rank % 2 == 1:
err = model()
assert err is not None
err.backward()
@pytest.mark.parametrize('param', params)
def test_tuple_data_model_cpu(param):
check_tuple_data_model(False, param)
@pytest.mark.parametrize('param', params)
@chainer.testing.attr.gpu
def test_tuple_data_model_gpu(param):
check_tuple_data_model(True, param)
|
{
"content_hash": "54477fafb40198d0766d286cc55d68b3",
"timestamp": "",
"source": "github",
"line_count": 451,
"max_line_length": 79,
"avg_line_length": 32.06430155210643,
"alnum_prop": 0.6016872968674366,
"repo_name": "chainer/chainer",
"id": "b771887275388028d9b189de8d49812c6fb83e86",
"size": "14461",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/chainermn_tests/links_tests/test_multi_node_chain_list.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3805"
},
{
"name": "C",
"bytes": "1099"
},
{
"name": "C++",
"bytes": "1688016"
},
{
"name": "CMake",
"bytes": "51351"
},
{
"name": "Cuda",
"bytes": "191633"
},
{
"name": "Dockerfile",
"bytes": "6102"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "6431941"
},
{
"name": "Shell",
"bytes": "50151"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Todo',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(help_text=b'What do you need to do?', max_length=255)),
('description', models.TextField(help_text=b'How are you going to do it?')),
('due_date', models.DateTimeField(default=None, null=True, blank=True)),
],
options={
},
bases=(models.Model,),
),
]
|
{
"content_hash": "3047ab263ac4167d69f3d4a9d94f7f2a",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 114,
"avg_line_length": 31.208333333333332,
"alnum_prop": 0.5580774365821095,
"repo_name": "pombredanne/djangle",
"id": "44dccd2e556512519b6b8955af5571777ee77ad1",
"size": "773",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "example/todo/migrations/0001_initial.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3218"
},
{
"name": "JavaScript",
"bytes": "2001"
},
{
"name": "Python",
"bytes": "8384"
}
],
"symlink_target": ""
}
|
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from mock.mock import MagicMock, call, patch
from stacks.utils.RMFTestCase import *
import json
class TestSqoop(RMFTestCase):
COMMON_SERVICES_PACKAGE_DIR = "SQOOP/1.4.4.2.0/package"
STACK_VERSION = "2.0.6"
CONFIG_OVERRIDES = {"serviceName":"SQOOP", "role":"SQOOP"}
def test_configure_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/sqoop_client.py",
classname = "SqoopClient",
command = "configure",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('Link', '/usr/lib/sqoop/lib/mysql-connector-java.jar',
to = '/usr/share/java/mysql-connector-java.jar',)
self.assertResourceCalled('Directory', '/usr/lib/sqoop/conf',
create_parents = True,
owner = 'sqoop',
group = 'hadoop',)
self.assertResourceCalled('XmlConfig', 'sqoop-site.xml',
owner = 'sqoop',
group = 'hadoop',
conf_dir = '/usr/lib/sqoop/conf',
configurations = self.getConfig()['configurations']['sqoop-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['sqoop-site'])
self.assertResourceCalled('File', '/usr/lib/sqoop/conf/sqoop-env.sh',
owner = 'sqoop',
group = 'hadoop',
content = InlineTemplate(self.getConfig()['configurations']['sqoop-env']['content'])
)
self.assertResourceCalled('File', '/usr/lib/sqoop/conf/sqoop-env-template.sh',
only_if = 'test -e /usr/lib/sqoop/conf/sqoop-env-template.sh',
owner = 'sqoop',
group = 'hadoop',)
self.assertResourceCalled('File', '/usr/lib/sqoop/conf/sqoop-site-template.xml',
only_if = 'test -e /usr/lib/sqoop/conf/sqoop-site-template.xml',
owner = 'sqoop',
group = 'hadoop',)
self.assertResourceCalled('File', '/usr/lib/sqoop/conf/sqoop-site.xml',
only_if = 'test -e /usr/lib/sqoop/conf/sqoop-site.xml',
owner = 'sqoop',
group = 'hadoop',)
self.assertNoMoreResources()
def test_configure_add_jdbc(self):
config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/secured.json"
with open(config_file, "r") as f:
loaded_json = json.load(f)
loaded_json['configurations']['sqoop-env']['jdbc_drivers'] = 'org.postgresql.Driver, oracle.jdbc.driver.OracleDriver'
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/sqoop_client.py",
classname = "SqoopClient",
command = "configure",
config_dict = loaded_json,
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('Link', '/usr/lib/sqoop/lib/mysql-connector-java.jar',
to = '/usr/share/java/mysql-connector-java.jar',
)
self.assertResourceCalled('File', '/usr/lib/sqoop/lib/test-postgres-jdbc.jar',
content = DownloadSource('http://c6401.ambari.apache.org:8080/resources//test-postgres-jdbc.jar'),
mode = 0644,
)
self.assertResourceCalled('File', '/usr/lib/sqoop/lib/oracle-jdbc-driver.jar',
content = DownloadSource('http://c6401.ambari.apache.org:8080/resources//oracle-jdbc-driver.jar'),
mode = 0644,
)
self.assertResourceCalled('Directory', '/usr/lib/sqoop/conf',
owner = 'sqoop',
group = 'hadoop',
create_parents = True,
)
self.assertResourceCalled('XmlConfig', 'sqoop-site.xml',
owner = 'sqoop',
group = 'hadoop',
conf_dir = '/usr/lib/sqoop/conf',
configurations = self.getConfig()['configurations']['sqoop-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['sqoop-site'])
self.assertResourceCalled('File', '/usr/lib/sqoop/conf/sqoop-env.sh',
content = InlineTemplate(self.getConfig()['configurations']['sqoop-env']['content']),
owner = 'sqoop',
group = 'hadoop',
)
self.assertResourceCalled('File', '/usr/lib/sqoop/conf/sqoop-env-template.sh',
owner = 'sqoop',
only_if = 'test -e /usr/lib/sqoop/conf/sqoop-env-template.sh',
group = 'hadoop',
)
self.assertResourceCalled('File', '/usr/lib/sqoop/conf/sqoop-site-template.xml',
owner = 'sqoop',
only_if = 'test -e /usr/lib/sqoop/conf/sqoop-site-template.xml',
group = 'hadoop',
)
self.assertResourceCalled('File', '/usr/lib/sqoop/conf/sqoop-site.xml',
owner = 'sqoop',
only_if = 'test -e /usr/lib/sqoop/conf/sqoop-site.xml',
group = 'hadoop',
)
self.assertNoMoreResources()
def test_pre_upgrade_restart_23(self):
config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
with open(config_file, "r") as f:
json_content = json.load(f)
version = '2.3.0.0-1234'
json_content['commandParams']['version'] = version
mocks_dict = {}
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/sqoop_client.py",
classname = "SqoopClient",
command = "pre_upgrade_restart",
config_dict = json_content,
config_overrides = self.CONFIG_OVERRIDES,
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES,
mocks_dict = mocks_dict)
self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'sqoop-client', version), sudo=True)
|
{
"content_hash": "0975d7d406aced5ea76e16a0ec617f7e",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 130,
"avg_line_length": 53.59722222222222,
"alnum_prop": 0.5309665716506867,
"repo_name": "radicalbit/ambari",
"id": "053d44ad15703f69cbeb80242c81a723d80ef1bf",
"size": "7741",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "ambari-server/src/test/python/stacks/2.0.6/SQOOP/test_sqoop.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "42212"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "182799"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "1287531"
},
{
"name": "CoffeeScript",
"bytes": "4323"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Groovy",
"bytes": "88056"
},
{
"name": "HTML",
"bytes": "5098825"
},
{
"name": "Java",
"bytes": "29006663"
},
{
"name": "JavaScript",
"bytes": "17274453"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLSQL",
"bytes": "2160"
},
{
"name": "PLpgSQL",
"bytes": "314333"
},
{
"name": "PowerShell",
"bytes": "2087991"
},
{
"name": "Python",
"bytes": "14584206"
},
{
"name": "R",
"bytes": "1457"
},
{
"name": "Roff",
"bytes": "13935"
},
{
"name": "Ruby",
"bytes": "14478"
},
{
"name": "SQLPL",
"bytes": "2117"
},
{
"name": "Shell",
"bytes": "741459"
},
{
"name": "Vim script",
"bytes": "5813"
}
],
"symlink_target": ""
}
|
from marshmallow import ValidationError
from pyramid.httpexceptions import HTTPInternalServerError
from pyramid.view import view_defaults
from pyramid.view import view_config
from amnesia.utils import recaptcha
from amnesia.utils.forms import render_form
from amnesia.modules.account.validation import AccountSchema
from amnesia.modules.account import AuthResource
from amnesia.views import BaseView
def includeme(config):
config.scan(__name__)
@view_defaults(context=AuthResource, name='register', permission='register',
renderer='amnesia:templates/account/register.pt')
class Register(BaseView):
form_tmpl = 'amnesia:templates/account/_form_register.pt'
def form(self, data=None, errors=None):
return render_form(self.form_tmpl, self.request, data, errors=errors)
@view_config(request_method='GET')
def get(self):
return {'form': self.form()}
@view_config(request_method='POST')
def post(self):
form_data = self.request.POST.mixed()
try:
result = AccountSchema().load(form_data)
except ValidationError as error:
return {'form': self.form(form_data, error.messages)}
if self.context.find_login(result['login']):
errors = {'login': 'Login already exists'}
elif self.context.find_email(result['email']):
errors = {'email': 'Email already exists'}
elif not recaptcha.verify(self.request, result['captcha_token']):
errors = {'captcha': 'Captcha validation failed'}
else:
errors = None
if errors:
return {'form': self.form(form_data, errors)}
new_account = self.context.register(result)
if not new_account:
raise HTTPInternalServerError()
self.request.override_renderer = 'amnesia:templates/account/register_ok.pt'
return {'new_account': new_account}
|
{
"content_hash": "205df140a3a32b85bfc3ef658932e618",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 83,
"avg_line_length": 31.933333333333334,
"alnum_prop": 0.6717118997912317,
"repo_name": "silenius/amnesia",
"id": "47170abad9b82ddc800ac57a0e66a8a6969db375",
"size": "1941",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "amnesia/modules/account/views/register.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "260179"
},
{
"name": "HTML",
"bytes": "14462"
},
{
"name": "JavaScript",
"bytes": "113808"
},
{
"name": "Mako",
"bytes": "806"
},
{
"name": "PLpgSQL",
"bytes": "18006"
},
{
"name": "Python",
"bytes": "274296"
}
],
"symlink_target": ""
}
|
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# See: http://stackoverflow.com/questions/6854947/how-to-display-a-window-on-a-secondary-display-in-pyqt
# http://pyqt.sourceforge.net/Docs/PyQt4/qdesktopwidget.html#details
import sys
from PyQt4 import QtGui, QtCore
class Window(QtGui.QWidget):
def __init__(self, name):
super(Window, self).__init__()
# Create a label
label = QtGui.QLabel(name + " (press Esc to quit)")
# Create the layout
vbox = QtGui.QVBoxLayout()
vbox.addWidget(label)
# Set the layout
self.setLayout(vbox)
self.resize(250, 150)
self.setWindowTitle(name)
def keyPressEvent(self, e):
if e.key() == QtCore.Qt.Key_Escape:
self.close()
def main():
"""Main function"""
app = QtGui.QApplication(sys.argv)
# For an application, the screen where the main widget resides is the
# primary screen. This is stored in the primaryScreen property. All windows
# opened in the context of the application should be constrained to the
# boundaries of the primary screen; for example, it would be inconvenient
# if a dialog box popped up on a different screen, or split over two
# screens.
desktop = QtGui.QDesktopWidget()
#print(desktop.numScreens())
#print(desktop.primaryScreen())
# The default constructor has no parent.
# A widget with no parent is a window.
window0 = Window("Window 0")
window1 = Window("Window 1")
qrect0 = desktop.screenGeometry(0)
qrect1 = desktop.screenGeometry(1)
window0.move(qrect0.left(), qrect0.top())
window1.move(qrect1.left(), qrect1.top())
window0.showFullScreen()
window1.showFullScreen()
# The mainloop of the application. The event handling starts from this point.
    # The exec_() method has a trailing underscore because exec is a
    # Python keyword, so exec_() is used instead.
exit_code = app.exec_()
# The sys.exit() method ensures a clean exit.
# The environment will be informed, how the application ended.
sys.exit(exit_code)
if __name__ == '__main__':
main()
|
{
"content_hash": "9caeddca9533778ebecff11a73a4e4a6",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 124,
"avg_line_length": 32.375,
"alnum_prop": 0.6822393822393822,
"repo_name": "jeremiedecock/snippets",
"id": "58daac6838ee8060becb61e0a80ba49f466cb2c3",
"size": "3276",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/pyqt/pyqt4/dual_head_fullscreen.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "4294"
},
{
"name": "Batchfile",
"bytes": "6779"
},
{
"name": "C",
"bytes": "102107"
},
{
"name": "C++",
"bytes": "320943"
},
{
"name": "CMake",
"bytes": "11424"
},
{
"name": "CSS",
"bytes": "21121"
},
{
"name": "Cython",
"bytes": "21"
},
{
"name": "Dockerfile",
"bytes": "1818"
},
{
"name": "Fortran",
"bytes": "633"
},
{
"name": "Gnuplot",
"bytes": "39999"
},
{
"name": "Go",
"bytes": "3166"
},
{
"name": "Groovy",
"bytes": "3009"
},
{
"name": "HTML",
"bytes": "138995"
},
{
"name": "IDL",
"bytes": "43"
},
{
"name": "Java",
"bytes": "120221"
},
{
"name": "JavaScript",
"bytes": "32342"
},
{
"name": "Jinja",
"bytes": "206"
},
{
"name": "Jupyter Notebook",
"bytes": "95991"
},
{
"name": "Lua",
"bytes": "200"
},
{
"name": "M4",
"bytes": "111"
},
{
"name": "MATLAB",
"bytes": "31972"
},
{
"name": "Makefile",
"bytes": "81307"
},
{
"name": "OpenSCAD",
"bytes": "14995"
},
{
"name": "PHP",
"bytes": "94"
},
{
"name": "Perl",
"bytes": "46"
},
{
"name": "Processing",
"bytes": "208"
},
{
"name": "Prolog",
"bytes": "454"
},
{
"name": "Python",
"bytes": "1685966"
},
{
"name": "R",
"bytes": "76"
},
{
"name": "Raku",
"bytes": "43"
},
{
"name": "Ruby",
"bytes": "42"
},
{
"name": "Scheme",
"bytes": "649"
},
{
"name": "Shell",
"bytes": "52865"
},
{
"name": "Smalltalk",
"bytes": "55"
},
{
"name": "TeX",
"bytes": "1189"
},
{
"name": "Vue",
"bytes": "49445"
},
{
"name": "XSLT",
"bytes": "1816"
}
],
"symlink_target": ""
}
|
import element.node
class ActionView(object):
def __init__(self, rendered, event_dispatcher):
self.rendered = rendered
self.event_dispatcher = event_dispatcher
def dispatch(self, request_handler, *args, **kwargs):
if '_controller' not in kwargs:
return
serviceId, method = kwargs['_controller'].split(":")
del kwargs['_controller']
parameters = request_handler.request.query_arguments.copy()
parameters.update(kwargs)
node = element.node.Node('action://%s' % serviceId, {
'type': 'action.node',
'serviceId': serviceId,
'method': method,
'kwargs': parameters,
'request': request_handler.request
})
event = self.event_dispatcher.dispatch('element.node.load.success', {
'node': node
})
return self.rendered.render(request_handler, event.get('node'))
|
{
"content_hash": "41926af3604a607e001c09883a45c6d9",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 77,
"avg_line_length": 30.419354838709676,
"alnum_prop": 0.5885471898197243,
"repo_name": "rande/python-element",
"id": "36122811e56841ad9fc075348348a69e8b78cbe4",
"size": "1545",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "element/plugins/action/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "120475"
},
{
"name": "HTML",
"bytes": "93511"
},
{
"name": "JavaScript",
"bytes": "2830"
},
{
"name": "Makefile",
"bytes": "789"
},
{
"name": "Nginx",
"bytes": "410"
},
{
"name": "Perl",
"bytes": "2987"
},
{
"name": "Python",
"bytes": "303084"
}
],
"symlink_target": ""
}
|
"""
Classes for representing and processing probabilistic information.
The ``FreqDist`` class is used to encode "frequency distributions",
which count the number of times that each outcome of an experiment
occurs.
The ``ProbDistI`` class defines a standard interface for "probability
distributions", which encode the probability of each outcome for an
experiment. There are two types of probability distribution:
- "derived probability distributions" are created from frequency
distributions. They attempt to model the probability distribution
that generated the frequency distribution.
- "analytic probability distributions" are created directly from
parameters (such as variance).
The ``ConditionalFreqDist`` class and ``ConditionalProbDistI`` interface
are used to encode conditional distributions. Conditional probability
distributions can be derived or analytic; but currently the only
implementation of the ``ConditionalProbDistI`` interface is
``ConditionalProbDist``, a derived distribution.
"""
_NINF = float('-1e300')
import math
import random
import warnings
from operator import itemgetter
from itertools import imap, islice
from collections import defaultdict
##//////////////////////////////////////////////////////
## Frequency Distributions
##//////////////////////////////////////////////////////
# [SB] inherit from defaultdict?
# [SB] for NLTK 3.0, inherit from collections.Counter?
class FreqDist(dict):
"""
A frequency distribution for the outcomes of an experiment. A
frequency distribution records the number of times each outcome of
an experiment has occurred. For example, a frequency distribution
could be used to record the frequency of each word type in a
document. Formally, a frequency distribution can be defined as a
function mapping from each sample to the number of times that
sample occurred as an outcome.
Frequency distributions are generally constructed by running a
number of experiments, and incrementing the count for a sample
every time it is an outcome of an experiment. For example, the
following code will produce a frequency distribution that encodes
how often each word occurs in a text:
>>> from nltk.tokenize import word_tokenize
>>> from nltk.probability import FreqDist
>>> sent = 'This is an example sentence'
>>> fdist = FreqDist()
>>> for word in word_tokenize(sent):
... fdist.inc(word.lower())
An equivalent way to do this is with the initializer:
>>> fdist = FreqDist(word.lower() for word in word_tokenize(sent))
"""
def __init__(self, samples=None):
"""
Construct a new frequency distribution. If ``samples`` is
given, then the frequency distribution will be initialized
with the count of each object in ``samples``; otherwise, it
will be initialized to be empty.
In particular, ``FreqDist()`` returns an empty frequency
distribution; and ``FreqDist(samples)`` first creates an empty
frequency distribution, and then calls ``update`` with the
list ``samples``.
:param samples: The samples to initialize the frequency
distribution with.
:type samples: Sequence
"""
dict.__init__(self)
self._N = 0
self._reset_caches()
if samples:
self.update(samples)
def inc(self, sample, count=1):
"""
Increment this FreqDist's count for the given sample.
:param sample: The sample whose count should be incremented.
:type sample: any
:param count: The amount to increment the sample's count by.
:type count: int
:rtype: None
:raise NotImplementedError: If ``sample`` is not a
supported sample type.
"""
if count == 0: return
self[sample] = self.get(sample,0) + count
def __setitem__(self, sample, value):
"""
Set this FreqDist's count for the given sample.
:param sample: The sample whose count should be incremented.
:type sample: any hashable object
:param count: The new value for the sample's count
:type count: int
:rtype: None
:raise TypeError: If ``sample`` is not a supported sample type.
"""
self._N += (value - self.get(sample, 0))
dict.__setitem__(self, sample, value)
# Invalidate the caches
self._reset_caches()
def N(self):
"""
Return the total number of sample outcomes that have been
recorded by this FreqDist. For the number of unique
sample values (or bins) with counts greater than zero, use
``FreqDist.B()``.
:rtype: int
"""
return self._N
def B(self):
"""
Return the total number of sample values (or "bins") that
have counts greater than zero. For the total
number of sample outcomes recorded, use ``FreqDist.N()``.
(FreqDist.B() is the same as len(FreqDist).)
:rtype: int
"""
return len(self)
def samples(self):
"""
Return a list of all samples that have been recorded as
outcomes by this frequency distribution. Use ``fd[sample]``
to determine the count for each sample.
:rtype: list
"""
return self.keys()
def hapaxes(self):
"""
Return a list of all samples that occur once (hapax legomena)
:rtype: list
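For illustration, a minimal doctest (hypothetical counts; ``sorted``
keeps the output deterministic):
>>> from nltk.probability import FreqDist
>>> sorted(FreqDist('abbcc').hapaxes())
['a']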
"""
return [item for item in self if self[item] == 1]
def Nr(self, r, bins=None):
"""
Return the number of samples with count r.
:type r: int
:param r: A sample count.
:type bins: int
:param bins: The number of possible sample outcomes. ``bins``
is used to calculate Nr(0). In particular, Nr(0) is
``bins-self.B()``. If ``bins`` is not specified, it
defaults to ``self.B()`` (so Nr(0) will be 0).
:rtype: int
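For illustration (hypothetical counts):
>>> from nltk.probability import FreqDist
>>> fd = FreqDist('abracadabra')   # a:5, b:2, r:2, c:1, d:1
>>> fd.Nr(2)                       # two samples ('b', 'r') occur twice
2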
"""
if r < 0: raise IndexError, 'FreqDist.Nr(): r must be non-negative'
# Special case for Nr(0):
if r == 0:
if bins is None: return 0
else: return bins-self.B()
# We have to search the entire distribution to find Nr. Since
# this is an expensive operation, and is likely to be used
# repeatedly, cache the results.
if self._Nr_cache is None:
self._cache_Nr_values()
if r >= len(self._Nr_cache): return 0
return self._Nr_cache[r]
def _cache_Nr_values(self):
Nr = [0]
for sample in self:
c = self.get(sample, 0)
if c >= len(Nr):
Nr += [0]*(c+1-len(Nr))
Nr[c] += 1
self._Nr_cache = Nr
def _cumulative_frequencies(self, samples=None):
"""
Return the cumulative frequencies of the specified samples.
If no samples are specified, all counts are returned, starting
with the largest.
:param samples: the samples whose frequencies should be returned.
:type samples: any
:rtype: iter(float)
"""
cf = 0.0
if not samples:
samples = self.keys()
for sample in samples:
cf += self[sample]
yield cf
# slightly odd nomenclature freq() if FreqDist does counts and ProbDist does probs,
# here, freq() does probs
def freq(self, sample):
"""
Return the frequency of a given sample. The frequency of a
sample is defined as the count of that sample divided by the
total number of sample outcomes that have been recorded by
this FreqDist. The count of a sample is defined as the
number of times that sample outcome was recorded by this
FreqDist. Frequencies are always real numbers in the range
[0, 1].
:param sample: the sample whose frequency
should be returned.
:type sample: any
:rtype: float
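For illustration (hypothetical counts):
>>> from nltk.probability import FreqDist
>>> fd = FreqDist('abb')
>>> round(fd.freq('b'), 3)   # 2 outcomes of 3
0.667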
"""
if self._N == 0:
return 0
return float(self[sample]) / self._N
def max(self):
"""
Return the sample with the greatest number of outcomes in this
frequency distribution. If two or more samples have the same
number of outcomes, return one of them; which sample is
returned is undefined. If no outcomes have occurred in this
frequency distribution, a ValueError is raised.
:return: The sample with the maximum number of outcomes in this
frequency distribution.
:rtype: any or None
"""
if self._max_cache is None:
if len(self) == 0:
raise ValueError('A FreqDist must have at least one sample before max is defined.')
self._max_cache = max([(a,b) for (b,a) in self.items()])[1]
return self._max_cache
def plot(self, *args, **kwargs):
"""
Plot samples from the frequency distribution
displaying the most frequent sample first. If an integer
parameter is supplied, stop after this many samples have been
plotted. If two integer parameters m, n are supplied, plot a
subset of the samples, beginning with m and stopping at n-1.
For a cumulative plot, specify cumulative=True.
(Requires Matplotlib to be installed.)
:param title: The title for the graph
:type title: str
:param cumulative: A flag to specify whether the plot is cumulative (default = False)
:type cumulative: bool
"""
try:
import pylab
except ImportError:
raise ValueError('The plot function requires the matplotlib package (aka pylab). '
'See http://matplotlib.sourceforge.net/')
if len(args) == 0:
args = [len(self)]
samples = list(islice(self, *args))
cumulative = _get_kwarg(kwargs, 'cumulative', False)
if cumulative:
freqs = list(self._cumulative_frequencies(samples))
ylabel = "Cumulative Counts"
else:
freqs = [self[sample] for sample in samples]
ylabel = "Counts"
# percents = [f * 100 for f in freqs] only in ProbDist?
pylab.grid(True, color="silver")
if not "linewidth" in kwargs:
kwargs["linewidth"] = 2
if "title" in kwargs:
pylab.title(kwargs["title"])
del kwargs["title"]
pylab.plot(freqs, **kwargs)
pylab.xticks(range(len(samples)), [unicode(s) for s in samples], rotation=90)
pylab.xlabel("Samples")
pylab.ylabel(ylabel)
pylab.show()
def tabulate(self, *args, **kwargs):
"""
Tabulate the given samples from the frequency distribution,
displaying the most frequent sample first. If an integer
parameter is supplied, stop after this many samples have been
tabulated. If two integer parameters m, n are supplied, tabulate a
subset of the samples, beginning with m and stopping at n-1.
:param samples: The samples to tabulate (default is all samples)
:type samples: list
"""
if len(args) == 0:
args = [len(self)]
samples = list(islice(self, *args))
cumulative = _get_kwarg(kwargs, 'cumulative', False)
if cumulative:
freqs = list(self._cumulative_frequencies(samples))
else:
freqs = [self[sample] for sample in samples]
# percents = [f * 100 for f in freqs] only in ProbDist?
for i in range(len(samples)):
print "%4s" % str(samples[i]),
print
for i in range(len(samples)):
print "%4d" % freqs[i],
print
def _sort_keys_by_value(self):
if not self._item_cache:
self._item_cache = sorted(dict.items(self), key=lambda x:(-x[1], x[0]))
def keys(self):
"""
Return the samples sorted in decreasing order of frequency.
:rtype: list(any)
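For illustration (hypothetical counts):
>>> from nltk.probability import FreqDist
>>> FreqDist('abbccc').keys()
['c', 'b', 'a']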
"""
self._sort_keys_by_value()
return map(itemgetter(0), self._item_cache)
def values(self):
"""
Return the samples sorted in decreasing order of frequency.
:rtype: list(any)
"""
self._sort_keys_by_value()
return map(itemgetter(1), self._item_cache)
def items(self):
"""
Return the items sorted in decreasing order of frequency.
:rtype: list(tuple)
"""
self._sort_keys_by_value()
return self._item_cache[:]
def __iter__(self):
"""
Return the samples sorted in decreasing order of frequency.
:rtype: iter
"""
return iter(self.keys())
def iterkeys(self):
"""
Return the samples sorted in decreasing order of frequency.
:rtype: iter
"""
return iter(self.keys())
def itervalues(self):
"""
Return the values sorted in decreasing order.
:rtype: iter
"""
return iter(self.values())
def iteritems(self):
"""
Return the items sorted in decreasing order of frequency.
:rtype: iter of any
"""
self._sort_keys_by_value()
return iter(self._item_cache)
def copy(self):
"""
Create a copy of this frequency distribution.
:rtype: FreqDist
"""
return self.__class__(self)
def update(self, samples):
"""
Update the frequency distribution with the provided list of samples.
This is a faster way to add multiple samples to the distribution.
:param samples: The samples to add.
:type samples: list
"""
try:
sample_iter = samples.iteritems()
except AttributeError:
sample_iter = imap(lambda x: (x,1), samples)
for sample, count in sample_iter:
self.inc(sample, count=count)
def pop(self, sample):
# remove the sample's full count from N, not just one outcome,
# so that N() stays consistent with the remaining counts
self._N -= self.get(sample, 0)
self._reset_caches()
return dict.pop(self, sample)
def popitem(self):
sample, count = dict.popitem(self)
self._N -= count
self._reset_caches()
return (sample, count)
def clear(self):
self._N = 0
self._reset_caches()
dict.clear(self)
def _reset_caches(self):
self._Nr_cache = None
self._max_cache = None
self._item_cache = None
def __add__(self, other):
clone = self.copy()
clone.update(other)
return clone
def __le__(self, other):
if not isinstance(other, FreqDist): return False
return set(self).issubset(other) and all(self[key] <= other[key] for key in self)
def __lt__(self, other):
if not isinstance(other, FreqDist): return False
return self <= other and self != other
def __ge__(self, other):
if not isinstance(other, FreqDist): return False
return other <= self
def __gt__(self, other):
if not isinstance(other, FreqDist): return False
return other < self
def __repr__(self):
"""
Return a string representation of this FreqDist.
:rtype: string
"""
return '<FreqDist with %d samples and %d outcomes>' % (len(self), self.N())
def __str__(self):
"""
Return a string representation of this FreqDist.
:rtype: string
"""
items = ['%r: %r' % (s, self[s]) for s in self.keys()[:10]]
if len(self) > 10:
items.append('...')
return '<FreqDist: %s>' % ', '.join(items)
def __getitem__(self, sample):
return self.get(sample, 0)
##//////////////////////////////////////////////////////
## Probability Distributions
##//////////////////////////////////////////////////////
class ProbDistI(object):
"""
A probability distribution for the outcomes of an experiment. A
probability distribution specifies how likely it is that an
experiment will have any given outcome. For example, a
probability distribution could be used to predict the probability
that a token in a document will have a given type. Formally, a
probability distribution can be defined as a function mapping from
samples to nonnegative real numbers, such that the sum of every
number in the function's range is 1.0. A ``ProbDist`` is often
used to model the probability distribution of the experiment used
to generate a frequency distribution.
"""
SUM_TO_ONE = True
"""True if the probabilities of the samples in this probability
distribution will always sum to one."""
def __init__(self):
if self.__class__ == ProbDistI:
raise NotImplementedError("Interfaces can't be instantiated")
def prob(self, sample):
"""
Return the probability for a given sample. Probabilities
are always real numbers in the range [0, 1].
:param sample: The sample whose probability
should be returned.
:type sample: any
:rtype: float
"""
raise NotImplementedError()
def logprob(self, sample):
"""
Return the base 2 logarithm of the probability for a given sample.
:param sample: The sample whose probability
should be returned.
:type sample: any
:rtype: float
"""
# Default definition, in terms of prob()
p = self.prob(sample)
if p == 0:
# Use some approximation to infinity. What this does
# depends on your system's float implementation.
return _NINF
else:
return math.log(p, 2)
def max(self):
"""
Return the sample with the greatest probability. If two or
more samples have the same probability, return one of them;
which sample is returned is undefined.
:rtype: any
"""
raise NotImplementedError()
def samples(self):
"""
Return a list of all samples that have nonzero probabilities.
Use ``prob`` to find the probability of each sample.
:rtype: list
"""
raise NotImplementedError()
# cf self.SUM_TO_ONE
def discount(self):
"""
Return the ratio by which counts are discounted on average: c*/c
:rtype: float
"""
return 0.0
# Subclasses should define more efficient implementations of this,
# where possible.
def generate(self):
"""
Return a randomly selected sample from this probability distribution.
The probability of returning each sample ``samp`` is equal to
``self.prob(samp)``.
"""
p = random.random()
for sample in self.samples():
p -= self.prob(sample)
if p <= 0: return sample
# allow for some rounding error:
if p < .0001:
return sample
# we *should* never get here
if self.SUM_TO_ONE:
warnings.warn("Probability distribution %r sums to %r; generate()"
" is returning an arbitrary sample." % (self, 1-p))
return random.choice(list(self.samples()))
class UniformProbDist(ProbDistI):
"""
A probability distribution that assigns equal probability to each
sample in a given set; and a zero probability to all other
samples.
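A minimal sketch with a hypothetical sample set:
>>> from nltk.probability import UniformProbDist
>>> ud = UniformProbDist(['a', 'b', 'c', 'd'])
>>> ud.prob('a')
0.25
>>> ud.prob('z')
0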
"""
def __init__(self, samples):
"""
Construct a new uniform probability distribution, that assigns
equal probability to each sample in ``samples``.
:param samples: The samples that should be given uniform
probability.
:type samples: list
:raise ValueError: If ``samples`` is empty.
"""
if len(samples) == 0:
raise ValueError('A Uniform probability distribution must '+
'have at least one sample.')
self._sampleset = set(samples)
self._prob = 1.0/len(self._sampleset)
self._samples = list(self._sampleset)
def prob(self, sample):
if sample in self._sampleset: return self._prob
else: return 0
def max(self): return self._samples[0]
def samples(self): return self._samples
def __repr__(self):
return '<UniformProbDist with %d samples>' % len(self._sampleset)
class DictionaryProbDist(ProbDistI):
"""
A probability distribution whose probabilities are directly
specified by a given dictionary. The given dictionary maps
samples to probabilities.
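A minimal sketch with hypothetical probabilities:
>>> from nltk.probability import DictionaryProbDist
>>> dd = DictionaryProbDist({'a': 0.5, 'b': 0.25, 'c': 0.25})
>>> dd.prob('b')
0.25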
"""
def __init__(self, prob_dict=None, log=False, normalize=False):
"""
Construct a new probability distribution from the given
dictionary, which maps values to probabilities (or to log
probabilities, if ``log`` is true). If ``normalize`` is
true, then the probability values are scaled by a constant
factor such that they sum to 1.
If called without arguments, the resulting probability
distribution assigns zero probabiliy to all values.
"""
if prob_dict is None:
self._prob_dict = {}
else:
self._prob_dict = prob_dict.copy()
self._log = log
# Normalize the distribution, if requested.
if normalize:
if log:
value_sum = sum_logs(self._prob_dict.values())
if value_sum <= _NINF:
logp = math.log(1.0/len(prob_dict), 2)
for x in prob_dict:
self._prob_dict[x] = logp
else:
for (x, p) in self._prob_dict.items():
self._prob_dict[x] -= value_sum
else:
value_sum = sum(self._prob_dict.values())
if value_sum == 0:
p = 1.0/len(prob_dict)
for x in prob_dict:
self._prob_dict[x] = p
else:
norm_factor = 1.0/value_sum
for (x, p) in self._prob_dict.items():
self._prob_dict[x] *= norm_factor
def prob(self, sample):
if self._log:
if sample not in self._prob_dict: return 0
else: return 2**(self._prob_dict[sample])
else:
return self._prob_dict.get(sample, 0)
def logprob(self, sample):
if self._log:
return self._prob_dict.get(sample, _NINF)
else:
if sample not in self._prob_dict: return _NINF
elif self._prob_dict[sample] == 0: return _NINF
else: return math.log(self._prob_dict[sample], 2)
def max(self):
if not hasattr(self, '_max'):
self._max = max((p,v) for (v,p) in self._prob_dict.items())[1]
return self._max
def samples(self):
return self._prob_dict.keys()
def __repr__(self):
return '<ProbDist with %d samples>' % len(self._prob_dict)
class MLEProbDist(ProbDistI):
"""
The maximum likelihood estimate for the probability distribution
of the experiment used to generate a frequency distribution. The
"maximum likelihood estimate" approximates the probability of
each sample as the frequency of that sample in the frequency
distribution.
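For illustration (hypothetical counts):
>>> from nltk.probability import FreqDist, MLEProbDist
>>> fd = FreqDist('aab')
>>> mle = MLEProbDist(fd)
>>> round(mle.prob('a'), 3)   # 2 of 3 outcomes
0.667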
"""
def __init__(self, freqdist, bins=None):
"""
Use the maximum likelihood estimate to create a probability
distribution for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
"""
self._freqdist = freqdist
def freqdist(self):
"""
Return the frequency distribution that this probability
distribution is based on.
:rtype: FreqDist
"""
return self._freqdist
def prob(self, sample):
return self._freqdist.freq(sample)
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def __repr__(self):
"""
:rtype: str
:return: A string representation of this ``ProbDist``.
"""
return '<MLEProbDist based on %d samples>' % self._freqdist.N()
class LidstoneProbDist(ProbDistI):
"""
The Lidstone estimate for the probability distribution of the
experiment used to generate a frequency distribution. The
"Lidstone estimate" is paramaterized by a real number *gamma*,
which typically ranges from 0 to 1. The Lidstone estimate
approximates the probability of a sample with count *c* from an
experiment with *N* outcomes and *B* bins as
``c+gamma)/(N+B*gamma)``. This is equivalant to adding
*gamma* to the count for each bin, and taking the maximum
likelihood estimate of the resulting frequency distribution.
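For illustration (hypothetical counts; gamma=0.2 is an arbitrary choice):
>>> from nltk.probability import FreqDist, LidstoneProbDist
>>> fd = FreqDist('aab')               # N=3, B=2
>>> lid = LidstoneProbDist(fd, 0.2)
>>> round(lid.prob('a'), 4)            # (2+0.2)/(3+2*0.2)
0.6471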
"""
SUM_TO_ONE = False
def __init__(self, freqdist, gamma, bins=None):
"""
Use the Lidstone estimate to create a probability distribution
for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
:type gamma: float
:param gamma: A real number used to parameterize the
estimate. The Lidstone estimate is equivalent to adding
*gamma* to the count for each bin, and taking the
maximum likelihood estimate of the resulting frequency
distribution.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
if (bins == 0) or (bins is None and freqdist.N() == 0):
name = self.__class__.__name__[:-8]
raise ValueError('A %s probability distribution ' % name +
'must have at least one bin.')
if (bins is not None) and (bins < freqdist.B()):
name = self.__class__.__name__[:-8]
raise ValueError('\nThe number of bins in a %s distribution ' % name +
'(%d) must be greater than or equal to\n' % bins +
'the number of bins in the FreqDist used ' +
'to create it (%d).' % freqdist.B())
self._freqdist = freqdist
self._gamma = float(gamma)
self._N = self._freqdist.N()
if bins is None: bins = freqdist.B()
self._bins = bins
self._divisor = self._N + bins * gamma
if self._divisor == 0.0:
# In extreme cases we force the probability to be 0,
# which it will be, since the count will be 0:
self._gamma = 0
self._divisor = 1
def freqdist(self):
"""
Return the frequency distribution that this probability
distribution is based on.
:rtype: FreqDist
"""
return self._freqdist
def prob(self, sample):
c = self._freqdist[sample]
return (c + self._gamma) / self._divisor
def max(self):
# For Lidstone distributions, probability is monotonic with
# frequency, so the most probable sample is the one that
# occurs most frequently.
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def discount(self):
gb = self._gamma * self._bins
return gb / (self._N + gb)
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<LidstoneProbDist based on %d samples>' % self._freqdist.N()
class LaplaceProbDist(LidstoneProbDist):
"""
The Laplace estimate for the probability distribution of the
experiment used to generate a frequency distribution. The
"Laplace estimate" approximates the probability of a sample with
count *c* from an experiment with *N* outcomes and *B* bins as
*(c+1)/(N+B)*. This is equivalent to adding one to the count for
each bin, and taking the maximum likelihood estimate of the
resulting frequency distribution.
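For illustration (hypothetical counts):
>>> from nltk.probability import FreqDist, LaplaceProbDist
>>> fd = FreqDist('aab')       # N=3, B=2
>>> lp = LaplaceProbDist(fd)
>>> lp.prob('a')               # (2+1)/(3+2)
0.6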
"""
def __init__(self, freqdist, bins=None):
"""
Use the Laplace estimate to create a probability distribution
for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
LidstoneProbDist.__init__(self, freqdist, 1, bins)
def __repr__(self):
"""
:rtype: str
:return: A string representation of this ``ProbDist``.
"""
return '<LaplaceProbDist based on %d samples>' % self._freqdist.N()
class ELEProbDist(LidstoneProbDist):
"""
The expected likelihood estimate for the probability distribution
of the experiment used to generate a frequency distribution. The
"expected likelihood estimate" approximates the probability of a
sample with count *c* from an experiment with *N* outcomes and
*B* bins as *(c+0.5)/(N+B/2)*. This is equivalent to adding 0.5
to the count for each bin, and taking the maximum likelihood
estimate of the resulting frequency distribution.
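For illustration (hypothetical counts):
>>> from nltk.probability import FreqDist, ELEProbDist
>>> fd = FreqDist('aab')       # N=3, B=2
>>> ele = ELEProbDist(fd)
>>> ele.prob('a')              # (2+0.5)/(3+2*0.5)
0.625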
"""
def __init__(self, freqdist, bins=None):
"""
Use the expected likelihood estimate to create a probability
distribution for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
LidstoneProbDist.__init__(self, freqdist, 0.5, bins)
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<ELEProbDist based on %d samples>' % self._freqdist.N()
class HeldoutProbDist(ProbDistI):
"""
The heldout estimate for the probability distribution of the
experiment used to generate two frequency distributions. These
two frequency distributions are called the "heldout frequency
distribution" and the "base frequency distribution." The
"heldout estimate" uses uses the "heldout frequency
distribution" to predict the probability of each sample, given its
frequency in the "base frequency distribution".
In particular, the heldout estimate approximates the probability
for a sample that occurs *r* times in the base distribution as
the average frequency in the heldout distribution of all samples
that occur *r* times in the base distribution.
This average frequency is *Tr[r]/(Nr[r].N)*, where:
- *Tr[r]* is the total count in the heldout distribution for
all samples that occur *r* times in the base distribution.
- *Nr[r]* is the number of samples that occur *r* times in
the base distribution.
- *N* is the number of outcomes recorded by the heldout
frequency distribution.
In order to increase the efficiency of the ``prob`` member
function, *Tr[r]/(Nr[r].N)* is precomputed for each value of *r*
when the ``HeldoutProbDist`` is created.
:type _estimate: list(float)
:ivar _estimate: A list mapping from *r*, the number of
times that a sample occurs in the base distribution, to the
probability estimate for that sample. ``_estimate[r]`` is
calculated by finding the average frequency in the heldout
distribution of all samples that occur *r* times in the base
distribution. In particular, ``_estimate[r]`` =
*Tr[r]/(Nr[r].N)*.
:type _max_r: int
:ivar _max_r: The maximum number of times that any sample occurs
in the base distribution. ``_max_r`` is used to decide how
large ``_estimate`` must be.
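A small worked example with hypothetical counts:
>>> from nltk.probability import FreqDist, HeldoutProbDist
>>> base, heldout = FreqDist('aab'), FreqDist('abb')
>>> hd = HeldoutProbDist(base, heldout)
>>> round(hd.prob('a'), 3)   # Tr[2]/(Nr[2]*N) = 1/(1*3)
0.333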
"""
SUM_TO_ONE = False
def __init__(self, base_fdist, heldout_fdist, bins=None):
"""
Use the heldout estimate to create a probability distribution
for the experiment used to generate ``base_fdist`` and
``heldout_fdist``.
:type base_fdist: FreqDist
:param base_fdist: The base frequency distribution.
:type heldout_fdist: FreqDist
:param heldout_fdist: The heldout frequency distribution.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
self._base_fdist = base_fdist
self._heldout_fdist = heldout_fdist
# The max number of times any sample occurs in base_fdist.
self._max_r = base_fdist[base_fdist.max()]
# Calculate Tr, Nr, and N.
Tr = self._calculate_Tr()
Nr = [base_fdist.Nr(r, bins) for r in range(self._max_r+1)]
N = heldout_fdist.N()
# Use Tr, Nr, and N to compute the probability estimate for
# each value of r.
self._estimate = self._calculate_estimate(Tr, Nr, N)
def _calculate_Tr(self):
"""
Return the list *Tr*, where *Tr[r]* is the total count in
``heldout_fdist`` for all samples that occur *r*
times in ``base_fdist``.
:rtype: list(float)
"""
Tr = [0.0] * (self._max_r+1)
for sample in self._heldout_fdist:
r = self._base_fdist[sample]
Tr[r] += self._heldout_fdist[sample]
return Tr
def _calculate_estimate(self, Tr, Nr, N):
"""
Return the list *estimate*, where *estimate[r]* is the probability
estimate for any sample that occurs *r* times in the base frequency
distribution. In particular, *estimate[r]* is *Tr[r]/(Nr[r].N)*.
In the special case that *Nr[r]=0*, *estimate[r]* will never be used;
so we define *estimate[r]=None* for those cases.
:rtype: list(float)
:type Tr: list(float)
:param Tr: the list *Tr*, where *Tr[r]* is the total count in
the heldout distribution for all samples that occur *r*
times in base distribution.
:type Nr: list(float)
:param Nr: The list *Nr*, where *Nr[r]* is the number of
samples that occur *r* times in the base distribution.
:type N: int
:param N: The total number of outcomes recorded by the heldout
frequency distribution.
"""
estimate = []
for r in range(self._max_r+1):
if Nr[r] == 0: estimate.append(None)
else: estimate.append(Tr[r]/(Nr[r]*N))
return estimate
def base_fdist(self):
"""
Return the base frequency distribution that this probability
distribution is based on.
:rtype: FreqDist
"""
return self._base_fdist
def heldout_fdist(self):
"""
Return the heldout frequency distribution that this
probability distribution is based on.
:rtype: FreqDist
"""
return self._heldout_fdist
def samples(self):
return self._base_fdist.keys()
def prob(self, sample):
# Use our precomputed probability estimate.
r = self._base_fdist[sample]
return self._estimate[r]
def max(self):
# Note: the Heldout estimation is *not* necessarily monotonic;
# so this implementation is currently broken. However, it
# should give the right answer *most* of the time. :)
return self._base_fdist.max()
def discount(self):
raise NotImplementedError()
def __repr__(self):
"""
:rtype: str
:return: A string representation of this ``ProbDist``.
"""
s = '<HeldoutProbDist: %d base samples; %d heldout samples>'
return s % (self._base_fdist.N(), self._heldout_fdist.N())
class CrossValidationProbDist(ProbDistI):
"""
The cross-validation estimate for the probability distribution of
the experiment used to generate a set of frequency distributions.
The "cross-validation estimate" for the probability of a sample
is found by averaging the held-out estimates for the sample in
each pair of frequency distributions.
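A small worked example with hypothetical counts:
>>> from nltk.probability import FreqDist, CrossValidationProbDist
>>> fds = [FreqDist('aab'), FreqDist('abb')]
>>> cv = CrossValidationProbDist(fds, 2)
>>> round(cv.prob('a'), 2)   # average of the two heldout estimates
0.5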
"""
SUM_TO_ONE = False
def __init__(self, freqdists, bins):
"""
Use the cross-validation estimate to create a probability
distribution for the experiment used to generate
``freqdists``.
:type freqdists: list(FreqDist)
:param freqdists: A list of the frequency distributions
generated by the experiment.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
self._freqdists = freqdists
# Create a heldout probability distribution for each pair of
# frequency distributions in freqdists.
self._heldout_probdists = []
for fdist1 in freqdists:
for fdist2 in freqdists:
if fdist1 is not fdist2:
probdist = HeldoutProbDist(fdist1, fdist2, bins)
self._heldout_probdists.append(probdist)
def freqdists(self):
"""
Return the list of frequency distributions that this ``ProbDist`` is based on.
:rtype: list(FreqDist)
"""
return self._freqdists
def samples(self):
# [xx] nb: this is not too efficient
return set(sum([fd.keys() for fd in self._freqdists], []))
def prob(self, sample):
# Find the average probability estimate returned by each
# heldout distribution.
prob = 0.0
for heldout_probdist in self._heldout_probdists:
prob += heldout_probdist.prob(sample)
return prob/len(self._heldout_probdists)
def discount(self):
raise NotImplementedError()
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<CrossValidationProbDist: %d-way>' % len(self._freqdists)
class WittenBellProbDist(ProbDistI):
"""
The Witten-Bell estimate of a probability distribution. This distribution
allocates uniform probability mass to as yet unseen events by using the
number of events that have only been seen once. The probability mass
reserved for unseen events is equal to *T / (N + T)*
where *T* is the number of observed event types and *N* is the total
number of observed events. This equates to the maximum likelihood estimate
of a new type event occurring. The remaining probability mass is discounted
such that all probability estimates sum to one, yielding:
- *p = T / Z (N + T)*, if count = 0
- *p = c / (N + T)*, otherwise
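For illustration (hypothetical counts; ``bins`` must exceed
``freqdist.B()`` here so that *Z* is nonzero):
>>> from nltk.probability import FreqDist, WittenBellProbDist
>>> fd = FreqDist('aab')                 # N=3, T=B=2
>>> wb = WittenBellProbDist(fd, bins=3)  # Z = bins - B = 1
>>> wb.prob('a')                         # 2 / (N + T)
0.4
>>> wb.prob('z')                         # T / (Z * (N + T))
0.4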
"""
def __init__(self, freqdist, bins=None):
"""
Creates a distribution of Witten-Bell probability estimates. This
distribution allocates uniform probability mass to as yet unseen
events by using the number of events that have only been seen once. The
probability mass reserved for unseen events is equal to *T / (N + T)*
where *T* is the number of observed event types and *N* is the total
number of observed events. This equates to the maximum likelihood
estimate of a new type event occurring. The remaining probability mass
is discounted such that all probability estimates sum to one,
yielding:
- *p = T / Z (N + T)*, if count = 0
- *p = c / (N + T)*, otherwise
The parameters *T* and *N* are taken from the ``freqdist`` parameter
(the ``B()`` and ``N()`` values). The normalising factor *Z* is
calculated using these values along with the ``bins`` parameter.
:param freqdist: The frequency counts upon which to base the
estimation.
:type freqdist: FreqDist
:param bins: The number of possible event types. This must be at least
as large as the number of bins in the ``freqdist``. If None, then
it's assumed to be equal to that of the ``freqdist``
:type bins: int
"""
assert bins is None or bins >= freqdist.B(),\
'Bins parameter must not be less than freqdist.B()'
if bins is None:
bins = freqdist.B()
self._freqdist = freqdist
self._T = self._freqdist.B()
self._Z = bins - self._freqdist.B()
self._N = self._freqdist.N()
# self._P0 is P(0), precalculated for efficiency:
if self._N==0:
# if freqdist is empty, we approximate P(0) by a UniformProbDist:
self._P0 = 1.0 / self._Z
else:
self._P0 = self._T / float(self._Z * (self._N + self._T))
def prob(self, sample):
# inherit docs from ProbDistI
c = self._freqdist[sample]
if c == 0:
return self._P0
else:
return c / float(self._N + self._T)
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def freqdist(self):
return self._freqdist
def discount(self):
raise NotImplementedError()
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<WittenBellProbDist based on %d samples>' % self._freqdist.N()
##//////////////////////////////////////////////////////
## Good-Turing Probability Distributions
##//////////////////////////////////////////////////////
# Good-Turing frequency estimation was contributed by Alan Turing and
# his statistical assistant I.J. Good, during their collaboration in
# WWII. It is a statistical technique for predicting the
# probability of occurrence of objects belonging to an unknown number
# of species, given past observations of such objects and their
# species. (In drawing balls from an urn, the 'objects' would be balls
# and the 'species' would be the distinct colors of the balls, finite
# but unknown in number.)
#
# The situation where Nr is zero for some r is quite common in the
# original Good-Turing estimation. Bill Gale and Geoffrey Sampson present a
# simple and effective approach, Simple Good-Turing. As a smoothing
# curve they simply use a power curve:
#
# Nr = a*r^b (with b < -1 to give the appropriate hyperbolic
# relationship)
#
# They estimate a and b by simple linear regression technique on the
# logarithmic form of the equation:
#
# log Nr = a + b*log(r)
#
# However, they suggest that such a simple curve is probably only
# appropriate for high values of r. For low values of r, they use the
# measured Nr directly. (see M&S, p.213)
#
# Gale and Sampson propose to use r while the difference between r and
# r* is greater than 1.96 times the standard deviation, and to switch
# to r* once it is less than or equal:
#
# |r - r*| > 1.96 * sqrt((r + 1)^2 (Nr+1 / Nr^2) (1 + Nr+1 / Nr))
#
# The 1.96 coefficient corresponds to a 0.05 significance criterion,
# some implementations can use a coefficient of 1.65 for a 0.1
# significance criterion.
#
class GoodTuringProbDist(ProbDistI):
"""
The Good-Turing estimate of a probability distribution. This method
calculates the probability mass to assign to events with zero or low
counts based on the number of events with higher counts. It does so by
using the smoothed count *c\**:
- *c\* = (c + 1) N(c + 1) / N(c)* for c >= 1
- *things with frequency zero in training* = N(1) for c == 0
where *c* is the original count, *N(i)* is the number of event types
observed with count *i*. We can think of the count of unseen events as
the count of frequency one (see Jurafsky & Martin 2nd Edition, p101).
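For illustration (hypothetical counts):
>>> from nltk.probability import FreqDist, GoodTuringProbDist
>>> fd = FreqDist('abracadabra')   # a:5, b:2, r:2, c:1, d:1
>>> gt = GoodTuringProbDist(fd)
>>> round(gt.prob('c'), 3)         # (1+1)*Nr(2)/(Nr(1)*N) = 4/22
0.182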
"""
def __init__(self, freqdist, bins=None):
"""
:param freqdist: The frequency counts upon which to base the
estimation.
:type freqdist: FreqDist
:param bins: The number of possible event types. This must be at least
as large as the number of bins in the ``freqdist``. If None, then
it's assumed to be equal to that of the ``freqdist``
:type bins: int
"""
assert bins is None or bins >= freqdist.B(),\
'Bins parameter must not be less than freqdist.B()'
if bins is None:
bins = freqdist.B()
self._freqdist = freqdist
self._bins = bins
def prob(self, sample):
count = self._freqdist[sample]
# unseen sample's frequency (count zero) uses frequency one's
if count == 0 and self._freqdist.N() != 0:
p0 = 1.0 * self._freqdist.Nr(1) / self._freqdist.N()
if self._bins == self._freqdist.B():
p0 = 0.0
else:
p0 = p0 / (1.0 * self._bins - self._freqdist.B())
# return the reserved mass directly for unseen samples; the
# fall-through below would otherwise discard the computed p0
return p0
nc = self._freqdist.Nr(count)
ncn = self._freqdist.Nr(count + 1)
# avoid divide-by-zero errors for sparse datasets
if nc == 0 or self._freqdist.N() == 0:
return 0
return 1.0 * (count + 1) * ncn / (nc * self._freqdist.N())
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def discount(self):
"""
:return: The probability mass transferred from the
seen samples to the unseen samples.
:rtype: float
"""
return 1.0 * self._freqdist.Nr(1) / self._freqdist.N()
def freqdist(self):
return self._freqdist
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<GoodTuringProbDist based on %d samples>' % self._freqdist.N()
##//////////////////////////////////////////////////////
## Simple Good-Turing Probability Distributions
##//////////////////////////////////////////////////////
class SimpleGoodTuringProbDist(ProbDistI):
"""
SimpleGoodTuring ProbDist approximates the mapping from frequency to
frequency of frequency by a straight line in log space, fit by linear
regression.
Details of Simple Good-Turing algorithm can be found in:
- Good Turing smoothing without tears" (Gale & Sampson 1995),
Journal of Quantitative Linguistics, vol. 2 pp. 217-237.
- "Speech and Language Processing (Jurafsky & Martin),
2nd Edition, Chapter 4.5 p103 (log(Nc) = a + b*log(c))
- http://www.grsampson.net/RGoodTur.html
Given a set of pair (xi, yi), where the xi denotes the freqency and
yi denotes the freqency of freqency, we want to minimize their
square variation. E(x) and E(y) represent the mean of xi and yi.
- slope: b = sigma ((xi-E(x)(yi-E(y))) / sigma ((xi-E(x))(xi-E(x)))
- intercept: a = E(y) - b.E(x)
"""
def __init__(self, freqdist, bins=None):
"""
:param freqdist: The frequency counts upon which to base the
estimation.
:type freqdist: FreqDist
:param bins: The number of possible event types. This must be
larger than the number of bins in the ``freqdist``. If None,
then it's assumed to be equal to ``freqdist``.B() + 1
:type bins: int
"""
assert bins is None or bins > freqdist.B(),\
'Bins parameter must not be less than freqdist.B() + 1'
if bins is None:
bins = freqdist.B() + 1
self._freqdist = freqdist
self._bins = bins
r, nr = self._r_Nr()
self.find_best_fit(r, nr)
self._switch(r, nr)
self._renormalize(r, nr)
def _r_Nr(self):
"""
        Split the frequency distribution into two lists (r, Nr), where Nr(r) > 0
"""
r, nr = [], []
b, i = 0, 0
while b != self._freqdist.B():
nr_i = self._freqdist.Nr(i)
if nr_i > 0:
b += nr_i
r.append(i)
nr.append(nr_i)
i += 1
return (r, nr)
def find_best_fit(self, r, nr):
"""
Use simple linear regression to tune parameters self._slope and
self._intercept in the log-log space based on count and Nr(count)
(Work in log space to avoid floating point underflow.)
"""
        # For higher sample frequencies the data points become horizontal
        # along the line Nr=1. To create a more evident linear model in
        # log-log space, we average each positive Nr value with the
        # surrounding zero values. (Church and Gale, 1991)
if not r or not nr:
# Empty r or nr?
return
zr = []
for j in range(len(r)):
if j > 0:
i = r[j-1]
else:
i = 0
if j != len(r) - 1:
k = r[j+1]
else:
k = 2 * r[j] - i
zr_ = 2.0 * nr[j] / (k - i)
zr.append(zr_)
log_r = [math.log(i) for i in r]
log_zr = [math.log(i) for i in zr]
xy_cov = x_var = 0.0
x_mean = 1.0 * sum(log_r) / len(log_r)
y_mean = 1.0 * sum(log_zr) / len(log_zr)
for (x, y) in zip(log_r, log_zr):
xy_cov += (x - x_mean) * (y - y_mean)
x_var += (x - x_mean)**2
if x_var != 0:
self._slope = xy_cov / x_var
else:
self._slope = 0.0
self._intercept = y_mean - self._slope * x_mean
def _switch(self, r, nr):
"""
Calculate the r frontier where we must switch from Nr to Sr
when estimating E[Nr].
"""
for i, r_ in enumerate(r):
if len(r) == i + 1 or r[i+1] != r_ + 1:
# We are at the end of r, or there is a gap in r
self._switch_at = r_
break
Sr = self.smoothedNr
smooth_r_star = (r_ + 1) * Sr(r_+1) / Sr(r_)
unsmooth_r_star = 1.0 * (r_ + 1) * nr[i+1] / nr[i]
std = math.sqrt(self._variance(r_, nr[i], nr[i+1]))
if abs(unsmooth_r_star-smooth_r_star) <= 1.96 * std:
self._switch_at = r_
break
def _variance(self, r, nr, nr_1):
r = float(r)
nr = float(nr)
nr_1 = float(nr_1)
return (r + 1.0)**2 * (nr_1 / nr**2) * (1.0 + nr_1 / nr)
def _renormalize(self, r, nr):
"""
It is necessary to renormalize all the probability estimates to
ensure a proper probability distribution results. This can be done
by keeping the estimate of the probability mass for unseen items as
N(1)/N and renormalizing all the estimates for previously seen items
(as Gale and Sampson (1995) propose). (See M&S P.213, 1999)
"""
prob_cov = 0.0
for r_, nr_ in zip(r, nr):
prob_cov += nr_ * self._prob_measure(r_)
if prob_cov:
self._renormal = (1 - self._prob_measure(0)) / prob_cov
def smoothedNr(self, r):
"""
        Return the smoothed estimate of the number of samples with count r.
        :param r: The frequency count.
:type r: int
:rtype: float
"""
        # Nr = a*r^b (with b < -1 to give the appropriate hyperbolic
        # relationship); in log space, log(Nr) = log(a) + b*log(r),
        # where log(a) and b were estimated by simple linear regression
        # on the logarithmic form of the equation in find_best_fit().
return math.exp(self._intercept + self._slope * math.log(r))
def prob(self, sample):
"""
Return the sample's probability.
:param sample: sample of the event
:type sample: str
:rtype: float
"""
count = self._freqdist[sample]
p = self._prob_measure(count)
if count == 0:
if self._bins == self._freqdist.B():
p = 0.0
else:
p = p / (1.0 * self._bins - self._freqdist.B())
else:
p = p * self._renormal
return p
def _prob_measure(self, count):
if count == 0 and self._freqdist.N() == 0 :
return 1.0
elif count == 0 and self._freqdist.N() != 0:
return 1.0 * self._freqdist.Nr(1) / self._freqdist.N()
if self._switch_at > count:
Er_1 = 1.0 * self._freqdist.Nr(count+1)
Er = 1.0 * self._freqdist.Nr(count)
else:
Er_1 = self.smoothedNr(count+1)
Er = self.smoothedNr(count)
r_star = (count + 1) * Er_1 / Er
return r_star / self._freqdist.N()
    def check(self):
        # Sanity check: the renormalized probability of the seen samples
        # plus the reserved unseen mass should sum to one.
        prob_sum = self._prob_measure(0)
        r, nr = self._r_Nr()
        for r_, nr_ in zip(r, nr):
            prob_sum += nr_ * self._prob_measure(r_) * self._renormal
        print "Probability Sum:", prob_sum
        #assert prob_sum == 1.0, "probability sum should be one!"
def discount(self):
"""
This function returns the total mass of probability transfers from the
seen samples to the unseen samples.
"""
return 1.0 * self.smoothedNr(1) / self._freqdist.N()
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def freqdist(self):
return self._freqdist
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<SimpleGoodTuringProbDist based on %d samples>'\
% self._freqdist.N()
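# A minimal usage sketch (illustrative only; see also gt_demo() below):
#
#     fd = FreqDist()
#     for token in ['a', 'a', 'a', 'b', 'b', 'c']:
#         fd.inc(token)
#     sgt = SimpleGoodTuringProbDist(fd)   # bins defaults to fd.B() + 1
#     sgt.prob('a')    # renormalized smoothed probability of a seen sample
#     sgt.prob('z')    # mass reserved for the single unseen bin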
class MutableProbDist(ProbDistI):
"""
    A mutable probdist where the probabilities may be easily modified. This
simply copies an existing probdist, storing the probability values in a
mutable dictionary and providing an update method.
"""
def __init__(self, prob_dist, samples, store_logs=True):
"""
Creates the mutable probdist based on the given prob_dist and using
the list of samples given. These values are stored as log
probabilities if the store_logs flag is set.
:param prob_dist: the distribution from which to garner the
probabilities
:type prob_dist: ProbDist
:param samples: the complete set of samples
:type samples: sequence of any
:param store_logs: whether to store the probabilities as logarithms
:type store_logs: bool
"""
try:
import numpy
except ImportError:
print "Error: Please install numpy; for instructions see http://www.nltk.org/"
exit()
self._samples = samples
self._sample_dict = dict((samples[i], i) for i in range(len(samples)))
self._data = numpy.zeros(len(samples), numpy.float64)
for i in range(len(samples)):
if store_logs:
self._data[i] = prob_dist.logprob(samples[i])
else:
self._data[i] = prob_dist.prob(samples[i])
self._logs = store_logs
def samples(self):
# inherit documentation
return self._samples
def prob(self, sample):
# inherit documentation
i = self._sample_dict.get(sample)
if i is not None:
if self._logs:
return 2**(self._data[i])
else:
return self._data[i]
else:
return 0.0
def logprob(self, sample):
# inherit documentation
i = self._sample_dict.get(sample)
if i is not None:
if self._logs:
return self._data[i]
else:
return math.log(self._data[i], 2)
else:
return float('-inf')
def update(self, sample, prob, log=True):
"""
Update the probability for the given sample. This may cause the object
to stop being the valid probability distribution - the user must
ensure that they update the sample probabilities such that all samples
have probabilities between 0 and 1 and that all probabilities sum to
one.
:param sample: the sample for which to update the probability
:type sample: any
:param prob: the new probability
:type prob: float
:param log: is the probability already logged
:type log: bool
"""
i = self._sample_dict.get(sample)
assert i is not None
if self._logs:
if log: self._data[i] = prob
else: self._data[i] = math.log(prob, 2)
else:
if log: self._data[i] = 2**(prob)
else: self._data[i] = prob
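# A minimal usage sketch (illustrative values; requires numpy):
#
#     fd = FreqDist()
#     for token in ['a', 'a', 'b', 'b']:
#         fd.inc(token)
#     mpd = MutableProbDist(MLEProbDist(fd), ['a', 'b'])
#     mpd.update('a', 0.75, log=False)
#     mpd.update('b', 0.25, log=False)
#     mpd.prob('a')    # -> 0.75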
##//////////////////////////////////////////////////////
## Probability Distribution Operations
##//////////////////////////////////////////////////////
def log_likelihood(test_pdist, actual_pdist):
    if (not isinstance(test_pdist, ProbDistI) or
        not isinstance(actual_pdist, ProbDistI)):
        raise ValueError('expected a ProbDist.')
    # The expected log likelihood of test_pdist under actual_pdist,
    # i.e. the negative cross-entropy in bits.
    return sum(actual_pdist.prob(s) * math.log(test_pdist.prob(s), 2)
               for s in actual_pdist.samples())
def entropy(pdist):
    probs = [pdist.prob(s) for s in pdist.samples()]
    # zero-probability samples contribute nothing, and would break math.log
    return -sum(p * math.log(p, 2) for p in probs if p > 0)
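# For example, a uniform distribution over four samples has entropy
# -4 * (0.25 * log2(0.25)) = 2.0 bits.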
##//////////////////////////////////////////////////////
## Conditional Distributions
##//////////////////////////////////////////////////////
class ConditionalFreqDist(defaultdict):
"""
A collection of frequency distributions for a single experiment
run under different conditions. Conditional frequency
distributions are used to record the number of times each sample
occurred, given the condition under which the experiment was run.
For example, a conditional frequency distribution could be used to
record the frequency of each word (type) in a document, given its
length. Formally, a conditional frequency distribution can be
defined as a function that maps from each condition to the
FreqDist for the experiment under that condition.
Conditional frequency distributions are typically constructed by
repeatedly running an experiment under a variety of conditions,
and incrementing the sample outcome counts for the appropriate
conditions. For example, the following code will produce a
conditional frequency distribution that encodes how often each
word type occurs, given the length of that word type:
>>> from nltk.probability import ConditionalFreqDist
>>> from nltk.tokenize import word_tokenize
>>> sent = "the the the dog dog some other words that we do not care about"
>>> cfdist = ConditionalFreqDist()
>>> for word in word_tokenize(sent):
... condition = len(word)
... cfdist[condition].inc(word)
An equivalent way to do this is with the initializer:
>>> cfdist = ConditionalFreqDist((len(word), word) for word in word_tokenize(sent))
The frequency distribution for each condition is accessed using
the indexing operator:
>>> cfdist[3]
<FreqDist with 6 outcomes>
>>> cfdist[3].freq('the')
0.5
>>> cfdist[3]['dog']
2
When the indexing operator is used to access the frequency
distribution for a condition that has not been accessed before,
``ConditionalFreqDist`` creates a new empty FreqDist for that
condition.
"""
def __init__(self, cond_samples=None):
"""
Construct a new empty conditional frequency distribution. In
particular, the count for every sample, under every condition,
is zero.
:param cond_samples: The samples to initialize the conditional
frequency distribution with
:type cond_samples: Sequence of (condition, sample) tuples
"""
defaultdict.__init__(self, FreqDist)
if cond_samples:
for (cond, sample) in cond_samples:
self[cond].inc(sample)
def conditions(self):
"""
Return a list of the conditions that have been accessed for
this ``ConditionalFreqDist``. Use the indexing operator to
access the frequency distribution for a given condition.
Note that the frequency distributions for some conditions
may contain zero sample outcomes.
:rtype: list
"""
return sorted(self.keys())
def N(self):
"""
Return the total number of sample outcomes that have been
recorded by this ``ConditionalFreqDist``.
:rtype: int
"""
return sum(fdist.N() for fdist in self.itervalues())
def plot(self, *args, **kwargs):
"""
Plot the given samples from the conditional frequency distribution.
For a cumulative plot, specify cumulative=True.
(Requires Matplotlib to be installed.)
:param samples: The samples to plot
:type samples: list
:param title: The title for the graph
:type title: str
:param conditions: The conditions to plot (default is all)
:type conditions: list
"""
try:
import pylab
except ImportError:
raise ValueError('The plot function requires the matplotlib package (aka pylab).'
'See http://matplotlib.sourceforge.net/')
cumulative = _get_kwarg(kwargs, 'cumulative', False)
conditions = _get_kwarg(kwargs, 'conditions', self.conditions())
title = _get_kwarg(kwargs, 'title', '')
samples = _get_kwarg(kwargs, 'samples',
sorted(set(v for c in conditions for v in self[c]))) # this computation could be wasted
if not "linewidth" in kwargs:
kwargs["linewidth"] = 2
for condition in conditions:
if cumulative:
freqs = list(self[condition]._cumulative_frequencies(samples))
ylabel = "Cumulative Counts"
legend_loc = 'lower right'
else:
freqs = [self[condition][sample] for sample in samples]
ylabel = "Counts"
legend_loc = 'upper right'
# percents = [f * 100 for f in freqs] only in ConditionalProbDist?
kwargs['label'] = str(condition)
pylab.plot(freqs, *args, **kwargs)
pylab.legend(loc=legend_loc)
pylab.grid(True, color="silver")
pylab.xticks(range(len(samples)), [unicode(s) for s in samples], rotation=90)
if title:
pylab.title(title)
pylab.xlabel("Samples")
pylab.ylabel(ylabel)
pylab.show()
def tabulate(self, *args, **kwargs):
"""
Tabulate the given samples from the conditional frequency distribution.
:param samples: The samples to plot
:type samples: list
:param title: The title for the graph
:type title: str
:param conditions: The conditions to plot (default is all)
:type conditions: list
"""
cumulative = _get_kwarg(kwargs, 'cumulative', False)
conditions = _get_kwarg(kwargs, 'conditions', self.conditions())
samples = _get_kwarg(kwargs, 'samples',
sorted(set(v for c in conditions for v in self[c]))) # this computation could be wasted
condition_size = max(len(str(c)) for c in conditions)
print ' ' * condition_size,
for s in samples:
print "%4s" % str(s),
print
for c in conditions:
print "%*s" % (condition_size, str(c)),
if cumulative:
freqs = list(self[c]._cumulative_frequencies(samples))
else:
freqs = [self[c][sample] for sample in samples]
for f in freqs:
print "%4d" % f,
print
def __le__(self, other):
if not isinstance(other, ConditionalFreqDist): return False
return set(self.conditions()).issubset(other.conditions()) \
and all(self[c] <= other[c] for c in self.conditions())
def __lt__(self, other):
if not isinstance(other, ConditionalFreqDist): return False
return self <= other and self != other
def __ge__(self, other):
if not isinstance(other, ConditionalFreqDist): return False
return other <= self
def __gt__(self, other):
if not isinstance(other, ConditionalFreqDist): return False
return other < self
def __repr__(self):
"""
Return a string representation of this ``ConditionalFreqDist``.
:rtype: str
"""
return '<ConditionalFreqDist with %d conditions>' % len(self)
class ConditionalProbDistI(defaultdict):
"""
A collection of probability distributions for a single experiment
run under different conditions. Conditional probability
distributions are used to estimate the likelihood of each sample,
given the condition under which the experiment was run. For
example, a conditional probability distribution could be used to
estimate the probability of each word type in a document, given
the length of the word type. Formally, a conditional probability
distribution can be defined as a function that maps from each
condition to the ``ProbDist`` for the experiment under that
condition.
"""
def __init__(self):
raise NotImplementedError("Interfaces can't be instantiated")
def conditions(self):
"""
Return a list of the conditions that are represented by
this ``ConditionalProbDist``. Use the indexing operator to
access the probability distribution for a given condition.
:rtype: list
"""
return self.keys()
def __repr__(self):
"""
Return a string representation of this ``ConditionalProbDist``.
:rtype: str
"""
return '<%s with %d conditions>' % (type(self).__name__, len(self))
class ConditionalProbDist(ConditionalProbDistI):
"""
A conditional probability distribution modelling the experiments
that were used to generate a conditional frequency distribution.
A ConditionalProbDist is constructed from a
``ConditionalFreqDist`` and a ``ProbDist`` factory:
- The ``ConditionalFreqDist`` specifies the frequency
distribution for each condition.
- The ``ProbDist`` factory is a function that takes a
condition's frequency distribution, and returns its
probability distribution. A ``ProbDist`` class's name (such as
``MLEProbDist`` or ``HeldoutProbDist``) can be used to specify
that class's constructor.
The first argument to the ``ProbDist`` factory is the frequency
distribution that it should model; and the remaining arguments are
specified by the ``factory_args`` parameter to the
``ConditionalProbDist`` constructor. For example, the following
code constructs a ``ConditionalProbDist``, where the probability
distribution for each condition is an ``ELEProbDist`` with 10 bins:
>>> from nltk.probability import ConditionalProbDist, ELEProbDist
>>> cpdist = ConditionalProbDist(cfdist, ELEProbDist, 10)
>>> print cpdist['run'].max()
'NN'
>>> print cpdist['run'].prob('NN')
0.0813
"""
def __init__(self, cfdist, probdist_factory,
*factory_args, **factory_kw_args):
"""
Construct a new conditional probability distribution, based on
the given conditional frequency distribution and ``ProbDist``
factory.
:type cfdist: ConditionalFreqDist
:param cfdist: The ``ConditionalFreqDist`` specifying the
frequency distribution for each condition.
:type probdist_factory: class or function
:param probdist_factory: The function or class that maps
a condition's frequency distribution to its probability
distribution. The function is called with the frequency
distribution as its first argument,
``factory_args`` as its remaining arguments, and
``factory_kw_args`` as keyword arguments.
:type factory_args: (any)
:param factory_args: Extra arguments for ``probdist_factory``.
These arguments are usually used to specify extra
properties for the probability distributions of individual
conditions, such as the number of bins they contain.
:type factory_kw_args: (any)
:param factory_kw_args: Extra keyword arguments for ``probdist_factory``.
"""
# self._probdist_factory = probdist_factory
# self._cfdist = cfdist
# self._factory_args = factory_args
# self._factory_kw_args = factory_kw_args
factory = lambda: probdist_factory(FreqDist(),
*factory_args, **factory_kw_args)
defaultdict.__init__(self, factory)
for condition in cfdist:
self[condition] = probdist_factory(cfdist[condition],
*factory_args, **factory_kw_args)
class DictionaryConditionalProbDist(ConditionalProbDistI):
"""
An alternative ConditionalProbDist that simply wraps a dictionary of
ProbDists rather than creating these from FreqDists.
"""
def __init__(self, probdist_dict):
"""
:param probdist_dict: a dictionary containing the probdists indexed
by the conditions
:type probdist_dict: dict any -> probdist
"""
defaultdict.__init__(self, DictionaryProbDist)
self.update(probdist_dict)
##//////////////////////////////////////////////////////
## Adding in log-space.
##//////////////////////////////////////////////////////
# If the difference is bigger than this, then just take the bigger one:
_ADD_LOGS_MAX_DIFF = math.log(1e-30, 2)
def add_logs(logx, logy):
"""
Given two numbers ``logx`` = *log(x)* and ``logy`` = *log(y)*, return
*log(x+y)*. Conceptually, this is the same as returning
``log(2**(logx)+2**(logy))``, but the actual implementation
avoids overflow errors that could result from direct computation.
"""
if (logx < logy + _ADD_LOGS_MAX_DIFF):
return logy
if (logy < logx + _ADD_LOGS_MAX_DIFF):
return logx
base = min(logx, logy)
return base + math.log(2**(logx-base) + 2**(logy-base), 2)
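# For example, add_logs(math.log(0.25, 2), math.log(0.25, 2)) returns
# math.log(0.5, 2) == -1.0, since 0.25 + 0.25 == 0.5.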
def sum_logs(logs):
if len(logs) == 0:
        # log(0) is negative infinity; return the closest approximation
        # available from this system's float implementation.
return _NINF
else:
return reduce(add_logs, logs[1:], logs[0])
##//////////////////////////////////////////////////////
## Probabilistic Mix-in
##//////////////////////////////////////////////////////
class ProbabilisticMixIn(object):
"""
A mix-in class to associate probabilities with other classes
(trees, rules, etc.). To use the ``ProbabilisticMixIn`` class,
define a new class that derives from an existing class and from
ProbabilisticMixIn. You will need to define a new constructor for
the new class, which explicitly calls the constructors of both its
parent classes. For example:
>>> from nltk.probability import ProbabilisticMixIn
>>> class A:
... def __init__(self, x, y): self.data = (x,y)
...
>>> class ProbabilisticA(A, ProbabilisticMixIn):
... def __init__(self, x, y, **prob_kwarg):
... A.__init__(self, x, y)
... ProbabilisticMixIn.__init__(self, **prob_kwarg)
See the documentation for the ProbabilisticMixIn
``constructor<__init__>`` for information about the arguments it
expects.
You should generally also redefine the string representation
methods, the comparison methods, and the hashing method.
"""
def __init__(self, **kwargs):
"""
Initialize this object's probability. This initializer should
be called by subclass constructors. ``prob`` should generally be
the first argument for those constructors.
:param prob: The probability associated with the object.
:type prob: float
:param logprob: The log of the probability associated with
the object.
:type logprob: float
"""
if 'prob' in kwargs:
if 'logprob' in kwargs:
raise TypeError('Must specify either prob or logprob '
'(not both)')
else:
ProbabilisticMixIn.set_prob(self, kwargs['prob'])
elif 'logprob' in kwargs:
ProbabilisticMixIn.set_logprob(self, kwargs['logprob'])
else:
self.__prob = self.__logprob = None
def set_prob(self, prob):
"""
Set the probability associated with this object to ``prob``.
:param prob: The new probability
:type prob: float
"""
self.__prob = prob
self.__logprob = None
def set_logprob(self, logprob):
"""
Set the log probability associated with this object to
``logprob``. I.e., set the probability associated with this
object to ``2**(logprob)``.
:param logprob: The new log probability
:type logprob: float
"""
self.__logprob = logprob
self.__prob = None
def prob(self):
"""
Return the probability associated with this object.
:rtype: float
"""
if self.__prob is None:
if self.__logprob is None: return None
self.__prob = 2**(self.__logprob)
return self.__prob
def logprob(self):
"""
Return ``log(p)``, where ``p`` is the probability associated
with this object.
:rtype: float
"""
if self.__logprob is None:
if self.__prob is None: return None
self.__logprob = math.log(self.__prob, 2)
return self.__logprob
class ImmutableProbabilisticMixIn(ProbabilisticMixIn):
def set_prob(self, prob):
raise ValueError, '%s is immutable' % self.__class__.__name__
def set_logprob(self, prob):
raise ValueError, '%s is immutable' % self.__class__.__name__
## Helper function for processing keyword arguments
def _get_kwarg(kwargs, key, default):
if key in kwargs:
arg = kwargs[key]
del kwargs[key]
else:
arg = default
return arg
##//////////////////////////////////////////////////////
## Demonstration
##//////////////////////////////////////////////////////
def _create_rand_fdist(numsamples, numoutcomes):
"""
Create a new frequency distribution, with random samples. The
samples are numbers from 1 to ``numsamples``, and are generated by
summing two numbers, each of which has a uniform distribution.
"""
import random
fdist = FreqDist()
for x in range(numoutcomes):
y = (random.randint(1, (1+numsamples)/2) +
random.randint(0, numsamples/2))
fdist.inc(y)
return fdist
def _create_sum_pdist(numsamples):
"""
Return the true probability distribution for the experiment
``_create_rand_fdist(numsamples, x)``.
"""
fdist = FreqDist()
for x in range(1, (1+numsamples)/2+1):
for y in range(0, numsamples/2+1):
fdist.inc(x+y)
return MLEProbDist(fdist)
def demo(numsamples=6, numoutcomes=500):
"""
    A demonstration of frequency distributions and probability
    distributions. This demonstration creates three frequency
    distributions by sampling a random process; each frequency
    distribution is sampled ``numoutcomes`` times, with sample values
    ranging from 1 to ``numsamples``. These frequency distributions
    are then used to build several probability distributions, and the
    probability estimates of those distributions are compared to the
    actual probability of each sample.
:type numsamples: int
    :param numsamples: The number of samples to use in each demo
        frequency distribution.
:type numoutcomes: int
:param numoutcomes: The total number of outcomes for each
demo frequency distribution. These outcomes are divided into
``numsamples`` bins.
:rtype: None
"""
# Randomly sample a stochastic process three times.
fdist1 = _create_rand_fdist(numsamples, numoutcomes)
fdist2 = _create_rand_fdist(numsamples, numoutcomes)
fdist3 = _create_rand_fdist(numsamples, numoutcomes)
# Use our samples to create probability distributions.
pdists = [
MLEProbDist(fdist1),
LidstoneProbDist(fdist1, 0.5, numsamples),
HeldoutProbDist(fdist1, fdist2, numsamples),
HeldoutProbDist(fdist2, fdist1, numsamples),
CrossValidationProbDist([fdist1, fdist2, fdist3], numsamples),
GoodTuringProbDist(fdist1),
SimpleGoodTuringProbDist(fdist1),
SimpleGoodTuringProbDist(fdist1, 7),
_create_sum_pdist(numsamples),
]
# Find the probability of each sample.
vals = []
for n in range(1,numsamples+1):
vals.append(tuple([n, fdist1.freq(n)] +
[pdist.prob(n) for pdist in pdists]))
# Print the results in a formatted table.
print ('%d samples (1-%d); %d outcomes were sampled for each FreqDist' %
(numsamples, numsamples, numoutcomes))
print '='*9*(len(pdists)+2)
FORMATSTR = ' FreqDist '+ '%8s '*(len(pdists)-1) + '| Actual'
print FORMATSTR % tuple(`pdist`[1:9] for pdist in pdists[:-1])
print '-'*9*(len(pdists)+2)
FORMATSTR = '%3d %8.6f ' + '%8.6f '*(len(pdists)-1) + '| %8.6f'
for val in vals:
print FORMATSTR % val
# Print the totals for each column (should all be 1.0)
zvals = zip(*vals)
def sum(lst): return reduce(lambda x,y:x+y, lst, 0)
sums = [sum(val) for val in zvals[1:]]
print '-'*9*(len(pdists)+2)
FORMATSTR = 'Total ' + '%8.6f '*(len(pdists)) + '| %8.6f'
print FORMATSTR % tuple(sums)
print '='*9*(len(pdists)+2)
# Display the distributions themselves, if they're short enough.
    if len(str(fdist1)) < 70:
print ' fdist1:', str(fdist1)
print ' fdist2:', str(fdist2)
print ' fdist3:', str(fdist3)
print
print 'Generating:'
for pdist in pdists:
fdist = FreqDist(pdist.generate() for i in range(5000))
print '%20s %s' % (pdist.__class__.__name__[:20], str(fdist)[:55])
print
def gt_demo():
from nltk import corpus
emma_words = corpus.gutenberg.words('austen-emma.txt')
fd = FreqDist(emma_words)
gt = GoodTuringProbDist(fd)
sgt = SimpleGoodTuringProbDist(fd)
katz = SimpleGoodTuringProbDist(fd, 7)
    print '%18s %8s %12s %14s %12s' \
        % ("word", "frequency", "GoodTuring", "SimpleGoodTuring", "Katz-cutoff" )
for key in fd:
print '%18s %8d %12e %14e %12e' \
% (key, fd[key], gt.prob(key), sgt.prob(key), katz.prob(key))
if __name__ == '__main__':
demo(6, 10)
demo(5, 5000)
gt_demo()
__all__ = ['ConditionalFreqDist', 'ConditionalProbDist',
'ConditionalProbDistI', 'CrossValidationProbDist',
'DictionaryConditionalProbDist', 'DictionaryProbDist', 'ELEProbDist',
'FreqDist', 'GoodTuringProbDist', 'SimpleGoodTuringProbDist', 'HeldoutProbDist',
'ImmutableProbabilisticMixIn', 'LaplaceProbDist', 'LidstoneProbDist',
'MLEProbDist', 'MutableProbDist', 'ProbDistI', 'ProbabilisticMixIn',
'UniformProbDist', 'WittenBellProbDist', 'add_logs',
'log_likelihood', 'sum_logs', 'entropy']
|
{
"content_hash": "abc5f6243232f8f75f9822d10ed4d9d4",
"timestamp": "",
"source": "github",
"line_count": 2235,
"max_line_length": 117,
"avg_line_length": 36.28545861297539,
"alnum_prop": 0.5937261091518903,
"repo_name": "JeyZeta/Dangerous",
"id": "88ce5a0d63463d1c66beb20e01361858141f5e42",
"size": "81647",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "Dangerous/Golismero/thirdparty_libs/nltk/probability.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "13260"
},
{
"name": "C",
"bytes": "12851"
},
{
"name": "C++",
"bytes": "3174"
},
{
"name": "CSS",
"bytes": "267451"
},
{
"name": "HTML",
"bytes": "2686153"
},
{
"name": "JavaScript",
"bytes": "1356956"
},
{
"name": "Lua",
"bytes": "14436"
},
{
"name": "Makefile",
"bytes": "11190"
},
{
"name": "Objective-C",
"bytes": "998"
},
{
"name": "PHP",
"bytes": "619"
},
{
"name": "PLpgSQL",
"bytes": "536"
},
{
"name": "Perl",
"bytes": "263365"
},
{
"name": "Python",
"bytes": "16669102"
},
{
"name": "Roff",
"bytes": "9828"
},
{
"name": "Ruby",
"bytes": "503"
},
{
"name": "Shell",
"bytes": "6691"
}
],
"symlink_target": ""
}
|
'''
https://spacy.io/usage/vectors-similarity
python -m spacy download en_core_web_sm
python -m spacy download en_core_web_md
'''
import spacy
nlp = spacy.load('en_core_web_md') # make sure to use larger model!
tokens = nlp(u'dog cat banana')
for token1 in tokens:
for token2 in tokens:
print(token1.text, token2.text, token1.similarity(token2))
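# Expected output shape, one line per ordered token pair (the exact
# similarity values depend on the model's vectors; self-similarity is 1.0):
#
#     dog dog 1.0
#     dog cat 0.8...
#     dog banana 0.2...
#     ...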
|
{
"content_hash": "f90f7049c81bd99b33e52034177a980f",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 68,
"avg_line_length": 21.470588235294116,
"alnum_prop": 0.7041095890410959,
"repo_name": "ajrichards/bayesian-examples",
"id": "a69d0046758f0d0c7c17126bcee6cf652df6612f",
"size": "387",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nlp/spacy-similarity.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "14178"
},
{
"name": "R",
"bytes": "4733"
}
],
"symlink_target": ""
}
|
import argparse
import json
import sys
import yaml
import shade
import shade.inventory
def output_format_dict(data, use_yaml):
if use_yaml:
return yaml.safe_dump(data, default_flow_style=False)
else:
return json.dumps(data, sort_keys=True, indent=2)
def parse_args():
parser = argparse.ArgumentParser(description='OpenStack Inventory Module')
parser.add_argument('--refresh', action='store_true',
help='Refresh cached information')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--list', action='store_true',
help='List active servers')
group.add_argument('--host', help='List details about the specific host')
parser.add_argument('--yaml', action='store_true', default=False,
help='Output data in nicely readable yaml')
parser.add_argument('--debug', action='store_true', default=False,
help='Enable debug output')
return parser.parse_args()
def main():
args = parse_args()
try:
shade.simple_logging(debug=args.debug)
inventory = shade.inventory.OpenStackInventory(
refresh=args.refresh)
if args.list:
output = inventory.list_hosts()
elif args.host:
output = inventory.get_host(args.host)
print(output_format_dict(output, args.yaml))
except shade.OpenStackCloudException as e:
sys.stderr.write(e.message + '\n')
sys.exit(1)
sys.exit(0)
if __name__ == '__main__':
main()
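# Example invocations (assuming a cloud is configured for shade, e.g. via
# a clouds.yaml file; 'my-server' is a hypothetical host name):
#
#     python inventory.py --list --yaml
#     python inventory.py --host my-server --refresh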
|
{
"content_hash": "20ffa6be28b742efbae44c97af2705c5",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 78,
"avg_line_length": 31.5,
"alnum_prop": 0.6266666666666667,
"repo_name": "jsmartin/shade",
"id": "c4d396f463ac5030be7dafd460bd30f687bca74b",
"size": "2208",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shade/cmd/inventory.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "450419"
},
{
"name": "Shell",
"bytes": "851"
}
],
"symlink_target": ""
}
|
from fabric.api import env, task
from fabric.operations import sudo
@task
def start():
"""Start the etherpad service."""
sudo("service etherpad start")
@task
def stop():
"""Stop the etherpad service."""
sudo("service etherpad stop", warn_only=True)
@task
def clean():
"""Remove all the code from the deployed etherpad directory and its code backup directory."""
sudo("rm -rf /tmp/etherpad_backup", warn_only=True)
# Ditch the etherpad directory, only if it exists
if not sudo("test -d %s" % etherpad_dir(), warn_only=True).failed:
sudo("mv %s /tmp/etherpad_backup" % etherpad_dir())
@task
def version():
"""Determine the installed version of etherpad."""
sudo("cat %s/build-info.json" % etherpad_dir())
def etherpad_dir():
return getattr(env, 'etherpad_dir', '/opt/etherpad')
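# Example invocations (assuming this package is importable from the
# project fabfile and hosts are configured in the fabric environment):
#
#     fab etherpad.stop etherpad.clean
#     fab etherpad.version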
|
{
"content_hash": "33b9fb23a115e9e6416af8ae9e42dd8d",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 97,
"avg_line_length": 24.676470588235293,
"alnum_prop": 0.66626936829559,
"repo_name": "oaeproject/oae-fabric",
"id": "eee7cdde2f48d0dd6d481241a0e5d4a98be865cb",
"size": "839",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fabfile/etherpad/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "29230"
}
],
"symlink_target": ""
}
|
from haystack.query import SearchQuerySet
from lists import *
# from haystack.views import search_view_factory
from django.conf.urls import patterns, include, url
from django.contrib.auth.decorators import login_required
from django.conf import settings
# from engage.api import *
from engage import api, api_public_v1
#from organizations.backends import invitation_backend
from tastypie.api import Api
from forms import *
import views
from fileHandlingView import *
from django.contrib import admin
from voting.views import vote_on_object
# from haystack.views import SearchView, search_view_factory
from engage.models import DatasetRequests
from django.views.generic import RedirectView
from wiki.urls import get_pattern as get_wiki_pattern
from django_notify.urls import get_pattern as get_notify_pattern
from djangoratings.views import AddRatingFromModel
import autocomplete_light
# import every app/autocomplete_light_registry.py
autocomplete_light.autodiscover()
admin.autodiscover()
engage_api = Api(api_name='engage')
engage_api.register(api.OrganizationResource())
engage_api.register(api.OrganizationUserResource())
engage_api.register(api.UserResource())
engage_api.register(api.ListResource())
engage_api.register(api.ExtensionGraphResource())
engage_api.register(api.DatasetResource())
engage_api.register(api.ResourceResource())
engage_api.register(api.DatasetApiResource())
engage_api_public_v1 = Api(api_name='v1')
engage_api_public_v1.register(api_public_v1.UserResource())
engage_api_public_v1.register(api_public_v1.ListResource())
engage_api_public_v1.register(api_public_v1.DatasetResource())
engage_api_public_v1.register(api_public_v1.DatasetApiResource())
engage_api_public_v1.register(api_public_v1.VisualizationResource())
engage_api_public_v1.register(api_public_v1.ResourcesResource())
engage_api_public_v1.register(api_public_v1.ApplicationResource())
datasetrequests_dict = {
'model': DatasetRequests,
'template_object_name': 'datasetrequests',
'allow_xmlhttprequest': True,
'template_name':'request.html',
}
sqs = SearchQuerySet().facet('country')
sqs = sqs.facet('categories')
sqs = sqs.facet('publisher')
sqs = sqs.facet('license')
sqs = sqs.facet('views')
sqs = sqs.facet('format')
urlpatterns = patterns('',
url(r'^$',views.index, name='index'),
url(r'^datasetrequests/$', views.DatasetRequestListView.as_view(), name='datasetrequests_index'),
url(r'^datasetrequests/new/$', views.request_new, name='datasetrequests_new'),
url(r'^datasetrequests/(?P<pk>\d+)/$',views.DatasetRequestDetailView.as_view(), name='datasetrequests_details'),
url(r'^datasetrequests/', include('fluent_comments.urls')),
url(r'^datasetrequests/vote/(?P<request_id>\d+)/(?P<direction>up|down|clear)$', views.request_vote,name='datasetrequests_vote'),
url(r'^datasetrequests/answer/(?P<request_id>\d+)/(?P<comment_id>\d+)$', views.request_acceptanswer,name='datasetrequests_acceptanswer'),
url(r'^profile/(?P<pk>\d+)/$', login_required(views.profile), name='profile'),
url(r'^terms/$', views.terms, name='terms'),
url(r'^about/$', views.about, name='about'),
url(r'^team/$', views.team, name='team'),
url(r'^contact/$', views.contact, name='contact'),
url(r'^community$', views.community, name='community'),
url(r'^userautocomplete$', views.userprofile_navigation_autocomplete, name='userautocomplete'),
url(r'^users$', login_required(views.UserListView.as_view()), name='users'),
# url(r'^/message/send/(?P<pk>\d+)/$', login_required(views.UserListView.as_view()), name='users'),
# url(r'^dataset-search/$', views.DatasetListView.as_view(), name='dataset_search'),
url(r'^dataset-search/$', views.DatasetFacetedSearchView(form_class=DatasetFacetedSearchForm, searchqueryset=sqs), name='dataset_search'),
url(r'^dataset/(?P<pk>\d+)/$', views.DatasetDetailView.as_view(), name='dataset_details'),
url(r'^dataset/(?P<pk>\d+)/(?P<slug>[\w-]+)/$', views.DatasetDetailView.as_view(), name='dataset_details'),
url(r'^dataset/(?P<slug>[\w-]+)/$', views.DatasetDetailView.as_view(), name='dataset_details'),
url(r'^dataset/(?P<pk>\d+)/edit$', login_required(views.DatasetUpdateView.as_view()), name='dataset_edit'),
url(r'^dataset/(?P<pk>\d+)/delete$', login_required(views.DatasetDeleteView.as_view()), name='dataset_delete'),
url(r'^dataset/(?P<pk>\d+)/extend/(?P<extension_attempt>\d+)$', login_required(views.dataset_extend), name='dataset_extend'),
url(r'^dataset/add', login_required(views.dataset_add), name='dataset_add'),
url(r'^dataset/(?P<pk>\d+)/extend/modal$', login_required(views.dataset_extend_modal), name='dataset_extend_modal'),
url(r'^dataset/(?P<pk>\d+)/increase-popularity$', views.dataset_increase_popularity, name='dataset_increase_popularity'),
# Visualizations
# url(r'^visual/$', views.visual, name='visual'), #ONLY FOR TESTING PURPOSES
# url(r'^resources/$', views.visualjson, name='visualjson'), #ONLY FOR TESTING PURPOSES
# Visualizations STABILIZE
url(r'^views/(?P<pk>\d+)/$', views.evisuals, name='views'), #ONLY FOR TESTING PURPOSES
url(r'^views/(?P<pk>\d+)/saved/(?P<saved>\d+)/$', views.evisualsSaved, name='viewsSaved'),
url(r'^eresources/(?P<pk>\d+)/$', views.evisualjsons, name='visualjsons'), #ONLY FOR TESTING PURPOSES
url(r'^eresources/(?P<pk>\d+)/saved/(?P<saved>\d+)/$', views.evisualjsonsSaved, name='visualjsonsAfterSaved'),
url(r'^refineview/(?P<pk>\d+)/$', views.dataset_details_view, name='testingdatasetview'),
url(r'^csvviewer/(?P<pk>\d+)/$', views.csvviewer, name='csvviewer'),
url(r'^analyze/$', views.analyze, name='analyze'),
url(r'^externalvis/(?P<application_pk>\d+)/$', views.externalvis, name='externalvis'),
# LINKED DATA
url(r'^sparql/', views.sparql, name='sparql'),
#ratings
url(r'rate-dataset/(?P<object_id>\d+)/(?P<score>\d+)/', AddRatingFromModel(), {'app_label': 'engage','model': 'dataset','field_name': 'rating',}),
url(r'^dataset/(?P<pk>\d+)/ratedialog/modal$', login_required(views.dataset_rate_modal), name='dataset_rate_modal'),
url(r'^dataset/(?P<pk>\d+)/rateviewdialog/modal$', login_required(views.dataset_rate_view_modal), name='dataset_rate_view_modal'),
# url(r'^portal/add', login_required(views.portal_add), name='portal_add'),
url(r'^accounts/', include('allauth.urls')),
url(r'^opendatasites$', views.opendatasites, name='opendatasites'),
url(r'^group/add', login_required(views.group_add), name='group_add'),
url(r'^group-name-validate/$', 'ajax_validation.views.validate', {'form_class': GroupForm}, 'group_name_validate'),
url(r'^groups/', include('organizations.urls')),
(r'^i18n/', include('django.conf.urls.i18n')),
url(r'^profile/(?P<pk>\w+)/edit$', login_required(views.UserUpdateView.as_view()), name='profile_edit'),
url(r'^admin/', include(admin.site.urls)),
(r'^api/', include(engage_api.urls)),
(r'^api/public/', include(engage_api_public_v1.urls)),
(r'^messages/', include('messages.urls')),
(r'^s/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.STATIC_DOC_ROOT}),
url(r'^rosetta/', include('rosetta.urls')),
#Applications
url(r'^application/$', views.ApplicationListView.as_view(), name='application_index'),
url(r'^tweets/$', views.view_tweets, name='tweets'),
url(r'^application/(?P<pk>\d+)/$',views.ApplicationDetailView.as_view(), name='application_details'),
#Followers
url(r'profile/Follow/(?P<user_id>\d+)/', login_required(views.add_follower_user),name='profile_add_follower_user'),
url(r'profile/deletemyaccount/', login_required(views.delete_profile_user),name='profile_delete_user'),
url(r'profile/Unfollow/(?P<user_id>\d+)/', login_required(views.remove_follower_user),name='profile_remove_follower_user'),
url(r'profile/following/$', login_required(views.see_following),name='profile_see_following_user'),
url(r'dataset/Follow/(?P<dataset_id>\d+)/', login_required(views.add_follower_dataset),name='profile_add_follower_dataset'),
url(r'dataset/Unfollow/(?P<dataset_id>\d+)/', login_required(views.remove_follower_dataset),name='profile_remove_follower_dataset'),
# url(r'^$', 'engage.views.home', name='home'),
# url(r'^engage/', include('engage.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
# (r'^search/autocomplete/',views.autocomplete),
# (r'autocomplete/',views.autocomplete_template),
# url(r'^searchApp$', views.search, name='searchApp'),
# (r'^appsearch/$', 'engage.engage.searchApp.searchApp.views.search'),
# (r'^search/', include('haystack.urls')),
# url(r'^search/person/', search_view_factory(
# view_class=SearchView,
# template='autocomplete.html',
# form_class=AutocompleteModelSearchForm
# ), name='autocomplete'),
url(r'^wiki/', get_wiki_pattern()),
url(r'^notify/', get_notify_pattern()),
(r'^favicon\.ico$',RedirectView.as_view(url='/s/imgs/favicon.ico')),
url(r'^datastories', include('articles.urls')),
url(r'^autocomplete/', include('autocomplete_light.urls')),
url(r'api/doc/', include('tastypie_swagger.urls', namespace='tastypie_swagger')),
#Hackathon
url(r'^hackathon$', views.hackathon, name="hackathon"),
url(r'^datathon$', views.datathon, name="datathon"),
url(r'^hackathon/thanks$', views.thanks, name="hackathonThanks"),
)
|
{
"content_hash": "f0261164101cb0a4425b25abe0dded53",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 150,
"avg_line_length": 56.34705882352941,
"alnum_prop": 0.7000730765215576,
"repo_name": "Suite5/DataColibri",
"id": "e5b5629a0511780e4e47f36b121f4a34791f1957",
"size": "9872",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "engage/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "381"
},
{
"name": "CSS",
"bytes": "944246"
},
{
"name": "HTML",
"bytes": "566711"
},
{
"name": "JavaScript",
"bytes": "1510227"
},
{
"name": "PHP",
"bytes": "972"
},
{
"name": "Python",
"bytes": "1046512"
},
{
"name": "Shell",
"bytes": "79"
}
],
"symlink_target": ""
}
|
"""
.. py:currentmodule:: MCXRay.BackgroundProblem.mcxray
.. moduleauthor:: Hendrix Demers <hendrix.demers@mail.mcgill.ca>
Base module to create and analyze MCXRay simulations.
"""
# Script information for the file.
__author__ = "Hendrix Demers (hendrix.demers@mail.mcgill.ca)"
__version__ = ""
__date__ = ""
__copyright__ = "Copyright (c) 2012 Hendrix Demers"
__license__ = ""
# Standard library modules.
import os.path
import shutil
import time
import argparse
import logging
import zipfile
import stat
import math
import datetime
import filecmp
# Third party modules.
import numpy as np
import h5py
from apscheduler.schedulers.blocking import BlockingScheduler
# Local modules.
from pymcxray import get_current_module_path, create_path, get_results_mcgill_path, get_mcxray_program_path, get_mcxray_program_name, get_mcxray_archive_path, get_mcxray_archive_name
import pymcxray.serialization.SerializationPickle as SerializationPickle
# Project modules
import pymcxray.Simulation as Simulation
from pymcxray.SimulationsParameters import PARAMETER_SPECIMEN
# Globals and constants variables.
ANALYZE_TYPE_GENERATE_INPUT_FILE = "generate"
ANALYZE_TYPE_CHECK_PROGRESS = "check"
ANALYZE_TYPE_READ_RESULTS = "read"
ANALYZE_TYPE_ANALYZE_RESULTS = "analyze"
ANALYZE_TYPE_ANALYZE_SCHEDULED_READ = "scheduled_read"
SAVE_EVERY_SIMULATIONS = 10
HDF5_SIMULATIONS = "simulations"
HDF5_PARAMETERS = "parameters"
def _getOptions():
analyzeTypes = []
analyzeTypes.append(ANALYZE_TYPE_GENERATE_INPUT_FILE)
analyzeTypes.append(ANALYZE_TYPE_CHECK_PROGRESS)
analyzeTypes.append(ANALYZE_TYPE_READ_RESULTS)
analyzeTypes.append(ANALYZE_TYPE_ANALYZE_RESULTS)
analyzeTypes.append(ANALYZE_TYPE_ANALYZE_SCHEDULED_READ)
parser = argparse.ArgumentParser(description='Analyze MCXRay x-ray background problem.')
parser.add_argument('type', metavar='AnalyzeType', type=str, choices=analyzeTypes, nargs='?',
default=ANALYZE_TYPE_GENERATE_INPUT_FILE,
help='Type of analyze to do')
parser.add_argument('args', nargs=argparse.REMAINDER)
args = parser.parse_args()
logging.debug(args.type)
return args.type
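# Example command lines for a script built on this module (the script
# name is hypothetical; the choices come from the analyzeTypes above):
#
#     python my_analysis_script.py generate
#     python my_analysis_script.py check
#     python my_analysis_script.py read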
class _Simulations(object):
SIMULATIONS_FOLDER = "simulations"
RESULTS_FOLDER = os.path.join(SIMULATIONS_FOLDER, "Results")
ANALYSES_FOLDER = "analyzes"
INPUTS_FOLDER = "input"
def __init__(self, simulationPath=None, basepath=None, relativePath=None, configurationFilepath=None):
if configurationFilepath is None:
self._configurationFilepath = get_current_module_path(__file__, "../../pyMcGill.cfg")
else:
self._configurationFilepath = configurationFilepath
self._output = None
self.overwrite = True
self.resetCache = False
self.useSerialization = True
self.verbose = True
self.createBackup = True
self.use_hdf5 = False
self.delete_result_files = False
self.read_interval_h = 1
self.read_interval_m = None
if simulationPath is not None:
self._simulationPath = os.path.normpath(simulationPath)
else:
self._simulationPath = None
if basepath is not None:
self._basepath = os.path.normpath(basepath)
else:
self._basepath = None
if relativePath is not None:
self._relativePath = os.path.normpath(relativePath)
else:
self._relativePath = None
self._createAllFolders(self.getSimulationPath())
self._simulationResultsList = {}
self._serializationExtension = '.ser'
self.format_digit = {}
def getSimulationPath(self):
try:
if self._simulationPath is not None:
return self._simulationPath
elif self._relativePath is not None:
path = get_results_mcgill_path(self._configurationFilepath)
path = os.path.join(path, self._relativePath)
elif self._basepath is not None:
name = self.getAnalysisName()
path = get_results_mcgill_path(self._configurationFilepath)
path = os.path.join(path, self._basepath, "%s" % (name))
else:
name = self.getAnalysisName()
path = get_results_mcgill_path(self._configurationFilepath, "%s" % (name))
if not os.path.isdir(path):
os.makedirs(path)
self._simulationPath = path
return path
except NotImplementedError:
return None
def getSimulationsPath(self):
path = os.path.join(self.getSimulationPath(), self.SIMULATIONS_FOLDER)
return path
def getResultsPath(self):
path = os.path.join(self.getSimulationPath(), self.RESULTS_FOLDER)
return path
def getAnalyzesPath(self):
path = os.path.join(self.getSimulationPath(), self.ANALYSES_FOLDER)
return path
def getInputPath(self):
inputPath = os.path.join(self.getSimulationsPath(), self.INPUTS_FOLDER)
inputPath = create_path(inputPath)
return inputPath
def get_hdf5_file_path(self):
result_path = self.getResultsPath()
name = self.getAnalysisName()
file_path = os.path.join(result_path, name + ".hdf5")
logging.debug(file_path)
return file_path
def get_hdf5_group(self, hdf5_file):
try:
hdf5_group = hdf5_file[HDF5_SIMULATIONS]
return hdf5_group
except KeyError as message:
logging.error(message)
message = "Filename: %s" % (hdf5_file.filename)
logging.error(message)
return None
def _createAllFolders(self, basePath):
if basePath is not None:
if not os.path.isdir(basePath):
os.makedirs(basePath)
newPaths = [self.SIMULATIONS_FOLDER, self.RESULTS_FOLDER, self.ANALYSES_FOLDER]
for newPath in newPaths:
path = os.path.join(basePath, newPath)
if not os.path.isdir(path):
os.makedirs(path)
def _copyMCXRayProgramOld(self):
basePath = get_mcxray_program_path(self._configurationFilepath)
programName = get_mcxray_program_name(self._configurationFilepath, default="McXRay.exe")
sourceFilepath = os.path.join(basePath, programName)
destinationPath = os.path.join(self.getSimulationsPath(), programName)
#if self._overwrite or not os.path.isfile(destinationPath) or os.stat(sourceFilepath).st_mtime > os.stat(destinationPath).st_mtime:
if self._overwrite or not os.path.isfile(destinationPath):
shutil.copy2(sourceFilepath, destinationPath)
optionFilenames = ["PhiRhoZPjm.txt", "MapPjm.txt", "SnrPjm.txt", "TomoPjm.txt"]
for optionFilename in optionFilenames:
sourceFilepath = os.path.join(basePath, optionFilename)
if os.path.isfile(sourceFilepath):
destinationPath = os.path.join(self.getSimulationsPath(), optionFilename)
if self._overwrite or not os.path.isfile(destinationPath):
shutil.copy2(sourceFilepath, destinationPath)
macPathName = "MACHenke"
sourcePath = os.path.join(basePath, macPathName)
destinationPath = os.path.join(self.getSimulationsPath(), macPathName)
        if self._overwrite or not os.path.isdir(destinationPath) or os.stat(sourcePath).st_mtime > os.stat(destinationPath).st_mtime:
if os.path.isdir(destinationPath):
shutil.rmtree(destinationPath)
time.sleep(1)
shutil.copytree(sourcePath, destinationPath)
def _copyMCXRayProgram(self):
archivesPath = get_mcxray_archive_path(self._configurationFilepath)
archiveFilename = get_mcxray_archive_name(self._configurationFilepath)
archiveFilepath = os.path.join(archivesPath, archiveFilename)
destinationPath = self.getSimulationsPath()
self._extractZipfile(archiveFilename, archiveFilepath, destinationPath)
def _extractZipfile(self, archiveFilename, archiveFilepath, destinationPath):
versionBasename, dummyExtension = os.path.splitext(os.path.basename(archiveFilepath))
versionFilename = versionBasename + ".txt"
versionFilepath = os.path.join(destinationPath, versionFilename)
logging.debug("Extracting archive %s to %s.", archiveFilepath, destinationPath)
#shutil.rmtree(destinationPath, ignore_errors=True)
if not os.path.isdir(destinationPath):
self._createPath(destinationPath)
destinationFilepath = os.path.join(destinationPath, archiveFilename)
shutil.copy2(archiveFilepath, destinationFilepath)
zipFile = zipfile.ZipFile(destinationFilepath, 'r')
try:
zipFile.extractall(destinationPath)
except IOError as message:
logging.error(message)
zipFile.close()
fileVersion = open(versionFilepath, 'w')
fileVersion.write(versionBasename)
fileVersion.close()
try:
os.remove(destinationFilepath)
except WindowsError as message:
logging.error(message)
def logNumberSimulations(self):
numberSimulations = 0
numberSimulationsTodo = 0
numberSimulationsDone = 0
for simulation in self.getAllSimulationParameters():
if simulation.isDone(self.getSimulationsPath()):
numberSimulationsDone += 1
else:
numberSimulationsTodo += 1
numberSimulations += 1
percentage = 100.0*float(numberSimulationsDone)/float(numberSimulations)
logging.info("Number of done: %4i/%i (%5.2f%%)", numberSimulationsDone, numberSimulations, percentage)
percentage = 100.0*float(numberSimulationsTodo)/float(numberSimulations)
logging.info("Number of todo: %4i/%i (%5.2f%%)", numberSimulationsTodo, numberSimulations, percentage)
def generateInputFiles(self, batchFile):
logging.info("generateInputFiles for analysis: %s", self.getAnalysisName())
self._copyMCXRayProgram()
file_path = self.get_hdf5_file_path()
if self.use_hdf5 and os.path.isfile(file_path):
with h5py.File(file_path, 'r', driver='core') as hdf5_file:
hdf5_group = self.get_hdf5_group(hdf5_file)
self._generate_input_files(batchFile, hdf5_group)
else:
hdf5_group = None
self._generate_input_files(batchFile, hdf5_group)
def _generate_input_files(self, batchFile, hdf5_group):
numberSimulations = 0
numberSimulationsTodo = 0
numberSimulationsDone = 0
simulationTodoNames = []
for simulation in self.getAllSimulationParameters():
simulation.createSimulationFiles(self.getInputPath(), self.getSimulationsPath(), hdf5_group)
if simulation.isDone(self.getSimulationsPath(), hdf5_group):
numberSimulationsDone += 1
else:
numberSimulationsTodo += 1
simulationTodoNames.append(simulation.name)
filename = os.path.join("input", simulation.filename)
batchFile.addSimulationName(filename)
numberSimulations += 1
if self._verbose:
for simulationTodoName in simulationTodoNames:
logging.info("Todo: \t%s", simulationTodoName)
percentage = 100.0*float(numberSimulationsDone)/float(numberSimulations)
logging.info("Number of done: %4i/%i (%5.2f%%)", numberSimulationsDone, numberSimulations, percentage)
percentage = 100.0*float(numberSimulationsTodo)/float(numberSimulations)
logging.info("Number of todo: %4i/%i (%5.2f%%)", numberSimulationsTodo, numberSimulations, percentage)
def checkProgress(self):
file_path = self.get_hdf5_file_path()
if self.use_hdf5 and os.path.isfile(file_path):
with h5py.File(file_path, 'r', driver='core') as hdf5_file:
hdf5_group = self.get_hdf5_group(hdf5_file)
self._check_progress(hdf5_group)
else:
hdf5_group = None
self._check_progress(hdf5_group)
def _check_progress(self, hdf5_group):
numberSimulations = 0
numberSimulationsTodo = 0
numberSimulationsDone = 0
simulationTodoNames = []
inputPath = os.path.join(self.getSimulationsPath(), "input")
inputPath = create_path(inputPath)
for simulation in self.getAllSimulationParameters():
if simulation.isDone(self.getSimulationsPath(), hdf5_group):
numberSimulationsDone += 1
else:
numberSimulationsTodo += 1
simulationTodoNames.append(simulation.name)
numberSimulations += 1
if self._verbose:
for simulationTodoName in simulationTodoNames:
logging.debug("Todo: \t%s", simulationTodoName)
logging.info("Check progress for %s", self.getAnalysisName())
percentage = 100.0*float(numberSimulationsDone)/float(numberSimulations)
logging.info("Number of done: %4i/%i (%5.2f%%)", numberSimulationsDone, numberSimulations, percentage)
percentage = 100.0*float(numberSimulationsTodo)/float(numberSimulations)
logging.info("Number of todo: %4i/%i (%5.2f%%)", numberSimulationsTodo, numberSimulations, percentage)
def getAllSimulationParameters(self):
simulationParametersList = []
for parameters in self._simulationsParameters.getAllSimulationParameters():
simulation = Simulation.Simulation(overwrite=self._overwrite)
simulation.format_digit = self.format_digit
simulation.basename = self.getAnalysisName()
simulation.setParameters(parameters)
if PARAMETER_SPECIMEN in parameters:
simulation._specimen = parameters[PARAMETER_SPECIMEN]
else:
simulation._specimen = self.createSpecimen(parameters)
simulation.generateBaseFilename()
simulationParametersList.append(simulation)
return simulationParametersList
def _isAllResultFileExist(self, resultFilepath, simulationFilepath):
resultSerializedFilepath = resultFilepath.replace('.cas', '_numpy.npz')
if os.path.isfile(resultFilepath) and not self.isOlderThan(resultFilepath, simulationFilepath):
logging.debug("Done: %s", resultFilepath)
return True
else:
logging.debug("missing: %s", resultFilepath)
if os.path.isfile(resultSerializedFilepath) and not self.isOlderThan(resultSerializedFilepath, simulationFilepath):
logging.debug("Done: %s", resultSerializedFilepath)
return True
else:
logging.debug("missing: %s", resultSerializedFilepath)
return False
def isOlderThan(self, resultFilepath, simulationFilepath):
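        # A result file is considered stale if it is missing, or if the
        # companion simulation file was modified or created after the
        # result file was last written.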
if not os.path.isfile(resultFilepath):
return True
statMainFile = os.stat(resultFilepath)
statOtherFile = os.stat(simulationFilepath)
if statOtherFile[stat.ST_MTIME] > statMainFile[stat.ST_MTIME]:
return True
elif statOtherFile[stat.ST_CTIME] > statMainFile[stat.ST_MTIME] and statOtherFile[stat.ST_CTIME] > statMainFile[stat.ST_CTIME]:
return True
else:
return False
def readResults(self, resultFilepaths=None, serializationFilename="", isResultsKeep=True):
logging.info("readResults")
if self.use_hdf5:
self._read_all_results_hdf5()
else:
self._readAllResults(serializationFilename, isResultsKeep)
def _readAllResults(self, serializationFilename="", isResultsKeep=True):
if self.useSerialization:
if serializationFilename == "":
serializationFilename = self.getAnalysisName() + ".ser"
self._readAllResultsSerialization(serializationFilename, isResultsKeep)
else:
self._readAllResultsNoSerialization(isResultsKeep)
def _readResultsSerialization(self, serializationFilename):
logging.info("_readAllResultsSerialization")
simulationsResults = SerializationPickle.SerializationPickle()
simulationsResults.setPathname(self.getResultsPath())
simulationsResults.setFilename(serializationFilename)
if self.resetCache:
simulationsResults.deleteFile()
simulationResultsList = {}
if simulationsResults.isFile():
simulationResultsList = simulationsResults.load()
self._simulationResultsList = simulationResultsList
logging.info("Number of simulation results: %i", len(self._simulationResultsList))
def _readAllResultsSerialization(self, serializationFilename, isResultsKeep):
logging.info("_readAllResultsSerialization")
simulationsResults = SerializationPickle.SerializationPickle()
simulationsResults.setPathname(self.getResultsPath())
simulationsResults.setFilename(serializationFilename)
if self.createBackup:
simulationsResults.backupFile()
newResults = False
if self.resetCache:
simulationsResults.deleteFile()
simulationResultsList = {}
if simulationsResults.isFile():
simulationResultsList = simulationsResults.load()
_numberError = 0
simulations = self.getAllSimulationParameters()
total = len(simulations)
for index, simulation in enumerate(simulations):
if simulation.isDone(self.getSimulationsPath()):
try:
key = self.generateResultsKey(simulation)
filepath = simulation.getProgramVersionFilepath(self.getSimulationsPath())
if simulationsResults.isOlderThan(filepath) or key not in simulationResultsList:
logging.info("Processing file %i/%i", (index+1), total)
if os.path.isfile(filepath):
logging.debug(filepath)
simulationResultsList[key] = self.readOneResults(simulation)
newResults = True
if index % SAVE_EVERY_SIMULATIONS == 0:
simulationsResults.save(simulationResultsList)
else:
logging.warning("File not found: %s", filepath)
except UnboundLocalError as message:
logging.error("UnboundLocalError in %s for %s", "_readAllResultsSerialization", filepath)
logging.error(message)
except ValueError as message:
logging.error("ValueError in %s for %s", "_readAllResultsSerialization", filepath)
logging.error(message)
except AssertionError as message:
logging.error("AssertionError in %s for %s", "_readAllResultsSerialization", filepath)
logging.error(message)
except IOError as message:
logging.warning(message)
logging.warning(simulation.name)
_numberError += 1
if _numberError > 0:
logging.info("Number of IO error: %i", _numberError)
if newResults:
simulationsResults.save(simulationResultsList)
if isResultsKeep:
self._simulationResultsList = simulationResultsList
logging.info("Number of simulation results: %i", len(self._simulationResultsList))
else:
del simulationResultsList
def _readAllResultsNoSerialization(self, isResultsKeep):
logging.info("_readAllResultsNoSerialization")
simulationResultsList = {}
_numberError = 0
simulations = self.getAllSimulationParameters()
total = len(simulations)
for index, simulation in enumerate(simulations):
if simulation.isDone(self.getSimulationsPath()):
try:
key = self.generateResultsKey(simulation)
if key not in simulationResultsList:
if self.verbose:
logging.info("Processing file %i/%i", (index+1), total)
else:
logging.debug("Processing file %i/%i", (index+1), total)
filepath = simulation.getProgramVersionFilepath(self.getSimulationsPath())
if os.path.isfile(filepath):
logging.debug(filepath)
simulationResultsList[key] = self.readOneResults(simulation)
else:
logging.warning("File not found: %s", filepath)
except UnboundLocalError as message:
logging.error("UnboundLocalError in %s for %s", "_readAllResultsSerialization", filepath)
logging.error(message)
except ValueError as message:
logging.error("ValueError in %s for %s", "_readAllResultsSerialization", filepath)
logging.error(message)
except AssertionError as message:
logging.error("AssertionError in %s for %s", "_readAllResultsSerialization", filepath)
logging.error(message)
except IOError as message:
logging.warning(message)
logging.warning(simulation.name)
_numberError += 1
if _numberError > 0:
logging.info("Number of IO error: %i", _numberError)
if isResultsKeep:
self._simulationResultsList = simulationResultsList
logging.info("Number of simulation results: %i", len(self._simulationResultsList))
else:
del simulationResultsList
def _read_all_results_hdf5(self):
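        """Collect every finished simulation into the analysis HDF5 file,
        backing the file up first and grouping results under HDF5_SIMULATIONS."""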
logging.info("_read_all_results_hdf5")
starting_time_all = time.perf_counter()
number_simulations_read = 0
file_path = self.get_hdf5_file_path()
backup_file_path = ""
if os.path.isfile(file_path):
backup_file_path = self.backup_hdf5_File(file_path)
with h5py.File(file_path, 'a', driver='core', backing_store=True) as hdf5_file:
hdf5_root = hdf5_file.require_group(HDF5_SIMULATIONS)
_numberError = 0
simulations = self.getAllSimulationParameters()
self._write_parameters_hdf5(hdf5_root)
total = len(simulations)
for index, simulation in enumerate(simulations):
if simulation.isDone(self.getSimulationsPath(), None):
starting_time = time.perf_counter()
try:
filepath = simulation.getProgramVersionFilepath(self.getSimulationsPath())
logging.info("Processing file %i/%i", (index+1), total)
if os.path.isfile(filepath):
logging.debug(filepath)
name = simulation.name
if name in hdf5_root:
del hdf5_root[name]
hdf5_group = hdf5_root.require_group(name)
parameters = simulation.getParameters()
for parameter_name in parameters:
hdf5_group.attrs[parameter_name] = parameters[parameter_name]
self.read_one_results_hdf5(simulation, hdf5_group)
# if number_simulations_read%50 == 0:
# hdf5_root.file.flush()
if self.delete_result_files:
self.delete_simulation_result_files(simulation)
else:
logging.warning("File not found: %s", filepath)
except UnboundLocalError as message:
logging.error("UnboundLocalError in %s for %s", "_read_all_results_hdf5", filepath)
logging.error(message)
except ValueError as message:
logging.error("ValueError in %s for %s", "_read_all_results_hdf5", filepath)
logging.error(message)
except AssertionError as message:
logging.error("AssertionError in %s for %s", "_read_all_results_hdf5", filepath)
logging.error(message)
except IOError as message:
logging.warning(message)
logging.warning(simulation.name)
_numberError += 1
                    elapsed_time = time.perf_counter() - starting_time
                    logging.info("Elapsed time for one simulation: %.1f s", elapsed_time)
number_simulations_read += 1
if _numberError > 0:
logging.info("Number of IO error: %i", _numberError)
        elapsed_time_all = time.perf_counter() - starting_time_all
        logging.info("Elapsed time for all simulations (%i): %.1f s", number_simulations_read, elapsed_time_all)
if not self.createBackup and os.path.isfile(backup_file_path):
file_path = self.get_hdf5_file_path()
if self.use_hdf5 and os.path.isfile(file_path):
with h5py.File(file_path, 'r', driver='core') as hdf5_file:
logging.info("Remove file: %s", backup_file_path)
os.remove(backup_file_path)
def _write_parameters_hdf5(self, hdf5_root):
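        """Store fixed parameters as attributes and varied parameters as
        datasets of the HDF5_PARAMETERS group (strings as fixed-width bytes)."""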
hdf5_parameters_group = hdf5_root.require_group(HDF5_PARAMETERS)
for fixed_parameter_name, fixed_parameter_value in self._simulationsParameters.fixedParameters.items():
hdf5_parameters_group.attrs[fixed_parameter_name] = fixed_parameter_value
for varied_parameter_name, varied_parameter_value in self._simulationsParameters.variedParameters.items():
if varied_parameter_name in hdf5_parameters_group:
del hdf5_parameters_group[varied_parameter_name]
if type(varied_parameter_value[0]) is str:
length = np.max([len(item) for item in varied_parameter_value])
data_type = "S{:d}".format(length)
data = np.array(varied_parameter_value, dtype=data_type)
else:
data = np.array(varied_parameter_value)
            hdf5_parameters_group.create_dataset(varied_parameter_name, data=data)
def backup_hdf5_File(self, file_path):
backup_file_path = None
if os.path.isfile(file_path):
suffix = self.generate_time_stamp()
index_extension = file_path.rfind('.hdf5')
base_name = file_path[:index_extension]
backup_file_path = base_name + "_" + suffix + ".hdf5"
shutil.copy2(file_path, backup_file_path)
logging.info("Backup created: %s", backup_file_path)
return backup_file_path
    def generate_time_stamp(self):
        # Time stamp of the form "2017-03-27_14h05m09s".
        return datetime.datetime.now().strftime("%Y-%m-%d_%Hh%Mm%Ss")
def delete_simulation_result_files(self, simulation):
simulation_name = simulation.name.replace('.', 'd')
for file_name in os.listdir(self.getResultsPath()):
if file_name.startswith(simulation_name):
file_path = os.path.join(self.getResultsPath(), file_name)
logging.debug("Remove file: %s", file_name)
os.remove(file_path)
def getResults(self, parameters):
return self._simulationResultsList[parameters]
def getAllResults(self):
return self._simulationResultsList
def _initData(self): #pragma: no cover
raise NotImplementedError
def getAnalysisName(self): #pragma: no cover
raise NotImplementedError
def createSpecimen(self, parameters): #pragma: no cover
raise NotImplementedError
def readResultsFiles(self):
logging.info("readResultsFiles")
self.readResults()
def analyzeResultsFiles(self): #pragma: no cover
raise NotImplementedError
    def readOneResults(self, simulation): #pragma: no cover
raise NotImplementedError
def generateResultsKey(self, simulation):
variedParameterLabels = self.getVariedParameterLabels()
key = self._createKey(variedParameterLabels, simulation)
return tuple(key)
def run(self, batchFile):
self._initData()
options = _getOptions()
if options == ANALYZE_TYPE_GENERATE_INPUT_FILE:
self.generateInputFiles(batchFile)
batchFile.write(self.getSimulationsPath())
if options == ANALYZE_TYPE_CHECK_PROGRESS:
self.checkProgress()
if options == ANALYZE_TYPE_READ_RESULTS:
self.readResultsFiles()
if options == ANALYZE_TYPE_ANALYZE_RESULTS:
if self.use_hdf5:
self.analyze_results_hdf5()
else:
self.analyzeResultsFiles()
if options == ANALYZE_TYPE_ANALYZE_SCHEDULED_READ:
self.readResultsFiles()
scheduler = BlockingScheduler()
if self.read_interval_h is not None and self.read_interval_m is None:
scheduler.add_job(self.readResultsFiles, 'interval', hours=self.read_interval_h, coalesce=True)
else:
scheduler.add_job(self.readResultsFiles, 'interval', minutes=self.read_interval_m, coalesce=True)
print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
try:
scheduler.start()
except (KeyboardInterrupt, SystemExit):
pass
def _computeMCXrayFwhm_keV(self, detectorNoise_eV, xrayEnergy_keV):
xrayEnergy_eV = xrayEnergy_keV*1.0e3
factorK_eV = 2.6231
fwhm_eV = math.sqrt(detectorNoise_eV*detectorNoise_eV + factorK_eV*xrayEnergy_eV)
fwhm_keV = fwhm_eV * 1.0e-3
return fwhm_keV
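    # Illustrative check (assumed values, not a calibrated detector): with a
    # detector noise of 50 eV at the Mn Ka energy of 5.8988 keV,
    # fwhm_eV = sqrt(50**2 + 2.6231*5898.8) ~ 134 eV, i.e. about 0.134 keV.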
def countsFromFixedWidth(self, xrayEnergies_keV, position_keV, width_keV, counts):
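        """Sum `counts` over the energy window of `width_keV` centred on
        `position_keV` along the `xrayEnergies_keV` axis."""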
counts = np.array(counts)
v1 = position_keV - width_keV/2.0
v2 = position_keV + width_keV/2.0
maskArray = np.ma.masked_outside(xrayEnergies_keV, v1, v2)
counts = np.sum(counts[~maskArray.mask])
return counts
def _computeSNR(self, IA, IB):
snr = IA/math.sqrt(2.0*IB)
return snr
def _computeCDmin(self, CD0, snr0):
CDmin = 3.0 * CD0 / snr0
return CDmin
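    # Illustrative check (assumed counts): IA = 1000 and IB = 50 give
    # snr = 1000/sqrt(2*50) = 100; a feature with CD0 = 10 nm measured at
    # snr0 = 100 then gives CDmin = 3*10/100 = 0.3 nm.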
def getVariedParameterLabels(self):
return self._simulationsParameters.getVariedParameterLabels()
def _createKey(self, variedParameterLabels, simulation):
parameters = simulation.getParameters()
key = []
for label in variedParameterLabels:
value = parameters[label]
key.append(value)
return key
@property
def overwrite(self):
return self._overwrite
@overwrite.setter
def overwrite(self, overwrite):
self._overwrite = overwrite
@property
def resetCache(self):
return self._resetCache
@resetCache.setter
def resetCache(self, resetCache):
self._resetCache = resetCache
@property
def useSerialization(self):
return self._useSerialization
@useSerialization.setter
def useSerialization(self, useSerialization):
self._useSerialization = useSerialization
@property
def verbose(self):
return self._verbose
@verbose.setter
def verbose(self, verbose):
self._verbose = verbose
@property
def createBackup(self):
return self._createBackup
@createBackup.setter
def createBackup(self, createBackup):
self._createBackup = createBackup
|
{
"content_hash": "64bfee043ba7b312c4136df871c9695b",
"timestamp": "",
"source": "github",
"line_count": 814,
"max_line_length": 182,
"avg_line_length": 39.84398034398034,
"alnum_prop": 0.6238090833410416,
"repo_name": "drix00/pymcxray",
"id": "1421bdc28f683f50cd3b11ebd46772a2930b039b",
"size": "32455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymcxray/mcxray.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2299"
},
{
"name": "Python",
"bytes": "933760"
}
],
"symlink_target": ""
}
|
import numpy as np
import time
import logging
from collections import Counter
from ShortTextCodec import NonEncodableTextException
from sklearn.preprocessing import OneHotEncoder
DEBUG_TIMING = False
# Taken from StackOverflow
def timeit(f):
if not DEBUG_TIMING:
return f
def timed(*args, **kw):
ts = time.time()
result = f(*args, **kw)
te = time.time()
        print('func:%r took: %2.4f sec' %
              (f.__name__, te - ts))
return result
return timed
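# Usage sketch (the decorated name is illustrative); timing output only
# appears when DEBUG_TIMING is True:
#   @timeit
#   def encode_corpus(path):
#       ...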
def vectors_from_txtfile(fname, codec, limit=-1, mutagen=None):
    """Encode each line of fname with codec and return a one-hot encoding of
    the vectors; stop after `limit` lines when limit is positive (-1 reads
    everything)."""
    skipped = Counter()
    vecs = []
    # A context manager guarantees the file handle is closed.
    with open(fname) as f:
        for line in f:
            line = line.strip()
            try:
                vecs.append(codec.encode(line, mutagen=mutagen))
                if len(vecs) == limit:
                    break
            except NonEncodableTextException as e:
                # Too long, or illegal characters
                skipped[e.reason] += 1
logging.debug("Gathered {} vectors. Skipped {} ({})".format(len(vecs),
sum(skipped.values()), dict(skipped)))
vecs = np.asarray(vecs)
# TODO: Why default to dtype=float? Seems wasteful? Maybe it doesn't really matter. Actually, docs here seem inconsistent? Constructor docs say default float. transform docs say int. Should file a bug on sklearn.
return OneHotEncoder(len(codec.alphabet)).fit_transform(vecs)
# Adapted from sklearn.utils.extmath.softmax
def softmax(X, copy=True):
if copy:
X = np.copy(X)
# This will cause overflow when large values are exponentiated.
# Hence the largest value in each row is subtracted from each data
max_prob = np.max(X, axis=2).reshape((X.shape[0], X.shape[1], 1))
X -= max_prob
np.exp(X, X)
sum_prob = np.sum(X, axis=2).reshape((X.shape[0], X.shape[1], 1))
X /= sum_prob
return X
def softmax_and_sample(X, copy=True):
"""
Given an array of 2-d arrays, each having shape (M, N) representing M softmax
units with N possible values each, return an array of the same shape where
each N-dimensional inner array has a 1 at one index, and zero everywhere
else. The 1 is assigned according to the corresponding softmax probabilities
(i.e. np.exp(X) / np.sum(np.exp(X)) )
Parameters
----------
X: array-like, shape (n_samples, M, N), dtype=float
Argument to the logistic function
copy: bool, optional
Copy X or not.
Returns
-------
out: array of 0,1, shape (n_samples, M, N)
Softmax function evaluated at every point in x and sampled
"""
    a, b, c = X.shape
X_shape = X.shape
X = softmax(X, copy)
# We've got our probabilities, now sample from them
thresholds = np.random.rand(X.shape[0], X.shape[1], 1)
cumsum = np.cumsum(X, axis=2, out=X)
x, y, z = np.indices(cumsum.shape)
# This relies on the fact that, if there are multiple instances of the max
# value in an array, argmax returns the index of the first one
to_select = np.argmax(cumsum > thresholds, axis=2).reshape(a, b, 1)
bin_sample = np.zeros(X_shape)
bin_sample[x, y, to_select] = 1
return bin_sample
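if __name__ == '__main__':
    # Minimal sanity sketch (shapes are illustrative): draw one-hot samples
    # for a batch of 2 sequences, each with 3 softmax units over a 4-symbol
    # alphabet, and check that exactly one value is set per unit.
    demo_logits = np.random.randn(2, 3, 4)
    demo_sample = softmax_and_sample(demo_logits)
    assert demo_sample.shape == (2, 3, 4)
    assert (demo_sample.sum(axis=2) == 1).all()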
|
{
"content_hash": "6ec383f7c28e0c58dcba7a6c33c34cd3",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 216,
"avg_line_length": 33.270833333333336,
"alnum_prop": 0.632122730118973,
"repo_name": "colinmorris/char-rbm",
"id": "7b1baf11c44990f86e0ee4b7d27d490ee8e2c9b7",
"size": "3194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62251"
}
],
"symlink_target": ""
}
|
"""
Finance module forms
"""
from django.shortcuts import get_object_or_404
from django import forms
from anaf.identities.models import Contact
from models import Transaction, Liability, Category, Account, Asset, Equity, Currency, Tax
from anaf.sales.models import SaleOrder
from anaf.core.models import Object, ModuleSetting
from django.core.urlresolvers import reverse
from anaf.core.decorators import preprocess_form
from django.utils.translation import ugettext as _
from anaf.sales.forms import standard_currencies
preprocess_form()
class MassActionForm(forms.Form):
""" Mass action form for Transactions & Liabilities """
category = forms.ModelChoiceField(queryset=[], required=False)
delete = forms.ChoiceField(label=_("Delete"), choices=(('', '-----'), ('delete', _('Delete Completely')),
('trash', _('Move to Trash'))), required=False)
instance = None
def __init__(self, user, *args, **kwargs):
if 'instance' in kwargs:
self.instance = kwargs['instance']
del kwargs['instance']
super(MassActionForm, self).__init__(*args, **kwargs)
self.fields['delete'] = forms.ChoiceField(label=_("Delete"), choices=(('', '-----'),
('delete', _(
'Delete Completely')),
('trash', _('Move to Trash'))),
required=False)
self.fields['category'].label = _("Category")
self.fields['category'].queryset = Object.filter_permitted(
user, Category.objects, mode='x')
self.fields['category'].label = _("Add to Category:")
def save(self, *args, **kwargs):
"Process form"
if self.instance and self.is_valid():
if self.cleaned_data['category']:
self.instance.category = self.cleaned_data['category']
self.instance.save()
if self.cleaned_data['delete']:
if self.cleaned_data['delete'] == 'delete':
self.instance.delete()
if self.cleaned_data['delete'] == 'trash':
self.instance.trash = True
self.instance.save()
class CategoryForm(forms.ModelForm):
""" Category form """
def __init__(self, user, *args, **kwargs):
super(CategoryForm, self).__init__(*args, **kwargs)
self.fields['name'].label = _("Name")
self.fields['details'].label = _("Details")
class Meta:
"Category Form"
model = Category
fields = ('name', 'details')
class AccountForm(forms.ModelForm):
""" Account form """
def __init__(self, user, *args, **kwargs):
super(AccountForm, self).__init__(*args, **kwargs)
self.fields['name'].label = _("Name")
self.fields['owner'].label = _("Owner")
self.fields['owner'].queryset = Object.filter_permitted(
user, Contact.objects)
self.fields['owner'].widget.attrs.update({'class': 'autocomplete',
'callback': reverse('contacts:location_add')})
self.fields['owner'].widget.attrs.update(
{'popuplink': reverse('contacts:contact-add')})
self.fields['balance_currency'].label = _("Currency")
self.fields['balance_currency'].widget.attrs.update(
{'popuplink': reverse('finance_currency_add')})
try:
self.fields['balance_currency'].initial = Currency.objects.get(
is_default=True)
        except Exception:
pass
self.fields['balance_display'].label = _("Initial Balance")
self.fields['details'].label = _("Details")
class Meta:
"Account Form"
model = Account
fields = (
'name', 'owner', 'balance_currency', 'balance_display', 'details')
class AccountFilterForm(forms.ModelForm):
""" Filters definition """
def __init__(self, user, skip=None, *args, **kwargs):
if skip is None:
skip = []
super(AccountFilterForm, self).__init__(*args, **kwargs)
if 'owner' in skip:
del self.fields['owner']
else:
self.fields['owner'].queryset = Object.filter_permitted(
user, Contact.objects)
self.fields['owner'].required = False
self.fields['owner'].label = _("Owner")
self.fields['owner'].help_text = ""
self.fields['owner'].widget.attrs.update({'class': 'autocomplete',
'callback': reverse('contacts:location_add')})
class Meta:
"Account Filter Form"
model = Account
fields = ['owner']
class AssetForm(forms.ModelForm):
""" Asset form """
def __init__(self, user, *args, **kwargs):
super(AssetForm, self).__init__(*args, **kwargs)
self.fields['name'].label = _("Name")
self.fields['asset_type'].label = _("Asset type")
self.fields['initial_value'].label = _("Initial value")
self.fields['lifetime'].label = _("Lifetime (years)")
self.fields['endlife_value'].label = _("Endlife value")
self.fields['depreciation_rate'].label = _("Depreciation rate")
self.fields['purchase_date'].label = _("Purchase date")
self.fields['purchase_date'].widget.attrs.update(
{'class': 'datepicker'})
self.fields['current_value'].label = _("Current value")
self.fields['owner'].queryset = Object.filter_permitted(
user, Contact.objects)
self.fields['owner'].widget.attrs.update({'class': 'autocomplete',
'callback': reverse('contacts:location_add')})
self.fields['owner'].widget.attrs.update(
{'popuplink': reverse('contacts:contact-add')})
class Meta:
"Asset Form"
model = Asset
fields = ('name', 'asset_type', 'initial_value', 'lifetime', 'endlife_value',
'depreciation_rate', 'depreciation_type', 'purchase_date', 'current_value', 'owner')
class AssetFilterForm(forms.ModelForm):
""" Filters definition """
def __init__(self, user, skip=None, *args, **kwargs):
if skip is None:
skip = []
super(AssetFilterForm, self).__init__(*args, **kwargs)
        # 'purchase_date_from' and 'purchase_date_to' are not model fields,
        # so they only exist when created here.
        if 'purchase_date_from' not in skip:
            self.fields['purchase_date_from'] = forms.DateField(
                label=_("Purchase Date From"), required=False)
            self.fields['purchase_date_from'].widget.attrs.update(
                {'class': 'datepicker'})
        if 'purchase_date_to' not in skip:
            self.fields['purchase_date_to'] = forms.DateField(
                label=_("Purchase Date To"), required=False)
            self.fields['purchase_date_to'].widget.attrs.update(
                {'class': 'datepicker'})
if 'asset_type' in skip:
del self.fields['asset_type']
else:
self.fields['asset_type'].label = _("Asset Type")
self.fields['asset_type'].help_text = ""
self.fields['asset_type'].required = False
if 'owner' in skip:
del self.fields['owner']
else:
self.fields['owner'].queryset = Object.filter_permitted(
user, Contact.objects)
self.fields['owner'].required = False
self.fields['owner'].label = _("Owner")
self.fields['owner'].help_text = ""
self.fields['owner'].widget.attrs.update({'class': 'autocomplete',
'callback': reverse('contacts:location_add')})
class Meta:
"Asset Filter Form"
model = Asset
fields = ('owner', 'asset_type')
class EquityForm(forms.ModelForm):
""" Equity form """
def __init__(self, user, *args, **kwargs):
super(EquityForm, self).__init__(*args, **kwargs)
self.fields['equity_type'].label = _("Equity type")
self.fields['issue_price'].label = _("Issue price")
self.fields['sell_price'].label = _("Sell price")
self.fields['issuer'].label = _("Issuer")
self.fields['owner'].label = _("Owner")
self.fields['amount'].label = _("Quantity")
self.fields['purchase_date'].label = _("Purchase date")
self.fields['details'].label = _("Details")
self.fields['owner'].queryset = Object.filter_permitted(
user, Contact.objects)
self.fields['owner'].widget.attrs.update({'class': 'autocomplete',
'callback': reverse('contacts:location_add')})
self.fields['owner'].widget.attrs.update(
{'popuplink': reverse('contacts:contact-add')})
self.fields['issuer'].queryset = Object.filter_permitted(
user, Contact.objects)
self.fields['issuer'].widget.attrs.update({'class': 'autocomplete',
'callback': reverse('contacts:location_add')})
self.fields['issuer'].widget.attrs.update(
{'popuplink': reverse('contacts:contact-add')})
try:
conf = ModuleSetting.get_for_module(
'anaf.finance', 'my_company')[0]
self.fields['issuer'].initial = int(conf.value)
except Exception:
pass
self.fields['purchase_date'].widget.attrs.update(
{'class': 'datepicker'})
class Meta:
"Equity Form"
model = Equity
fields = ('equity_type', 'issue_price', 'sell_price', 'issuer',
'owner', 'amount', 'purchase_date', 'details')
class EquityFilterForm(forms.ModelForm):
""" Filters definition """
def __init__(self, user, skip=None, *args, **kwargs):
if skip is None:
skip = []
super(EquityFilterForm, self).__init__(*args, **kwargs)
        # 'purchase_date_from' and 'purchase_date_to' are not model fields,
        # so they only exist when created here.
        if 'purchase_date_from' not in skip:
            self.fields['purchase_date_from'] = forms.DateField(
                label=_("Purchase Date From"), required=False)
            self.fields['purchase_date_from'].widget.attrs.update(
                {'class': 'datepicker'})
        if 'purchase_date_to' not in skip:
            self.fields['purchase_date_to'] = forms.DateField(
                label=_("Purchase Date To"), required=False)
            self.fields['purchase_date_to'].widget.attrs.update(
                {'class': 'datepicker'})
if 'equity_type' in skip:
del self.fields['equity_type']
else:
self.fields['equity_type'].label = _("Equity Type")
self.fields['equity_type'].help_text = ""
self.fields['equity_type'].required = False
if 'issuer' in skip:
del self.fields['issuer']
else:
self.fields['issuer'].queryset = Object.filter_permitted(
user, Contact.objects)
self.fields['issuer'].label = _("Issuer")
self.fields['issuer'].help_text = ""
self.fields['issuer'].required = False
self.fields['issuer'].widget.attrs.update({'class': 'autocomplete',
'callback': reverse('contacts:location_add')})
if 'owner' in skip:
del self.fields['owner']
else:
self.fields['owner'].queryset = Object.filter_permitted(
user, Contact.objects)
self.fields['owner'].required = False
self.fields['owner'].label = _("Owner")
self.fields['owner'].help_text = ""
self.fields['owner'].widget.attrs.update({'class': 'autocomplete',
'callback': reverse('contacts:location_add')})
class Meta:
"Equity Filter Form"
model = Equity
fields = ('issuer', 'owner', 'equity_type')
class ReceivableForm(forms.ModelForm):
""" Receivable form """
def __init__(self, user, *args, **kwargs):
super(ReceivableForm, self).__init__(*args, **kwargs)
self.fields['name'].label = _("Name")
self.fields['category'].label = _("Category")
self.fields['source'].label = _("Source")
self.fields['target'].label = _("Target")
self.fields['account'].label = _("Bank Account")
self.fields['due_date'].label = _("Due date")
self.fields['value_currency'].label = _("Currency")
self.fields['value_currency'].widget.attrs.update(
{'popuplink': reverse('finance_currency_add')})
        try:
            self.fields['value_currency'].initial = Currency.objects.get(
                is_default=True)
        except Exception:
            pass
self.fields['value_display'].label = _("Value")
self.fields['details'].label = _("Details")
self.fields['source'].queryset = Object.filter_permitted(
user, Contact.objects)
self.fields['source'].widget.attrs.update({'class': 'autocomplete',
'callback': reverse('contacts:location_add')})
self.fields['source'].widget.attrs.update(
{'popuplink': reverse('contacts:contact-add')})
self.fields['account'].queryset = Object.filter_permitted(
user, Account.objects)
try:
conf = ModuleSetting.get_for_module(
'anaf.finance', 'default_account')[0]
self.fields['account'].initial = int(conf.value)
except Exception:
pass
self.fields['due_date'].widget.attrs.update({'class': 'datepicker'})
del self.fields['target']
class Meta:
"Receivable Form"
model = Liability
fields = ('name', 'category', 'source', 'target', 'account',
'due_date', 'value_currency', 'value_display', 'details')
class TransactionForm(forms.ModelForm):
""" Transaction form """
def __init__(self, user, liability_id=None, order_id=None, *args, **kwargs):
super(TransactionForm, self).__init__(*args, **kwargs)
self.fields['name'].label = _("Description")
self.fields['category'].label = _("Category")
self.fields['source'].label = _("Source")
self.fields['target'].label = _("Target")
self.fields['account'].label = _("Bank Account")
self.fields['datetime'].label = _("Date & Time")
self.fields['value_currency'].label = _("Currency")
self.fields['value_currency'].widget.attrs.update(
{'popuplink': reverse('finance_currency_add')})
        try:
            self.fields['value_currency'].initial = Currency.objects.get(
                is_default=True)
        except Exception:
            pass
self.fields['value_display'].label = _("Value")
self.fields['details'].label = _("Details")
self.fields['source'].queryset = Object.filter_permitted(
user, Contact.objects)
self.fields['target'].queryset = Object.filter_permitted(
user, Contact.objects)
self.fields['source'].widget.attrs.update({'class': 'autocomplete',
'callback': reverse('contacts:location_add')})
self.fields['target'].widget.attrs.update({'class': 'autocomplete',
'callback': reverse('contacts:location_add')})
self.fields['source'].widget.attrs.update(
{'popuplink': reverse('contacts:contact-add')})
self.fields['target'].widget.attrs.update(
{'popuplink': reverse('contacts:contact-add')})
self.fields['datetime'].widget.attrs.update(
{'class': 'datetimepicker'})
self.fields['account'].queryset = Object.filter_permitted(
user, Account.objects)
try:
conf = ModuleSetting.get_for_module(
'anaf.finance', 'default_account')[0]
self.fields['account'].initial = int(conf.value)
except Exception:
pass
self.fields['liability'].queryset = Object.filter_permitted(
user, Liability.objects)
self.fields['liability'].label = _("Liability / Receivable")
if order_id:
order = get_object_or_404(SaleOrder, pk=order_id)
self.fields['name'].initial = order.reference
if order.client:
self.fields['source'].initial = order.client
# default company
try:
conf = ModuleSetting.get_for_module(
'anaf.finance', 'my_company')[0]
self.fields['target'].initial = Contact.objects.get(
pk=int(conf.value))
except Exception:
pass
self.fields['details'].initial = order.details
self.fields['value_display'].initial = order.balance_due()
self.fields['value_currency'].initial = order.currency
if liability_id:
self.fields['liability'].initial = liability_id
liability = get_object_or_404(Liability, pk=liability_id)
self.fields['name'].initial = liability.name
self.fields['source'].initial = liability.source
self.fields['target'].initial = liability.target
self.fields['details'].initial = liability.details
self.fields['category'].initial = liability.category
self.fields['account'].initial = liability.account
self.fields['value_display'].initial = liability.value_display
self.fields['value_currency'].initial = liability.value_currency
class Meta:
"Transaction Form"
model = Transaction
fields = ('name', 'category', 'source', 'target', 'account',
'datetime', 'liability', 'value_currency', 'value_display', 'details')
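# The form constructors above take the acting user as their first argument,
# before the usual Django form kwargs. A usage sketch (view-layer names are
# illustrative):
#   form = TransactionForm(request.user, data=request.POST)
#   if form.is_valid():
#       transaction = form.save()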
class TransactionFilterForm(forms.ModelForm):
""" Filters definition """
def __init__(self, user, skip=None, *args, **kwargs):
if skip is None:
skip = []
super(TransactionFilterForm, self).__init__(*args, **kwargs)
        # 'datefrom' and 'dateto' are not model fields, so they only exist
        # when created here; skipping them simply means not adding them.
        if 'datefrom' not in skip:
            self.fields['datefrom'] = forms.DateField(
                label=_("Date From"), required=False)
            self.fields['datefrom'].widget.attrs.update(
                {'class': 'datepicker'})
        if 'dateto' not in skip:
            self.fields['dateto'] = forms.DateField(
                label=_("Date To"), required=False)
            self.fields['dateto'].widget.attrs.update({'class': 'datepicker'})
if 'category' in skip:
del self.fields['category']
else:
self.fields['category'].queryset = Object.filter_permitted(
user, Category.objects)
self.fields['category'].label = _("Category")
self.fields['category'].help_text = ""
self.fields['category'].required = False
if 'source' in skip:
del self.fields['source']
else:
self.fields['source'].queryset = Object.filter_permitted(
user, Contact.objects)
self.fields['source'].label = _("Source")
self.fields['source'].help_text = ""
self.fields['source'].required = False
self.fields['source'].widget.attrs.update({'class': 'autocomplete',
'callback': reverse('contacts:location_add')})
if 'target' in skip:
del self.fields['target']
else:
self.fields['target'].queryset = Object.filter_permitted(
user, Contact.objects)
self.fields['target'].required = False
self.fields['target'].label = _("Target")
self.fields['target'].help_text = ""
self.fields['target'].widget.attrs.update({'class': 'autocomplete',
'callback': reverse('contacts:location_add')})
class Meta:
"Transaction Filter Form"
model = Transaction
fields = ('category', 'source', 'target')
class LiabilityForm(forms.ModelForm):
""" Folder form """
def __init__(self, user, *args, **kwargs):
super(LiabilityForm, self).__init__(*args, **kwargs)
self.fields['name'].label = _("Name")
self.fields['category'].label = _("Category")
self.fields['source'].label = _("Source")
self.fields['target'].label = _("Target")
self.fields['account'].label = _("Bank Account")
self.fields['due_date'].label = _("Due date")
self.fields['value_currency'].label = _("Currency")
self.fields['value_currency'].widget.attrs.update(
{'popuplink': reverse('finance_currency_add')})
        try:
            self.fields['value_currency'].initial = Currency.objects.get(
                is_default=True)
        except Exception:
            pass
self.fields['value_display'].label = _("Value")
self.fields['details'].label = _("Details")
self.fields['target'].queryset = Object.filter_permitted(
user, Contact.objects)
self.fields['target'].widget.attrs.update({'class': 'autocomplete',
'callback': reverse('contacts:location_add')})
self.fields['target'].widget.attrs.update(
{'popuplink': reverse('contacts:contact-add')})
self.fields['account'].queryset = Object.filter_permitted(
user, Account.objects)
try:
conf = ModuleSetting.get_for_module(
'anaf.finance', 'default_account')[0]
self.fields['account'].initial = int(conf.value)
except Exception:
pass
self.fields['due_date'].widget.attrs.update({'class': 'datepicker'})
del self.fields['source']
class Meta:
"Liability Form"
model = Liability
fields = ('name', 'category', 'source', 'target', 'account',
'due_date', 'value_currency', 'value_display', 'details')
class LiabilityFilterForm(forms.ModelForm):
""" Filters definition """
def __init__(self, user, skip=None, *args, **kwargs):
if skip is None:
skip = []
super(LiabilityFilterForm, self).__init__(*args, **kwargs)
        # 'due_date_from' and 'due_date_to' are not model fields, so they
        # only exist when created here.
        if 'due_date_from' not in skip:
            self.fields['due_date_from'] = forms.DateField(
                label=_("Due Date From:"), required=False)
            self.fields['due_date_from'].widget.attrs.update(
                {'class': 'datepicker'})
        if 'due_date_to' not in skip:
            self.fields['due_date_to'] = forms.DateField(
                label=_("Due Date To:"), required=False)
            self.fields['due_date_to'].widget.attrs.update(
                {'class': 'datepicker'})
if 'category' in skip:
del self.fields['category']
else:
self.fields['category'].queryset = Object.filter_permitted(
user, Category.objects)
self.fields['category'].label = _("Category")
self.fields['category'].help_text = ""
self.fields['category'].required = False
if 'source' in skip:
del self.fields['source']
else:
self.fields['source'].queryset = Object.filter_permitted(
user, Contact.objects)
self.fields['source'].label = _("Source")
self.fields['source'].help_text = ""
self.fields['source'].required = False
self.fields['source'].widget.attrs.update({'class': 'autocomplete',
'callback': reverse('contacts:location_add')})
if 'target' in skip:
del self.fields['target']
else:
self.fields['target'].queryset = Object.filter_permitted(
user, Contact.objects)
self.fields['target'].required = False
self.fields['target'].label = _("Target")
self.fields['target'].help_text = ""
self.fields['target'].widget.attrs.update({'class': 'autocomplete',
'callback': reverse('contacts:location_add')})
if 'account' in skip:
del self.fields['account']
else:
self.fields['account'].queryset = Object.filter_permitted(
user, Account.objects)
self.fields['account'].required = False
self.fields['account'].label = _("Account")
self.fields['account'].help_text = ""
class Meta:
"Liability Filter Form"
model = Liability
fields = ('category', 'source', 'target', 'account')
class SettingsForm(forms.Form):
""" Administration settings form """
default_currency = forms.ModelChoiceField(
label='Base Currency', queryset=[])
my_company = forms.ModelChoiceField(label='My Company', queryset=[])
default_account = forms.ModelChoiceField(
label='Default Account', queryset=[])
def __init__(self, user, *args, **kwargs):
"Sets choices and initial value"
super(SettingsForm, self).__init__(*args, **kwargs)
self.fields['my_company'].queryset = Object.filter_permitted(
user, Contact.objects)
self.fields['my_company'].widget.attrs.update({'class': 'autocomplete',
'callback': reverse('contacts:location_add')})
self.fields['default_account'].queryset = Object.filter_permitted(
user, Account.objects)
# Translation
self.fields['default_currency'].label = _('Base Currency')
self.fields['my_company'].label = _('My Company')
self.fields['default_account'].label = _('Default Account')
try:
self.fields['default_currency'].widget.attrs.update(
{'popuplink': reverse('finance_currency_add')})
self.fields['default_currency'].queryset = Currency.objects.all()
self.fields['default_currency'].initial = Currency.objects.get(
is_default=True)
except Exception:
pass
try:
conf = ModuleSetting.get_for_module(
'anaf.finance', 'my_company')[0]
my_company = Contact.objects.get(pk=int(conf.value))
self.fields['my_company'].initial = my_company.id
except Exception:
pass
try:
conf = ModuleSetting.get_for_module(
'anaf.finance', 'default_account')[0]
default_account = Account.objects.get(pk=int(conf.value))
self.fields['default_account'].initial = default_account.id
except Exception:
pass
def clean_my_company(self, *args, **kwargs):
"Check that my company has an account"
my_company = self.cleaned_data['my_company']
if not my_company.account_set.count():
raise forms.ValidationError(
_("Your company has to have at least one Financial Account"))
return my_company
def clean_default_account(self):
"Check that account owner is the same as my company"
account = self.cleaned_data['default_account']
try:
company = self.cleaned_data['my_company']
if not account.owner_id == company.id:
raise forms.ValidationError(
_("Default Account has to belong to your company"))
except KeyError:
pass
return account
def save(self):
"Form processor"
try:
ModuleSetting.set_for_module('my_company',
self.cleaned_data['my_company'].id,
'anaf.finance')
ModuleSetting.set_for_module('default_account',
self.cleaned_data[
'default_account'].id,
'anaf.finance')
            # ModelChoiceField already returns a Currency instance.
            currency = self.cleaned_data['default_currency']
currency.is_default = True
currency.save()
return True
except Exception:
return False
#
# Currency
#
class CurrencyForm(forms.ModelForm):
"Currency Form"
code = forms.ChoiceField(
label=_("Currency Code"), choices=standard_currencies)
def __init__(self, user, *args, **kwargs):
super(CurrencyForm, self).__init__(*args, **kwargs)
class Meta:
"Currency Form"
model = Currency
fields = ('name', 'code', 'symbol', 'factor') # ,'is_active')
#
# Tax
#
class TaxForm(forms.ModelForm):
"Tax Form"
def __init__(self, user, *args, **kwargs):
super(TaxForm, self).__init__(*args, **kwargs)
class Meta:
"Tax Form"
model = Tax
fields = ('name', 'rate', 'compound')
|
{
"content_hash": "70f8c3c704b7427632692216e702b14d",
"timestamp": "",
"source": "github",
"line_count": 777,
"max_line_length": 109,
"avg_line_length": 38.61647361647362,
"alnum_prop": 0.5449091818030328,
"repo_name": "tovmeod/anaf",
"id": "19cb2a5546c8d77d34e04c84646ae0b37f2cb313",
"size": "30005",
"binary": false,
"copies": "1",
"ref": "refs/heads/drf",
"path": "anaf/finance/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "400736"
},
{
"name": "HTML",
"bytes": "1512873"
},
{
"name": "JavaScript",
"bytes": "2136807"
},
{
"name": "PHP",
"bytes": "25856"
},
{
"name": "Python",
"bytes": "2045934"
},
{
"name": "Shell",
"bytes": "18005"
},
{
"name": "TSQL",
"bytes": "147855"
}
],
"symlink_target": ""
}
|
from rackattack.common import globallock
from rackattack.tcp import debug
from inaugurator.server import server
from inaugurator.server import rabbitmqwrapper
import logging
class Inaugurate:
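    """Dispatch inaugurator server events (check-in, done, progress and
    failure) to the callbacks registered per host id; every callback is
    invoked while holding the global lock."""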
def __init__(self, filesPath):
self._registered = {}
self._rabbit = rabbitmqwrapper.RabbitMQWrapper(filesPath)
self._server = server.Server(checkInCallback=self._checkIn,
doneCallback=self._done,
progressCallback=self._progress,
failedCallback=self._failure)
def register(self, id, checkInCallback, doneCallback, progressCallback, failureCallback):
assert globallock.assertLocked()
assert id not in self._registered
self._server.listenOnID(id)
self._registered[id] = dict(
checkInCallback=checkInCallback, doneCallback=doneCallback,
progressCallback=progressCallback, failureCallback=failureCallback)
def unregister(self, id):
assert globallock.assertLocked()
assert id in self._registered
del self._registered[id]
self._server.stopListeningOnID(id)
def provideLabel(self, id, label):
with debug.logNetwork("Providing label '%(label)s' to '%(id)s'" % dict(label=label, id=id)):
self._server.provideLabel(id=id, label=label)
def _checkIn(self, id):
logging.info("%(id)s inaugurator check in", dict(id=id))
with globallock.lock():
if id not in self._registered:
logging.error("Unknown Inaugurator checked in: %(id)s", dict(id=id))
return
self._registered[id]['checkInCallback']()
def _done(self, id):
logging.info("%(id)s done", dict(id=id))
with globallock.lock():
if id not in self._registered:
logging.error("Unknown Inaugurator done: %(id)s", dict(id=id))
return
self._registered[id]['doneCallback']()
def _progress(self, id, progress):
with globallock.lock():
if id not in self._registered:
logging.error("Unknown Inaugurator progress: %(id)s", dict(id=id))
return
self._registered[id]['progressCallback'](progress)
def _failure(self, id, message):
with globallock.lock():
if id not in self._registered:
logging.error("Unknown Inaugurator failure: %(id)s", dict(id=id))
return
self._registered[id]['failureCallback'](message)
|
{
"content_hash": "d3935d15373842eb421b447a63027ccb",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 100,
"avg_line_length": 40.904761904761905,
"alnum_prop": 0.6061311602638727,
"repo_name": "Stratoscale/rackattack-virtual",
"id": "ddf2fca8b2dd5fc409c65c06acc6698e266623ad",
"size": "2577",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rackattack/common/inaugurate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "3643"
},
{
"name": "Python",
"bytes": "184744"
},
{
"name": "Shell",
"bytes": "1904"
}
],
"symlink_target": ""
}
|
from Root import c
import unittest
class test3(unittest.TestCase):
def test_cc1(self):
self.assertEqual(c.bar1()+12,13)
def test_cc2(self):
self.assertEqual(c.bar2()*c.bar2(),4)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "a24d077550123af40f332837864b2390",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 39,
"avg_line_length": 25.333333333333332,
"alnum_prop": 0.6798245614035088,
"repo_name": "codeboardio/mantra",
"id": "4f1f39871da913f5f8b85005abbe45db6f123e2c",
"size": "228",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_resources/python-unittest/py_several_files2/Root/test/test3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3416"
},
{
"name": "C++",
"bytes": "2328"
},
{
"name": "Haskell",
"bytes": "2068"
},
{
"name": "Java",
"bytes": "31758"
},
{
"name": "JavaScript",
"bytes": "259653"
},
{
"name": "Python",
"bytes": "13015"
},
{
"name": "Shell",
"bytes": "4102"
}
],
"symlink_target": ""
}
|
"""
The flask application package.
"""
import logging
from flask import Flask
app = Flask(__name__, instance_relative_config=True)
app.config.from_object('jarvis_svc.default_settings')
app.config.from_pyfile('app.cfg', silent=True)
logging.basicConfig(filename='jarvis.log', level=logging.WARNING)
import jarvis_svc.views
|
{
"content_hash": "5ae5bc996a090b5b494bce59934497f3",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 65,
"avg_line_length": 25.153846153846153,
"alnum_prop": 0.7614678899082569,
"repo_name": "ztepsic/jarvis",
"id": "02d1c670f400a067287555d9097310e7c0793ef2",
"size": "327",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Jarvis.Svc.Py/Jarvis.Svc.Py/jarvis_svc/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "240"
},
{
"name": "C++",
"bytes": "12213"
},
{
"name": "HTML",
"bytes": "845"
},
{
"name": "Python",
"bytes": "13188"
},
{
"name": "Shell",
"bytes": "1192"
},
{
"name": "TSQL",
"bytes": "20529"
}
],
"symlink_target": ""
}
|
"""
Copyright (C) 2017 Roberto Bruttomesso <roberto.bruttomesso@gmail.com>
This file is distributed under the terms of the 3-clause BSD License.
A copy of the license can be found in the root directory or at
https://opensource.org/licenses/BSD-3-Clause.
Author: Roberto Bruttomesso <roberto.bruttomesso@gmail.com>
Date: 27/03/2017
"""
import unittest
import intrepyd
import intrepyd.circuit
from intrepyd.engine import EngineResult
class MyCirc(intrepyd.circuit.Circuit):
def __init__(self, context, name):
intrepyd.circuit.Circuit.__init__(self, context, name)
def _mk_naked_circuit_impl(self, inputs):
inputKeys = list(inputs)
i1 = inputs[inputKeys[0]]
i2 = inputs[inputKeys[1]]
output = self.context.mk_and(i1, i2)
return output
class TestCircuit(unittest.TestCase):
def test_circuit_01(self):
ctx = intrepyd.Context()
inst1 = MyCirc(ctx, 'mycirc1')
inst2 = MyCirc(ctx, 'mycirc2')
bt = ctx.mk_boolean_type()
i1 = ctx.mk_input('i1', bt)
i2 = ctx.mk_input('i2', bt)
inputs = { 'i1' : i1, 'i2' : i2 }
out1 = inst1.mk_naked_circuit(inputs, True)
out2 = inst2.mk_naked_circuit(inputs, True)
diff = ctx.mk_xor(out1, out2)
bmc = ctx.mk_bmc()
bmc.add_target(diff)
result = bmc.reach_targets()
self.assertEqual(EngineResult.UNREACHABLE, result)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "91459989d0dd58d555517229738159cb",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 70,
"avg_line_length": 31.020833333333332,
"alnum_prop": 0.6326393552719947,
"repo_name": "formalmethods/intrepyd",
"id": "88432fbd8c1c0fcc8dcf36530896b115c682ac1c",
"size": "1489",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_circuit.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ANTLR",
"bytes": "29134"
},
{
"name": "Java",
"bytes": "82352"
},
{
"name": "Python",
"bytes": "844923"
},
{
"name": "Shell",
"bytes": "1049"
}
],
"symlink_target": ""
}
|
from abc import abstractmethod, abstractproperty
from ebu_tt_live.utils import AutoRegisteringABCMeta, AbstractStaticMember, validate_types_only
# Interfaces
# ==========
class ICarriageMechanism(object, metaclass=AutoRegisteringABCMeta):
"""
    Basic interface for the carriage mechanisms
"""
class IProducerCarriage(ICarriageMechanism):
"""
Carriage mechanism interface for producer nodes.
"""
_expects = AbstractStaticMember(validate_types_only)
@classmethod
def expects(cls):
"""
Data type expected
:return:
"""
if isinstance(cls._expects, AbstractStaticMember):
raise TypeError('Classmethod relies on abstract property: \'_expects\'')
return cls._expects
@abstractmethod
def register_producer_node(self, node):
"""
Register the producer node in the carriage mechanism
:param node: The node to connect to.
"""
@abstractproperty
def producer_node(self):
"""
Node accessor
:return:
"""
@abstractmethod
def emit_data(self, data, **kwargs):
"""
Implement protocol specific postprocessing here.
:param kwargs: Extra parameters to send down
:param data:
:return:
"""
@abstractmethod
def resume_producing(self):
"""
This makes sure that the producers can be pulled. This is good for timer or manual triggering
:return:
"""
class IConsumerCarriage(ICarriageMechanism):
"""
Carriage mechanism interface for consumer nodes.
"""
_provides = AbstractStaticMember(validate_types_only)
@classmethod
def provides(cls):
"""
Data type provided
:return:
"""
if isinstance(cls._provides, AbstractStaticMember):
raise TypeError('Classmethod relies on abstract property: \'_provides\'')
return cls._provides
@abstractmethod
def register_consumer_node(self, node):
"""
Register the consumer node in the carriage mechanism
:param node:
:return:
"""
@abstractproperty
def consumer_node(self):
"""
Node accessor
:return:
"""
@abstractmethod
def on_new_data(self, data, **kwargs):
"""
Implement protocol specific preprocessing here.
:param kwargs: Extra parameters to send down
"""
|
{
"content_hash": "c4297d14c072680bf5183f4dcba28aeb",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 101,
"avg_line_length": 22.445454545454545,
"alnum_prop": 0.6103685702713649,
"repo_name": "bbc/ebu-tt-live-toolkit",
"id": "c445fd834390761a8a7c5baf74e7e70e2d7c5fef",
"size": "2469",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ebu_tt_live/carriage/interface.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "827"
},
{
"name": "CSS",
"bytes": "1835"
},
{
"name": "Gherkin",
"bytes": "184126"
},
{
"name": "HTML",
"bytes": "16970"
},
{
"name": "JavaScript",
"bytes": "156508"
},
{
"name": "Makefile",
"bytes": "1320"
},
{
"name": "Python",
"bytes": "665429"
}
],
"symlink_target": ""
}
|
import os
import numpy as np
from skimage.io import imread_collection
from skimage.transform import rescale
def list_paths_labels(label_dir):
    '''label_dir - path to a data directory whose subdirectories act as
    labels, with the data files stored inside them.
    Returns (paths_to_data_list, labels_list).'''
dir_iter = os.walk(label_dir)
next(dir_iter)
X = []
y = []
for i in dir_iter:
for file_name in i[2]:
X.append(i[0]+ "/" + file_name)
y.append(i[0])
return X,y
class image_iter:
''' iterator to get batches of photos and labels
initialize with a list of paths and labels'''
def __init__(self, list_X, list_y, batch_size = 1, scale = 1, normalize = False):
'''image_iter(X, Y)
list_x - list of paths for photos
list_y - list of labels
batch_size
scale - scale the image
        normalize - divide values by 255 to map them into the 0 to 1 range'''
self.list_X = list_X
self.list_y = list_y
        self.length = len(list_y)
self.scale = scale
self.iter_num = 0
self.normalize = normalize
self.batch_size = batch_size
def __iter__(self):
self.iter_num = 0
return self
    def __next__(self):
        if self.iter_num >= self.length:
            raise StopIteration
        ret_batch_X = imread_collection(self.list_X[self.iter_num:self.iter_num + self.batch_size])
        ret_batch_X = list(map(lambda x: rescale(x, scale=self.scale), ret_batch_X))
        ret_batch_y = self.list_y[self.iter_num:self.iter_num + self.batch_size]
        self.iter_num = self.iter_num + self.batch_size
        ret_batch_X = np.array(ret_batch_X, dtype=np.float32)
        if self.normalize:
            ret_batch_X = np.divide(ret_batch_X, 255)
        return ret_batch_X, ret_batch_y
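# Usage sketch (the 'data/<label>/<image>' layout is assumed, not required
# by this module):
#   X, y = list_paths_labels('data')
#   for batch_X, batch_y in image_iter(X, y, batch_size=8, normalize=True):
#       ...  # batch_X is float32 scaled to [0, 1], batch_y the labels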
|
{
"content_hash": "f31054145adb3e52d68f08f043193dde",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 98,
"avg_line_length": 33.32142857142857,
"alnum_prop": 0.5884244372990354,
"repo_name": "AugustasVol/Image_classification_produce",
"id": "f46ea29642ec440af8558470441a8b43c50d63b3",
"size": "1866",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "images_dir_labels.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1141720"
},
{
"name": "Python",
"bytes": "5914"
}
],
"symlink_target": ""
}
|
from octavia.common import constants
from octavia.common import data_models
from octavia.tests.common import constants as ut_constants
def generate_load_balancer_tree(additional_vips=None):
vip = generate_vip()
amps = [generate_amphora(), generate_amphora()]
lb = generate_load_balancer(vip=vip, amphorae=amps,
additional_vips=additional_vips)
return lb
LB_SEED = 0
def generate_load_balancer(vip=None, amphorae=None,
topology=constants.TOPOLOGY_SINGLE,
additional_vips=None):
amphorae = amphorae or []
additional_vips = additional_vips or []
global LB_SEED
LB_SEED += 1
lb = data_models.LoadBalancer(id='lb{0}-id'.format(LB_SEED),
project_id='2',
name='lb{0}'.format(LB_SEED),
description='lb{0}'.format(LB_SEED),
vip=vip,
topology=topology,
amphorae=amphorae)
for amp in lb.amphorae:
amp.load_balancer = lb
amp.load_balancer_id = lb.id
amp.status = constants.AMPHORA_ALLOCATED
if vip:
vip.load_balancer = lb
vip.load_balancer_id = lb.id
for add_vip in additional_vips:
add_vip_obj = data_models.AdditionalVip(
load_balancer_id=lb.id,
ip_address=add_vip.get('ip_address'),
subnet_id=add_vip.get('subnet_id'),
network_id=vip.network_id,
port_id=vip.port_id,
load_balancer=lb
)
lb.additional_vips.append(add_vip_obj)
return lb
VIP_SEED = 0
def generate_vip(load_balancer=None):
global VIP_SEED
VIP_SEED += 1
vip = data_models.Vip(ip_address='10.0.0.{0}'.format(VIP_SEED),
subnet_id=ut_constants.MOCK_VIP_SUBNET_ID,
port_id='vrrp-port-{0}'.format(VIP_SEED),
load_balancer=load_balancer)
if load_balancer:
vip.load_balancer_id = load_balancer.id
return vip
AMP_SEED = 0
def generate_amphora(load_balancer=None):
global AMP_SEED
AMP_SEED += 1
amp = data_models.Amphora(id='amp{0}-id'.format(AMP_SEED),
compute_id='amp{0}-compute-id'.format(AMP_SEED),
status='ACTIVE',
lb_network_ip='99.99.99.{0}'.format(AMP_SEED),
vrrp_ip='55.55.55.{0}'.format(AMP_SEED),
vrrp_port_id='vrrp_port-{0}-id'.format(AMP_SEED),
load_balancer=load_balancer)
if load_balancer:
amp.load_balancer_id = load_balancer.id
return amp
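# Usage sketch: every generator bumps its module-level seed, so repeated
# calls yield unique ids and addresses, e.g.:
#   lb = generate_load_balancer_tree()
#   assert len(lb.amphorae) == 2 and lb.vip.load_balancer is lb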
|
{
"content_hash": "b4f2319d00cca2f5fef2f510a99c8fe9",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 79,
"avg_line_length": 34.641975308641975,
"alnum_prop": 0.5331432644333571,
"repo_name": "openstack/octavia",
"id": "0cc12c464b12905310a4eea24430b63a61829456",
"size": "3411",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "octavia/tests/common/data_model_helpers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "60600"
},
{
"name": "Mako",
"bytes": "922"
},
{
"name": "Python",
"bytes": "6651664"
},
{
"name": "Ruby",
"bytes": "531"
},
{
"name": "Shell",
"bytes": "117966"
}
],
"symlink_target": ""
}
|
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes_bi,
initialize_chain_clean,
start_nodes,
wait_and_assert_operationid_status,
)
from decimal import Decimal
SAPLING_TREE_EMPTY_ROOT = "3e49b5f954aa9d3545bc6c37744661eea48d7c34e3000d82b7f0010c30f4c2fb"
NULL_FIELD = "0000000000000000000000000000000000000000000000000000000000000000"
# Verify block header field 'hashFinalSaplingRoot' (returned in rpc as 'finalsaplingroot')
# is updated when Sapling transactions with outputs (commitments) are mined into a block.
class FinalSaplingRootTest(BitcoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
def setup_network(self, split=False):
self.nodes = start_nodes(4, self.options.tmpdir, extra_args=[[
'-nuparams=5ba81b19:100', # Overwinter
'-nuparams=76b809bb:200', # Sapling
'-txindex' # Avoid JSONRPC error: No information available about transaction
]] * 4 )
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
def run_test(self):
# Activate Overwinter and Sapling
self.nodes[0].generate(200)
self.sync_all()
        # Verify the genesis block contains a null field for what is now called the final sapling root field.
blk = self.nodes[0].getblock("0")
assert_equal(blk["finalsaplingroot"], NULL_FIELD)
# Verify all generated blocks contain the empty root of the Sapling tree.
blockcount = self.nodes[0].getblockcount()
for height in xrange(1, blockcount + 1):
blk = self.nodes[0].getblock(str(height))
assert_equal(blk["finalsaplingroot"], SAPLING_TREE_EMPTY_ROOT)
# Node 0 shields some funds
taddr0 = self.nodes[0].getnewaddress()
saplingAddr0 = self.nodes[0].z_getnewaddress('sapling')
recipients = []
recipients.append({"address": saplingAddr0, "amount": Decimal('20')})
myopid = self.nodes[0].z_sendmany(taddr0, recipients, 1, 0)
mytxid = wait_and_assert_operationid_status(self.nodes[0], myopid)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
# Verify the final Sapling root has changed
blk = self.nodes[0].getblock("201")
root = blk["finalsaplingroot"]
        assert(root != SAPLING_TREE_EMPTY_ROOT)
        assert(root != NULL_FIELD)
# Verify there is a Sapling output description (its commitment was added to tree)
result = self.nodes[0].getrawtransaction(mytxid, 1)
assert_equal(len(result["vShieldedOutput"]), 1)
# Mine an empty block and verify the final Sapling root does not change
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(root, self.nodes[0].getblock("202")["finalsaplingroot"])
# Mine a block with a transparent tx and verify the final Sapling root does not change
taddr1 = self.nodes[1].getnewaddress()
self.nodes[0].sendtoaddress(taddr1, Decimal("1.23"))
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(len(self.nodes[0].getblock("203")["tx"]), 2)
assert_equal(self.nodes[1].z_getbalance(taddr1), Decimal("1.23"))
assert_equal(root, self.nodes[0].getblock("203")["finalsaplingroot"])
# Mine a block with a Sprout shielded tx and verify the final Sapling root does not change
zaddr1 = self.nodes[1].z_getnewaddress('sprout')
recipients = []
recipients.append({"address": zaddr1, "amount": Decimal('10')})
myopid = self.nodes[0].z_sendmany(taddr0, recipients, 1, 0)
wait_and_assert_operationid_status(self.nodes[0], myopid)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(len(self.nodes[0].getblock("204")["tx"]), 2)
assert_equal(self.nodes[1].z_getbalance(zaddr1), Decimal("10"))
assert_equal(root, self.nodes[0].getblock("204")["finalsaplingroot"])
# Mine a block with a Sapling shielded recipient and verify the final Sapling root changes
saplingAddr1 = self.nodes[1].z_getnewaddress("sapling")
recipients = []
recipients.append({"address": saplingAddr1, "amount": Decimal('12.34')})
myopid = self.nodes[0].z_sendmany(saplingAddr0, recipients, 1, 0)
mytxid = wait_and_assert_operationid_status(self.nodes[0], myopid)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(len(self.nodes[0].getblock("205")["tx"]), 2)
assert_equal(self.nodes[1].z_getbalance(saplingAddr1), Decimal("12.34"))
        assert(root != self.nodes[0].getblock("205")["finalsaplingroot"])
# Verify there is a Sapling output description (its commitment was added to tree)
result = self.nodes[0].getrawtransaction(mytxid, 1)
assert_equal(len(result["vShieldedOutput"]), 2) # there is Sapling shielded change
# Mine a block with a Sapling shielded sender and transparent recipient and verify the final Sapling root doesn't change
taddr2 = self.nodes[0].getnewaddress()
recipients = []
recipients.append({"address": taddr2, "amount": Decimal('12.34')})
myopid = self.nodes[1].z_sendmany(saplingAddr1, recipients, 1, 0)
mytxid = wait_and_assert_operationid_status(self.nodes[1], myopid)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(len(self.nodes[0].getblock("206")["tx"]), 2)
assert_equal(self.nodes[0].z_getbalance(taddr2), Decimal("12.34"))
blk = self.nodes[0].getblock("206")
root = blk["finalsaplingroot"]
assert_equal(root, self.nodes[0].getblock("205")["finalsaplingroot"])
if __name__ == '__main__':
FinalSaplingRootTest().main()
|
{
"content_hash": "2ebd46015881dff41960241144e6e170",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 128,
"avg_line_length": 42.855172413793106,
"alnum_prop": 0.6483746379143869,
"repo_name": "litecoinz-project/litecoinz",
"id": "1842ef7dc0c9e2315125148edac6d57b08dc169b",
"size": "6417",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/finalsaplingroot.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "730195"
},
{
"name": "C++",
"bytes": "6761814"
},
{
"name": "HTML",
"bytes": "20970"
},
{
"name": "Java",
"bytes": "30291"
},
{
"name": "M4",
"bytes": "226422"
},
{
"name": "Makefile",
"bytes": "154159"
},
{
"name": "Objective-C",
"bytes": "6536"
},
{
"name": "Objective-C++",
"bytes": "7240"
},
{
"name": "Python",
"bytes": "906305"
},
{
"name": "Shell",
"bytes": "103060"
}
],
"symlink_target": ""
}
|
from django_sqlalchemy.test import *
from django_sqlalchemy.backend import metadata
from django.db import models
# An example of a custom manager called "objects".
class PersonManager(models.Manager):
def get_fun_people(self):
return self.filter(fun=True)
class Person(models.Model):
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
fun = models.BooleanField()
objects = PersonManager()
def __unicode__(self):
return u"%s %s" % (self.first_name, self.last_name)
# An example of a custom manager that sets get_query_set().
class PublishedBookManager(models.Manager):
def get_query_set(self):
return super(PublishedBookManager, self).get_query_set().filter(is_published=True)
class Book(models.Model):
title = models.CharField(max_length=50)
author = models.CharField(max_length=30)
is_published = models.BooleanField()
published_objects = PublishedBookManager()
authors = models.ManyToManyField(Person, related_name='books')
def __unicode__(self):
return self.title
# An example of providing multiple custom managers.
class FastCarManager(models.Manager):
def get_query_set(self):
return super(FastCarManager, self).get_query_set().filter(top_speed__gt=150)
class Car(models.Model):
name = models.CharField(max_length=10)
mileage = models.IntegerField()
top_speed = models.IntegerField(help_text="In miles per hour.")
cars = models.Manager()
fast_cars = FastCarManager()
def __unicode__(self):
return self.name
metadata.create_all()
p1 = Person(first_name='Bugs', last_name='Bunny', fun=True)
p1.save()
p2 = Person(first_name='Droopy', last_name='Dog', fun=False)
p2.save()
b1 = Book(title='How to program', author='Rodney Dangerfield', is_published=True)
b1.save()
b2 = Book(title='How to be smart', author='Albert Einstein', is_published=False)
b2.save()
c1 = Car(name='Corvette', mileage=21, top_speed=180)
c1.save()
c2 = Car(name='Neon', mileage=31, top_speed=100)
c2.save()
class TestCustomManager(object):
def setup(self):
pass
def test_should_see_custom_manager_method(self):
assert_list_same([p1], Person.objects.get_fun_people())
def test_should_extend_default_manager(self):
assert_instance_of(PublishedBookManager, p2.books)
@raises(AttributeError)
def test_should_not_contain_a_default_manager_if_custom_provided(self):
Book.objects
def test_should_extend_default_manager_with_related_manager(self):
assert_instance_of(PersonManager, b2.authors)
def test_should_only_return_published_objects(self):
assert_list_same([b1], Book.published_objects.all())
def test_should_order_by(self):
assert_list_same([c1, c2], Car.cars.order_by('name'))
assert_list_same([c1], Car.fast_cars.all())
def test_should_return_default_manager_as_first_manager_in_class(self):
assert_list_same([c1, c2], Car._default_manager.order_by('name'))
|
{
"content_hash": "f376e9ba9b888e08369ece50dacc78a1",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 90,
"avg_line_length": 32.244680851063826,
"alnum_prop": 0.6954800395908941,
"repo_name": "brosner/django-sqlalchemy",
"id": "eaca64e35014042a9f7e5fd5db5aa4ea086467ed",
"size": "3031",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/managers/test_custom_managers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "141002"
}
],
"symlink_target": ""
}
|
"""
Utility functions for evaluating processing modules.
"""
import sets, math
from nltk.chktype import chktype
def accuracy(reference, test):
"""
Given a list of reference values and a corresponding list of test
values, return the percentage of corresponding values that are
equal. In particular, return the percentage of indices
C{0<i<=len(test)} such that C{test[i] == reference[i]}.
@type reference: C{list}
@param reference: An ordered list of reference values.
@type test: C{list}
@param test: A list of values to compare against the corresponding
reference values.
    @raise ValueError: If C{reference} and C{test} do not have the
        same length.
"""
assert chktype(1, reference, [])
assert chktype(2, test, [])
if len(reference) != len(test):
raise ValueError("Lists must have the same length.")
num_correct = [1 for x,y in zip(reference, test) if x==y]
return float(len(num_correct)) / len(reference)
def precision(reference, test):
"""
Given a set of reference values and a set of test values, return
the percentage of test values that appear in the reference set.
In particular, return |C{reference}S{cap}C{test}|/|C{test}|.
If C{test} is empty, then return C{None}.
@type reference: C{Set}
@param reference: A set of reference values.
@type test: C{Set}
@param test: A set of values to compare against the reference set.
@rtype: C{float} or C{None}
"""
assert chktype(1, reference, sets.BaseSet)
assert chktype(2, test, sets.BaseSet)
if len(test) == 0:
return None
else:
return float(len(reference.intersection(test)))/len(test)
def recall(reference, test):
"""
Given a set of reference values and a set of test values, return
the percentage of reference values that appear in the test set.
In particular, return |C{reference}S{cap}C{test}|/|C{reference}|.
If C{reference} is empty, then return C{None}.
@type reference: C{Set}
@param reference: A set of reference values.
@type test: C{Set}
@param test: A set of values to compare against the reference set.
@rtype: C{float} or C{None}
"""
assert chktype(1, reference, sets.BaseSet)
assert chktype(2, test, sets.BaseSet)
if len(reference) == 0:
return None
else:
return float(len(reference.intersection(test)))/len(reference)
def f_measure(reference, test, alpha=0.5):
"""
Given a set of reference values and a set of test values, return
the f-measure of the test values, when compared against the
reference values. The f-measure is the harmonic mean of the
L{precision} and L{recall}, weighted by C{alpha}. In particular,
given the precision M{p} and recall M{r} defined by:
- M{p} = |C{reference}S{cap}C{test}|/|C{test}|
- M{r} = |C{reference}S{cap}C{test}|/|C{reference}|
The f-measure is:
- 1/(C{alpha}/M{p} + (1-C{alpha})/M{r})
If either C{reference} or C{test} is empty, then C{f_measure}
returns C{None}.
@type reference: C{Set}
@param reference: A set of reference values.
@type test: C{Set}
@param test: A set of values to compare against the reference set.
@rtype: C{float} or C{None}
"""
p = precision(reference, test)
r = recall(reference, test)
if p is None or r is None:
return None
if p == 0 or r == 0:
return 0
return 1.0/(alpha/p + (1-alpha)/r)
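# Illustrative worked example (added for clarity; _f_measure_example is a
# hypothetical helper, not part of the original module). With the sets
# below the intersection has 2 elements, so precision = 2/3, recall = 2/4,
# and with alpha=0.5 the f-measure is 1/(0.5/(2/3) + 0.5/(2/4)) = 4/7.
def _f_measure_example():
    reference = sets.Set([1, 2, 3, 4])
    test = sets.Set([3, 4, 5])
    return f_measure(reference, test)  # ~0.571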
def log_likelihood(reference, test):
"""
Given a list of reference values and a corresponding list of test
probability distributions, return the average log likelihood of
the reference values, given the probability distributions.
@param reference: A list of reference values
@type reference: C{list}
@param test: A list of probability distributions over values to
compare against the corresponding reference values.
@type test: C{list} of L{ProbDist}
"""
if len(reference) != len(test):
raise ValueError("Lists must have the same length.")
# Return the average value of dist.logprob(val).
total_likelihood = sum([dist.logprob(val)
for (val, dist) in zip(reference, test)])
return total_likelihood/len(reference)
class ConfusionMatrix:
"""
The confusion matrix between a list of reference values and a
corresponding list of test values. Entry [M{r},M{t}] of this
matrix is a count of the number of times that the reference value
M{r} corresponds to the test value M{t}. E.g.:
>>> ref = 'DET NN VB DET JJ NN NN IN DET NN'.split()
>>> test = 'DET VB VB DET NN NN NN IN DET NN'.split()
>>> cm = ConfusionMatrix(ref, test)
>>> print cm['NN', 'NN']
3
    Note that the diagonal entries (M{Ri}=M{Tj}) of this matrix
    correspond to correct values, and the off-diagonal entries
    correspond to incorrect values.
"""
def __init__(self, reference, test):
"""
Construct a new confusion matrix from a list of reference
values and a corresponding list of test values.
@type reference: C{list}
@param reference: An ordered list of reference values.
@type test: C{list}
@param test: A list of values to compare against the
corresponding reference values.
        @raise ValueError: If C{reference} and C{test} do not have
            the same length.
"""
assert chktype(1, reference, [])
assert chktype(2, test, [])
if len(reference) != len(test):
raise ValueError('Lists must have the same length.')
# Get a list of all values.
values = dict([(val,1) for val in reference+test]).keys()
# Construct a value->index dictionary
indices = dict([(val,i) for (i,val) in enumerate(values)])
# Make a confusion matrix table.
confusion = [[0 for val in values] for val in values]
max_conf = 0 # Maximum confusion
for w,g in zip(reference, test):
confusion[indices[w]][indices[g]] += 1
max_conf = max(max_conf, confusion[indices[w]][indices[g]])
#: A list of all values in C{reference} or C{test}.
self._values = values
#: A dictionary mapping values in L{self._values} to their indices.
self._indices = indices
#: The confusion matrix itself (as a list of lists of counts).
self._confusion = confusion
#: The greatest count in L{self._confusion} (used for printing).
        self._max_conf = max_conf
#: The total number of values in the confusion matrix.
self._total = len(reference)
#: The number of correct (on-diagonal) values in the matrix.
self._correct = sum([confusion[i][i] for i in range(len(values))])
def __getitem__(self, (li,lj)):
"""
@return: The number of times that value C{li} was expected and
value C{lj} was given.
@rtype: C{int}
"""
i = self._indices[li]
j = self._indices[lj]
return self._confusion[i][j]
def __repr__(self):
return '<ConfusionMatrix: %s/%s correct>' % (self._correct,
self._total)
def __str__(self):
return self.pp()
def pp(self, show_percents=False, values_in_chart=True):
"""
@return: A multi-line string representation of this confusion
matrix.
@todo: add marginals?
"""
confusion = self._confusion
if values_in_chart:
values = self._values
else:
values = range(len(self._values))
# Construct a format string for row values
valuelen = max([len(str(val)) for val in values])
value_format = '%' + `valuelen` + 's |'
# Construct a format string for matrix entries
if show_percents:
entrylen = 6
entry_format = '%5.1f%%'
else:
entrylen = len(`self._max_conf`)
entry_format = '%' + `entrylen` + 'd'
# Write the column values.
value_strings = [str(val) for val in values]
s = ''
for i in range(valuelen):
s += (' '*valuelen)+' |'
for val in value_strings:
if i >= valuelen-len(val):
s += val[i-valuelen+len(val)].rjust(entrylen+1)
else:
s += ' '*(entrylen+1)
s += ' |\n'
# Write a dividing line
s += '%s-+-%s+\n' % ('-'*valuelen, '-'*((entrylen+1)*len(values)))
# Write the entries.
for i in range(len(values)):
s += value_format % values[i]
for j in range(len(values)):
s += ' '
if show_percents:
s += entry_format % (100.0*confusion[i][j]/self._total)
else:
s += entry_format % confusion[i][j]
s += ' |\n'
# Write a dividing line
s += '%s-+-%s+\n' % ('-'*valuelen, '-'*((entrylen+1)*len(values)))
# Write a key
s += '(row = reference; col = test)\n'
if not values_in_chart:
s += 'Value key:\n'
for i, value in enumerate(self._values):
s += '%6d: %s\n' % (i, value)
return s
def key(self):
values = self._values
str = 'Value key:\n'
indexlen = len(`len(values)-1`)
key_format = ' %'+`indexlen`+'d: %s\n'
for i in range(len(values)):
str += key_format % (i, values[i])
return str
def demo():
print '-'*75
reference = 'DET NN VB DET JJ NN NN IN DET NN'.split()
test = 'DET VB VB DET NN NN NN IN DET NN'.split()
print 'Reference =', reference
print 'Test =', test
print 'Confusion matrix:'
print ConfusionMatrix(reference, test)
print 'Accuracy:', accuracy(reference, test)
print '-'*75
reference_set = sets.Set(reference)
test_set = sets.Set(test)
print 'Reference =', reference_set
print 'Test = ', test_set
print 'Precision:', precision(reference_set, test_set)
print ' Recall:', recall(reference_set, test_set)
print 'F-Measure:', f_measure(reference_set, test_set)
print '-'*75
if __name__ == '__main__':
demo()
|
{
"content_hash": "da70c45f370d3977863ef155d4f6dd54",
"timestamp": "",
"source": "github",
"line_count": 290,
"max_line_length": 75,
"avg_line_length": 36,
"alnum_prop": 0.5837164750957854,
"repo_name": "ronaldahmed/robot-navigation",
"id": "70f9382a62e10f8ea92781fb1fcbe28bf75ac773",
"size": "10705",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neural-navigation-with-lstm/MARCO/nltk/eval.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "50665"
},
{
"name": "C++",
"bytes": "9297"
},
{
"name": "M",
"bytes": "494"
},
{
"name": "Makefile",
"bytes": "15281"
},
{
"name": "Matlab",
"bytes": "113735"
},
{
"name": "Python",
"bytes": "2915588"
}
],
"symlink_target": ""
}
|
"""
gspread.models
~~~~~~~~~~~~~~
This module contains common spreadsheets' models
"""
import re
from collections import defaultdict
from itertools import chain
from xml.etree import ElementTree
from xml.etree.ElementTree import Element, SubElement
from . import urlencode
from .ns import _ns, _ns1, _ns2, ATOM_NS, BATCH_NS, SPREADSHEET_NS
from .urls import construct_url
from .utils import finditem, numericise_all
from .exceptions import IncorrectCellLabel, WorksheetNotFound, CellNotFound
try:
unicode
except NameError:
basestring = unicode = str
# Patch ElementTree._escape_attrib
_elementtree_escape_attrib = ElementTree._escape_attrib
def _escape_attrib(text, encoding=None, replace=None):
try:
text = _elementtree_escape_attrib(text)
except TypeError as e:
if str(e) == '_escape_attrib() takes exactly 2 arguments (1 given)':
text = _elementtree_escape_attrib(text, encoding)
    entities = {'\n': '&#10;', '\r': '&#13;', '\t': '&#9;'}
for key, value in entities.items():
text = text.replace(key, value)
return text
ElementTree._escape_attrib = _escape_attrib
class Spreadsheet(object):
""" A class for a spreadsheet object."""
def __init__(self, client, feed_entry):
self.client = client
self._sheet_list = []
self._feed_entry = feed_entry
@property
def id(self):
return self._feed_entry.find(_ns('id')).text.split('/')[-1]
def get_id_fields(self):
return {'spreadsheet_id': self.id}
def _fetch_sheets(self):
feed = self.client.get_worksheets_feed(self)
for elem in feed.findall(_ns('entry')):
self._sheet_list.append(Worksheet(self, elem))
def add_worksheet(self, title, rows, cols):
"""Adds a new worksheet to a spreadsheet.
:param title: A title of a new worksheet.
:param rows: Number of rows.
:param cols: Number of columns.
        Returns a newly created :class:`worksheet <Worksheet>`.
"""
feed = Element('entry', {'xmlns': ATOM_NS,
'xmlns:gs': SPREADSHEET_NS})
SubElement(feed, 'title').text = title
SubElement(feed, 'gs:rowCount').text = str(rows)
SubElement(feed, 'gs:colCount').text = str(cols)
url = construct_url('worksheets', self)
elem = self.client.post_feed(url, ElementTree.tostring(feed))
worksheet = Worksheet(self, elem)
self._sheet_list.append(worksheet)
return worksheet
def del_worksheet(self, worksheet):
"""Deletes a worksheet from a spreadsheet.
:param worksheet: The worksheet to be deleted.
"""
self.client.del_worksheet(worksheet)
self._sheet_list.remove(worksheet)
def worksheets(self):
"""Returns a list of all :class:`worksheets <Worksheet>`
in a spreadsheet.
"""
if not self._sheet_list:
self._fetch_sheets()
return self._sheet_list[:]
def worksheet(self, title):
"""Returns a worksheet with specified `title`.
        The returned object is an instance of :class:`Worksheet`.
        :param title: A title of a worksheet. If there are multiple
                      worksheets with the same title, the first one will
be returned.
Example. Getting worksheet named 'Annual bonuses'
>>> sht = client.open('Sample one')
>>> worksheet = sht.worksheet('Annual bonuses')
"""
if not self._sheet_list:
self._fetch_sheets()
try:
return finditem(lambda x: x.title == title, self._sheet_list)
except StopIteration:
raise WorksheetNotFound(title)
def get_worksheet(self, index):
"""Returns a worksheet with specified `index`.
        The returned object is an instance of :class:`Worksheet`.
:param index: An index of a worksheet. Indexes start from zero.
Example. To get first worksheet of a spreadsheet:
>>> sht = client.open('My fancy spreadsheet')
>>> worksheet = sht.get_worksheet(0)
Returns `None` if the worksheet is not found.
"""
if not self._sheet_list:
self._fetch_sheets()
try:
return self._sheet_list[index]
except IndexError:
return None
@property
def sheet1(self):
"""Shortcut property for getting the first worksheet."""
return self.get_worksheet(0)
@property
def title(self):
return self._feed_entry.find(_ns('title')).text
class Worksheet(object):
"""A class for worksheet object."""
def __init__(self, spreadsheet, element):
self.spreadsheet = spreadsheet
self.client = spreadsheet.client
self._id = element.find(_ns('id')).text.split('/')[-1]
self._title = element.find(_ns('title')).text
self._element = element
try:
self.version = self._get_link(
'edit', element).get('href').split('/')[-1]
        except Exception:
# not relevant for read-only spreadsheets
self.version = None
def __repr__(self):
return '<%s %s id:%s>' % (self.__class__.__name__,
repr(self.title),
self.id)
@property
def id(self):
"""Id of a worksheet."""
return self._id
@property
def title(self):
"""Title of a worksheet."""
return self._title
@property
def row_count(self):
"""Number of rows"""
return int(self._element.find(_ns1('rowCount')).text)
@property
def col_count(self):
"""Number of columns"""
return int(self._element.find(_ns1('colCount')).text)
@property
def updated(self):
"""Updated time in RFC 3339 format"""
return self._element.find(_ns('updated')).text
def get_id_fields(self):
return {'spreadsheet_id': self.spreadsheet.id,
'worksheet_id': self.id}
def _cell_addr(self, row, col):
return 'R%sC%s' % (row, col)
def _get_link(self, link_type, feed):
return finditem(lambda x: x.get('rel') == link_type,
feed.findall(_ns('link')))
def _fetch_cells(self):
feed = self.client.get_cells_feed(self)
return [Cell(self, elem) for elem in feed.findall(_ns('entry'))]
_MAGIC_NUMBER = 64
_cell_addr_re = re.compile(r'([A-Za-z]+)(\d+)')
def get_int_addr(self, label):
"""Translates cell's label address to a tuple of integers.
The result is a tuple containing `row` and `column` numbers.
:param label: String with cell label in common format, e.g. 'B1'.
Letter case is ignored.
Example:
>>> wks.get_int_addr('A1')
(1, 1)
"""
m = self._cell_addr_re.match(label)
if m:
column_label = m.group(1).upper()
row = int(m.group(2))
col = 0
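            # Interpret the label as a bijective base-26 number: 'A'..'Z'
            # map to 1..26, with the rightmost letter least significant.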
for i, c in enumerate(reversed(column_label)):
col += (ord(c) - self._MAGIC_NUMBER) * (26 ** i)
else:
raise IncorrectCellLabel(label)
return (row, col)
def get_addr_int(self, row, col):
"""Translates cell's tuple of integers to a cell label.
The result is a string containing the cell's coordinates in label form.
:param row: The row of the cell to be converted.
Rows start at index 1.
:param col: The column of the cell to be converted.
Columns start at index 1.
Example:
>>> wks.get_addr_int(1, 1)
A1
"""
row = int(row)
col = int(col)
if row < 1 or col < 1:
raise IncorrectCellLabel('(%s, %s)' % (row, col))
div = col
column_label = ''
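        # Build the label in bijective base 26: a zero remainder from
        # divmod means the digit is 'Z' (26), so borrow one from the quotient.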
while div:
(div, mod) = divmod(div, 26)
if mod == 0:
mod = 26
div -= 1
column_label = chr(mod + self._MAGIC_NUMBER) + column_label
label = '%s%s' % (column_label, row)
return label
def acell(self, label):
"""Returns an instance of a :class:`Cell`.
:param label: String with cell label in common format, e.g. 'B1'.
Letter case is ignored.
Example:
>>> wks.acell('A1') # this could be 'a1' as well
<Cell R1C1 "I'm cell A1">
"""
return self.cell(*(self.get_int_addr(label)))
def cell(self, row, col):
"""Returns an instance of a :class:`Cell` positioned in `row`
and `col` column.
:param row: Integer row number.
:param col: Integer column number.
Example:
>>> wks.cell(1, 1)
<Cell R1C1 "I'm cell A1">
"""
feed = self.client.get_cells_cell_id_feed(self,
self._cell_addr(row, col))
return Cell(self, feed)
def range(self, alphanum):
"""Returns a list of :class:`Cell` objects from specified range.
:param alphanum: A string with range value in common format,
e.g. 'A1:A5'.
"""
feed = self.client.get_cells_feed(self, params={'range': alphanum,
'return-empty': 'true'})
return [Cell(self, elem) for elem in feed.findall(_ns('entry'))]
def get_all_values(self):
"""Returns a list of lists containing all cells' values as strings."""
cells = self._fetch_cells()
# defaultdicts fill in gaps for empty rows/cells not returned by gdocs
rows = defaultdict(lambda: defaultdict(str))
for cell in cells:
row = rows.setdefault(int(cell.row), defaultdict(str))
row[cell.col] = cell.value
# we return a whole rectangular region worth of cells, including
# empties
if not rows:
return []
all_row_keys = chain.from_iterable(row.keys() for row in rows.values())
rect_cols = range(1, max(all_row_keys) + 1)
rect_rows = range(1, max(rows.keys()) + 1)
return [[rows[i][j] for j in rect_cols] for i in rect_rows]
def get_all_records(self, empty2zero=False, head=1):
"""Returns a list of dictionaries, all of them having:
            - the contents of the spreadsheet, with the head row as keys,
And each of these dictionaries holding
- the contents of subsequent rows of cells as values.
Cell values are numericised (strings that can be read as ints
or floats are converted).
:param empty2zero: determines whether empty cells are converted to zeros.
        :param head: determines which row to use as keys, starting from 1
            following the numbering of the spreadsheet."""
idx = head - 1
data = self.get_all_values()
keys = data[idx]
values = [numericise_all(row, empty2zero) for row in data[idx + 1:]]
return [dict(zip(keys, row)) for row in values]
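    # Illustrative example (assumed sheet contents, not part of gspread): if
    # row 1 is ['name', 'age'] and row 2 is ['Alice', '30'], get_all_records()
    # returns [{'name': 'Alice', 'age': 30}].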
def row_values(self, row):
"""Returns a list of all values in a `row`.
Empty cells in this list will be rendered as :const:`None`.
"""
start_cell = self.get_addr_int(row, 1)
end_cell = self.get_addr_int(row, self.col_count)
row_cells = self.range('%s:%s' % (start_cell, end_cell))
return [cell.value for cell in row_cells]
def col_values(self, col):
"""Returns a list of all values in column `col`.
Empty cells in this list will be rendered as :const:`None`.
"""
start_cell = self.get_addr_int(1, col)
end_cell = self.get_addr_int(self.row_count, col)
row_cells = self.range('%s:%s' % (start_cell, end_cell))
return [cell.value for cell in row_cells]
def update_acell(self, label, val):
"""Sets the new value to a cell.
:param label: String with cell label in common format, e.g. 'B1'.
Letter case is ignored.
:param val: New value.
Example:
>>> wks.update_acell('A1', '42') # this could be 'a1' as well
<Cell R1C1 "I'm cell A1">
"""
return self.update_cell(*(self.get_int_addr(label)), val=val)
def update_cell(self, row, col, val):
"""Sets the new value to a cell.
:param row: Row number.
:param col: Column number.
:param val: New value.
"""
feed = self.client.get_cells_cell_id_feed(self,
self._cell_addr(row, col))
cell_elem = feed.find(_ns1('cell'))
cell_elem.set('inputValue', unicode(val))
uri = self._get_link('edit', feed).get('href')
self.client.put_feed(uri, ElementTree.tostring(feed))
def list_rows(self):
"""List rows in current spreadsheet."""
rows = self.client.list_rows(self)
for row in rows:
yield Row(row)
def delete_row(self, row):
"""Deletes given row.
:param row: :class:`row <Row>`.
"""
self.client.delete_row(row.edit_link)
def _create_update_feed(self, cell_list):
feed = Element('feed', {'xmlns': ATOM_NS,
'xmlns:batch': BATCH_NS,
'xmlns:gs': SPREADSHEET_NS})
id_elem = SubElement(feed, 'id')
id_elem.text = construct_url('cells', self)
for cell in cell_list:
entry = SubElement(feed, 'entry')
SubElement(entry, 'batch:id').text = cell.element.find(
_ns('title')).text
SubElement(entry, 'batch:operation', {'type': 'update'})
SubElement(entry, 'id').text = cell.element.find(_ns('id')).text
edit_link = finditem(lambda x: x.get('rel') == 'edit',
cell.element.findall(_ns('link')))
SubElement(entry, 'link', {'rel': 'edit',
'type': edit_link.get('type'),
'href': edit_link.get('href')})
SubElement(entry, 'gs:cell', {'row': str(cell.row),
'col': str(cell.col),
'inputValue': unicode(cell.value)})
return feed
def update_cells(self, cell_list):
"""Updates cells in batch.
:param cell_list: List of a :class:`Cell` objects to update.
"""
feed = self._create_update_feed(cell_list)
self.client.post_cells(self, ElementTree.tostring(feed))
def resize(self, rows=None, cols=None):
"""Resizes the worksheet.
        :param rows: New number of rows.
        :param cols: New number of columns.
"""
if rows is None and cols is None:
raise TypeError("Either 'rows' or 'cols' should be specified.")
self_uri = self._get_link('self', self._element).get('href')
feed = self.client.get_feed(self_uri)
uri = self._get_link('edit', feed).get('href')
if rows:
elem = feed.find(_ns1('rowCount'))
elem.text = str(rows)
if cols:
elem = feed.find(_ns1('colCount'))
elem.text = str(cols)
# Send request and store result
self._element = self.client.put_feed(uri, ElementTree.tostring(feed))
def add_rows(self, rows):
"""Adds rows to worksheet.
:param rows: Rows number to add.
"""
self.resize(rows=self.row_count + rows)
def add_cols(self, cols):
"""Adds colums to worksheet.
:param cols: Columns number to add.
"""
self.resize(cols=self.col_count + cols)
def append_row(self, values):
"""Adds a row to the worksheet and populates it with values.
Widens the worksheet if there are more values than columns.
Note that a new Google Sheet has 100 or 1000 rows by default. You
may need to scroll down to find the new row.
:param values: List of values for the new row.
"""
self.add_rows(1)
new_row = self.row_count
data_width = len(values)
if self.col_count < data_width:
self.resize(cols=data_width)
cell_list = []
for i, value in enumerate(values, start=1):
cell = self.cell(new_row, i)
cell.value = value
cell_list.append(cell)
self.update_cells(cell_list)
def insert_row(self, values, index=1):
""""Adds a row to the worksheet at the specified index and populates it with values.
Widens the worksheet if there are more values than columns.
:param values: List of values for the new row.
"""
if index == self.row_count + 1:
return self.append_row(values)
elif index > self.row_count + 1:
raise IndexError('Row index out of range')
self.add_rows(1)
data_width = len(values)
if self.col_count < data_width:
self.resize(cols=data_width)
# Retrieve all Cells at or below `index` using a single batch query
top_left = self.get_addr_int(index, 1)
bottom_right = self.get_addr_int(self.row_count, self.col_count)
range_str = '%s:%s' % (top_left, bottom_right)
cells_after_insert = self.range(range_str)
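        # Walk the fetched cells bottom-up so each source value is read
        # before this pass overwrites it with the value from the row above.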
for ind, cell in reversed(list(enumerate(cells_after_insert))):
if ind < self.col_count:
# For the first row, take the cell values from `values`
new_val = values[ind] if ind < len(values) else ''
else:
# For all other rows, take the cell values from the row above
new_val = cells_after_insert[ind - self.col_count].value
cell.value = new_val
self.update_cells(cells_after_insert)
def _finder(self, func, query):
cells = self._fetch_cells()
if isinstance(query, basestring):
match = lambda x: x.value == query
else:
match = lambda x: query.search(x.value)
return func(match, cells)
def find(self, query):
"""Finds first cell matching query.
:param query: A text string or compiled regular expression.
"""
try:
return self._finder(finditem, query)
except StopIteration:
raise CellNotFound(query)
def findall(self, query):
"""Finds all cells matching query.
:param query: A text string or compiled regular expression.
"""
return self._finder(filter, query)
def export(self, format='csv'):
"""Export the worksheet in specified format.
:param format: A format of the output.
"""
export_link = self._get_link(
'http://schemas.google.com/spreadsheets/2006#exportcsv',
self._element).get('href')
url, qs = export_link.split('?')
params = dict(param.split('=') for param in qs.split('&'))
params['format'] = format
params = urlencode(params)
export_link = '%s?%s' % (url, params)
return self.client.session.get(export_link)
class Cell(object):
"""An instance of this class represents a single cell
in a :class:`worksheet <Worksheet>`.
"""
def __init__(self, worksheet, element):
self.element = element
cell_elem = element.find(_ns1('cell'))
self._row = int(cell_elem.get('row'))
self._col = int(cell_elem.get('col'))
self.input_value = cell_elem.get('inputValue')
numeric_value = cell_elem.get('numericValue')
self.numeric_value = float(numeric_value) if numeric_value else None
#: Value of the cell.
self.value = cell_elem.text or ''
@property
def row(self):
"""Row number of the cell."""
return self._row
@property
def col(self):
"""Column number of the cell."""
return self._col
def __repr__(self):
return '<%s R%sC%s %s>' % (self.__class__.__name__,
self.row,
self.col,
repr(self.value))
class Row(object):
"""An instance of this class represents a single row
in a :class:`worksheet <Worksheet>`.
"""
def __init__(self, element):
self.element = element
self.id = self.element.find(_ns('id')).text
self.title = self.element.find(_ns('title')).text
self.value = self.element.find(_ns('content')).text
for link in self.element.findall(_ns('link')):
if link.get('rel') == 'self':
self.view_link = link.get('href')
elif link.get('rel') == 'edit':
self.edit_link = link.get('href')
def __getattr__(self, name):
return self.element.find(_ns2(name)).text
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__,
repr(self.title))
|
{
"content_hash": "fa342a716106a72014a5464b50ed00f1",
"timestamp": "",
"source": "github",
"line_count": 685,
"max_line_length": 92,
"avg_line_length": 30.88029197080292,
"alnum_prop": 0.5559022360894436,
"repo_name": "brakhane/gspread",
"id": "533a8e63b11ca2322b9ac0c729111fc0602fba62",
"size": "21178",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gspread/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "5968"
},
{
"name": "Python",
"bytes": "60644"
}
],
"symlink_target": ""
}
|
############################################################################
##
## Copyright (C) 2005-2005 Trolltech AS. All rights reserved.
##
## This file is part of the example classes of the Qt Toolkit.
##
## This file may be used under the terms of the GNU General Public
## License version 2.0 as published by the Free Software Foundation
## and appearing in the file LICENSE.GPL included in the packaging of
## this file. Please review the following information to ensure GNU
## General Public Licensing requirements will be met:
## http://www.trolltech.com/products/qt/opensource.html
##
## If you are unsure which license is appropriate for your use, please
## review the following information:
## http://www.trolltech.com/products/qt/licensing.html or contact the
## sales department at sales@trolltech.com.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
############################################################################
# This is only needed for Python v2 but is harmless for Python v3.
#import sip
#sip.setapi('QString', 2)
from PySide import QtCore, QtGui
import fridgemagnets_rc
class DragLabel(QtGui.QLabel):
def __init__(self, text, parent):
super(DragLabel, self).__init__(parent)
metric = QtGui.QFontMetrics(self.font())
size = metric.size(QtCore.Qt.TextSingleLine, text)
image = QtGui.QImage(size.width() + 12, size.height() + 12,
QtGui.QImage.Format_ARGB32_Premultiplied)
image.fill(QtGui.qRgba(0, 0, 0, 0))
font = QtGui.QFont()
font.setStyleStrategy(QtGui.QFont.ForceOutline)
painter = QtGui.QPainter()
painter.begin(image)
painter.setRenderHint(QtGui.QPainter.Antialiasing)
painter.setBrush(QtCore.Qt.white)
painter.drawRoundedRect(QtCore.QRectF(0.5, 0.5, image.width()-1,
image.height()-1), 25, 25, QtCore.Qt.RelativeSize)
painter.setFont(font)
painter.setBrush(QtCore.Qt.black)
painter.drawText(QtCore.QRect(QtCore.QPoint(6, 6), size),
QtCore.Qt.AlignCenter, text)
painter.end()
self.setPixmap(QtGui.QPixmap.fromImage(image))
self.labelText = text
def mousePressEvent(self, event):
itemData = QtCore.QByteArray()
dataStream = QtCore.QDataStream(itemData, QtCore.QIODevice.WriteOnly)
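        # Stream the label text and the click offset into the byte array so
        # the drop target can rebuild the magnet under the cursor.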
dataStream << QtCore.QByteArray(str(self.labelText)) << QtCore.QPoint(event.pos() - self.rect().topLeft())
mimeData = QtCore.QMimeData()
mimeData.setData('application/x-fridgemagnet', itemData)
mimeData.setText(self.labelText)
drag = QtGui.QDrag(self)
drag.setMimeData(mimeData)
drag.setHotSpot(event.pos() - self.rect().topLeft())
drag.setPixmap(self.pixmap())
self.hide()
if drag.exec_(QtCore.Qt.MoveAction | QtCore.Qt.CopyAction, QtCore.Qt.CopyAction) == QtCore.Qt.MoveAction:
self.close()
else:
self.show()
class DragWidget(QtGui.QWidget):
def __init__(self, parent=None):
super(DragWidget, self).__init__(parent)
dictionaryFile = QtCore.QFile(':/dictionary/words.txt')
dictionaryFile.open(QtCore.QFile.ReadOnly)
x = 5
y = 5
for word in QtCore.QTextStream(dictionaryFile).readAll().split():
wordLabel = DragLabel(word, self)
wordLabel.move(x, y)
wordLabel.show()
x += wordLabel.width() + 2
if x >= 245:
x = 5
y += wordLabel.height() + 2
newPalette = self.palette()
newPalette.setColor(QtGui.QPalette.Window, QtCore.Qt.white)
self.setPalette(newPalette)
self.setMinimumSize(400, max(200, y))
self.setWindowTitle("Fridge Magnets")
self.setAcceptDrops(True)
def dragEnterEvent(self, event):
if event.mimeData().hasFormat('application/x-fridgemagnet'):
if event.source() in self.children():
event.setDropAction(QtCore.Qt.MoveAction)
event.accept()
else:
event.acceptProposedAction()
elif event.mimeData().hasText():
event.acceptProposedAction()
else:
event.ignore()
dragMoveEvent = dragEnterEvent
def dropEvent(self, event):
if event.mimeData().hasFormat('application/x-fridgemagnet'):
mime = event.mimeData()
itemData = mime.data('application/x-fridgemagnet')
dataStream = QtCore.QDataStream(itemData, QtCore.QIODevice.ReadOnly)
text = QtCore.QByteArray()
offset = QtCore.QPoint()
dataStream >> text >> offset
try:
# Python v3.
text = str(text, encoding='latin1')
except TypeError:
# Python v2.
text = str(text)
newLabel = DragLabel(text, self)
newLabel.move(event.pos() - offset)
newLabel.show()
if event.source() in self.children():
event.setDropAction(QtCore.Qt.MoveAction)
event.accept()
else:
event.acceptProposedAction()
elif event.mimeData().hasText():
pieces = event.mimeData().text().split()
position = event.pos()
for piece in pieces:
newLabel = DragLabel(piece, self)
newLabel.move(position)
newLabel.show()
position += QtCore.QPoint(newLabel.width(), 0)
event.acceptProposedAction()
else:
event.ignore()
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
window = DragWidget()
window.show()
sys.exit(app.exec_())
|
{
"content_hash": "70070746a488c89487c77b5282f480fc",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 114,
"avg_line_length": 34.55113636363637,
"alnum_prop": 0.577372142739681,
"repo_name": "cherry-wb/SideTools",
"id": "c82afa9e98f6f0ab7f46cad8c0283d993164fdc2",
"size": "6104",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/draganddrop/fridgemagnets/fridgemagnets.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "9501"
},
{
"name": "Python",
"bytes": "4071976"
},
{
"name": "Shell",
"bytes": "182"
},
{
"name": "TypeScript",
"bytes": "25292"
}
],
"symlink_target": ""
}
|
"""
Print the NCBI taxonomy as a spreadsheet
"""
from taxon import get_taxonomy_db, get_taxonomy, all_species_ids
want = ['superkingdom', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species']
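# Output is one tab-separated line per species: the taxid followed by the
# eight ranks above, left blank when a rank is absent from the lineage.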
def printtaxa(i, c):
"""
Print out the taxonomy
:param i: identifier
:param c: database connection
:return:
"""
names = {w: "" for w in want}
t, n = get_taxonomy(i, c)
if t.rank in want:
names[t.rank] = n.get_name()
while t.parent != 1 and t.taxid != 1:
t, n = get_taxonomy(t.parent, c)
if t.rank in want:
names[t.rank] = n.get_name()
print("\t".join([str(i)] + [names[w] for w in want]))
if __name__ == '__main__':
c = get_taxonomy_db()
for i in all_species_ids(c):
printtaxa(i[0], c)
|
{
"content_hash": "3788f3a822750d502fd832106a0b84be",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 92,
"avg_line_length": 24.875,
"alnum_prop": 0.5590452261306532,
"repo_name": "linsalrob/EdwardsLab",
"id": "bb3cd0a91ddc13c238cbf86fd7472cc443801faf",
"size": "796",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ncbi/tax2spreadsheetdb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "227276"
},
{
"name": "C++",
"bytes": "21508"
},
{
"name": "Jupyter Notebook",
"bytes": "490830"
},
{
"name": "Makefile",
"bytes": "936"
},
{
"name": "Perl",
"bytes": "280086"
},
{
"name": "Python",
"bytes": "1102051"
},
{
"name": "Shell",
"bytes": "13759"
}
],
"symlink_target": ""
}
|
import rospy
from naoqi_sensors.naoqi_camera import NaoqiCam
if __name__ == "__main__":
naocam = NaoqiCam()
naocam.start()
rospy.spin()
|
{
"content_hash": "34e1610d5c132f162d4176b04e5872c0",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 47,
"avg_line_length": 20.428571428571427,
"alnum_prop": 0.6713286713286714,
"repo_name": "lsouchet/naoqi_bridge",
"id": "b174c8698e60a85225a80fede25eadaaed2b1265",
"size": "165",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "naoqi_sensors_py/nodes/camera.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "1792"
},
{
"name": "CMake",
"bytes": "11666"
},
{
"name": "Python",
"bytes": "612267"
}
],
"symlink_target": ""
}
|
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
DESCRIPTOR = _descriptor.FileDescriptor(
name='messages.proto',
package='',
serialized_pb='\n\x0emessages.proto\"\x18\n\x07Request\x12\r\n\x05value\x18\x01 \x03(\x05\"\x19\n\x08Response\x12\r\n\x05value\x18\x01 \x03(\x02')
_REQUEST = _descriptor.Descriptor(
name='Request',
full_name='Request',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='Request.value', index=0,
number=1, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=18,
serialized_end=42,
)
_RESPONSE = _descriptor.Descriptor(
name='Response',
full_name='Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='Response.value', index=0,
number=1, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=44,
serialized_end=69,
)
DESCRIPTOR.message_types_by_name['Request'] = _REQUEST
DESCRIPTOR.message_types_by_name['Response'] = _RESPONSE
class Request(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _REQUEST
# @@protoc_insertion_point(class_scope:Request)
class Response(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _RESPONSE
# @@protoc_insertion_point(class_scope:Response)
# @@protoc_insertion_point(module_scope)
|
{
"content_hash": "8e6c2ca9c454b8fde52c6db72811562a",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 148,
"avg_line_length": 25.179775280898877,
"alnum_prop": 0.7014725568942436,
"repo_name": "sashafrey/python_cpp_interop",
"id": "07a6a6b0ac476e8842ca8dd8dabed3c1ae221288",
"size": "2326",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "messages_pb2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "40518"
},
{
"name": "C++",
"bytes": "648773"
},
{
"name": "Python",
"bytes": "611"
},
{
"name": "Shell",
"bytes": "2001"
}
],
"symlink_target": ""
}
|
"""
pybench2.py: Test speed of one or more Pythons on a set of simple
code-string benchmarks. A function, to allow stmts to vary.
This system itself runs on both 2.X and 3.X, and may spawn both.
Version 2: adds setup statement support, with extra stmts list slot.
Uses timeit to test either the Python running this script by API
calls, or a set of Pythons by reading spawned command-line outputs
(os.popen) with Python's -m flag to find timeit on module search path.
Replaces $listif3 with a list() around generators for 3.X and an
empty string for 2.X, so 3.X does same work as 2.X. In command-line
mode only, must split multiline statements into one separate quoted
argument per line so all will be run (else might run/time first line
only), and replace all \t in indentation with 4 spaces for uniformity.
Caveats: command-line mode (only) may fail if test stmt embeds double
quotes, quoted stmt string is incompatible with shell in general, or
command-line exceeds a length limit on platform's shell--use API call
mode or homegrown timer in such cases.
"""
import sys, os, timeit
defnum, defrep = 1000, 5    # may vary per stmt
def runner(stmts, pythons=None, tracecmd=False):
"""
Main logic: run tests per input lists, caller handles usage modes.
stmts: [(number?, repeat?, setup-string, stmt-string)], replaces $listif3 in stmt
pythons: None=this python only, or [(ispy3?, python-executable-path)]
"""
print(sys.version)
for (number, repeat, setup, stmt) in stmts:
number = number or defnum
repeat = repeat or defrep # 0=default
if not pythons:
# run stmt on this python: API call
# no need to split lines or quote here
ispy3 = sys.version[0] == '3'
stmt = stmt.replace('$listif3', 'list' if ispy3 else '')
best = min(timeit.repeat(setup=setup, stmt=stmt, number=number, repeat=repeat))
print('%.4f [%r]' % (best, stmt[:70]))
else:
# run stmt on all pythons: command line
# split lines into quoted arguments
print('-' * 80)
print('[%r]' % stmt)
# setup handled like stmt, but no $listif3: not timed
setup = setup.replace('\t', ' ' * 4)
setup = ' '.join('-s "%s"' % line for line in setup.split('\n'))
for (ispy3, python) in pythons:
stmt1 = stmt.replace('$listif3', 'list' if ispy3 else '')
stmt1 = stmt1.replace('\t', ' ' * 4)
lines = stmt1.split('\n')
args = ' '.join('"%s"' % line for line in lines)
cmd = '%s -m timeit -n %s -r %s %s %s' % (python, number, repeat, setup, args)
print(python)
if tracecmd: print(cmd)
print('\t' + os.popen(cmd).read().rstrip())
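# Minimal usage sketch (illustrative only; the statements below are
# arbitrary sample benchmarks, not part of the original module). Each
# tuple is (number, repeat, setup, stmt); 0 selects the defaults above.
if __name__ == '__main__':
    sample_stmts = [
        (0, 0, 'pass', 'x = [i ** 2 for i in range(1000)]'),
        (0, 0, 'd = dict.fromkeys(range(1000))', '$listif3(d.keys())'),
    ]
    runner(sample_stmts)  # pythons=None: time on this interpreter via timeit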
|
{
"content_hash": "c984c7851fcc6e8f8907e6a63e5a8d56",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 94,
"avg_line_length": 44.453125,
"alnum_prop": 0.6200351493848858,
"repo_name": "dreadrel/UWF_2014_spring_COP3990C-2507",
"id": "01eb698620ab6fe822d1a619570fa7e9741e7ebe",
"size": "2845",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notebooks/scripts/book_code/code/pybench2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1796"
},
{
"name": "Python",
"bytes": "493591"
}
],
"symlink_target": ""
}
|
"""Container for Google Cloud Bigtable Cells and Streaming Row Contents."""
import copy
import six
import grpc
from google.api_core import exceptions
from google.api_core import retry
from google.cloud._helpers import _datetime_from_microseconds
from google.cloud._helpers import _to_bytes
from google.cloud.bigtable_v2.proto import bigtable_pb2 as data_messages_v2_pb2
from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2
_MISSING_COLUMN_FAMILY = "Column family {} is not among the cells stored in this row."
_MISSING_COLUMN = (
"Column {} is not among the cells stored in this row in the " "column family {}."
)
_MISSING_INDEX = (
"Index {!r} is not valid for the cells stored in this row for column {} "
"in the column family {}. There are {} such cells."
)
class Cell(object):
"""Representation of a Google Cloud Bigtable Cell.
:type value: bytes
:param value: The value stored in the cell.
:type timestamp_micros: int
:param timestamp_micros: The timestamp_micros when the cell was stored.
:type labels: list
:param labels: (Optional) List of strings. Labels applied to the cell.
"""
def __init__(self, value, timestamp_micros, labels=None):
self.value = value
self.timestamp_micros = timestamp_micros
self.labels = list(labels) if labels is not None else []
@classmethod
def from_pb(cls, cell_pb):
"""Create a new cell from a Cell protobuf.
:type cell_pb: :class:`._generated.data_pb2.Cell`
:param cell_pb: The protobuf to convert.
:rtype: :class:`Cell`
:returns: The cell corresponding to the protobuf.
"""
if cell_pb.labels:
return cls(cell_pb.value, cell_pb.timestamp_micros, labels=cell_pb.labels)
else:
return cls(cell_pb.value, cell_pb.timestamp_micros)
@property
def timestamp(self):
return _datetime_from_microseconds(self.timestamp_micros)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return (
other.value == self.value
and other.timestamp_micros == self.timestamp_micros
and other.labels == self.labels
)
def __ne__(self, other):
return not self == other
def __repr__(self):
return "<{name} value={value!r} timestamp={timestamp}>".format(
name=self.__class__.__name__, value=self.value, timestamp=self.timestamp
)
class PartialCellData(object):
"""Representation of partial cell in a Google Cloud Bigtable Table.
These are expected to be updated directly from a
:class:`._generated.bigtable_service_messages_pb2.ReadRowsResponse`
:type row_key: bytes
:param row_key: The key for the row holding the (partial) cell.
:type family_name: str
:param family_name: The family name of the (partial) cell.
:type qualifier: bytes
:param qualifier: The column qualifier of the (partial) cell.
:type timestamp_micros: int
    :param timestamp_micros: The timestamp (in microseconds) of the
(partial) cell.
:type labels: list of str
:param labels: labels assigned to the (partial) cell
:type value: bytes
:param value: The (accumulated) value of the (partial) cell.
"""
def __init__(
self, row_key, family_name, qualifier, timestamp_micros, labels=(), value=b""
):
self.row_key = row_key
self.family_name = family_name
self.qualifier = qualifier
self.timestamp_micros = timestamp_micros
self.labels = labels
self.value = value
def append_value(self, value):
"""Append bytes from a new chunk to value.
:type value: bytes
:param value: bytes to append
"""
self.value += value
class PartialRowData(object):
"""Representation of partial row in a Google Cloud Bigtable Table.
These are expected to be updated directly from a
:class:`._generated.bigtable_service_messages_pb2.ReadRowsResponse`
:type row_key: bytes
:param row_key: The key for the row holding the (partial) data.
"""
def __init__(self, row_key):
self._row_key = row_key
self._cells = {}
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return other._row_key == self._row_key and other._cells == self._cells
def __ne__(self, other):
return not self == other
def to_dict(self):
"""Convert the cells to a dictionary.
This is intended to be used with HappyBase, so the column family and
        column qualifiers are combined (with ``:``).
:rtype: dict
:returns: Dictionary containing all the data in the cells of this row.
"""
result = {}
for column_family_id, columns in six.iteritems(self._cells):
for column_qual, cells in six.iteritems(columns):
key = _to_bytes(column_family_id) + b":" + _to_bytes(column_qual)
result[key] = cells
return result
@property
def cells(self):
"""Property returning all the cells accumulated on this partial row.
For example:
.. literalinclude:: snippets_table.py
:start-after: [START bigtable_row_data_cells]
:end-before: [END bigtable_row_data_cells]
:rtype: dict
:returns: Dictionary of the :class:`Cell` objects accumulated. This
dictionary has two-levels of keys (first for column families
and second for column names/qualifiers within a family). For
a given column, a list of :class:`Cell` objects is stored.
"""
return self._cells
@property
def row_key(self):
"""Getter for the current (partial) row's key.
:rtype: bytes
:returns: The current (partial) row's key.
"""
return self._row_key
def find_cells(self, column_family_id, column):
"""Get a time series of cells stored on this instance.
For example:
.. literalinclude:: snippets_table.py
:start-after: [START bigtable_row_find_cells]
:end-before: [END bigtable_row_find_cells]
Args:
column_family_id (str): The ID of the column family. Must be of the
form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
column (bytes): The column within the column family where the cells
are located.
Returns:
List[~google.cloud.bigtable.row_data.Cell]: The cells stored in the
specified column.
Raises:
KeyError: If ``column_family_id`` is not among the cells stored
in this row.
KeyError: If ``column`` is not among the cells stored in this row
for the given ``column_family_id``.
"""
try:
column_family = self._cells[column_family_id]
except KeyError:
raise KeyError(_MISSING_COLUMN_FAMILY.format(column_family_id))
try:
cells = column_family[column]
except KeyError:
raise KeyError(_MISSING_COLUMN.format(column, column_family_id))
return cells
def cell_value(self, column_family_id, column, index=0):
"""Get a single cell value stored on this instance.
For example:
.. literalinclude:: snippets_table.py
:start-after: [START bigtable_row_cell_value]
:end-before: [END bigtable_row_cell_value]
Args:
column_family_id (str): The ID of the column family. Must be of the
form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
column (bytes): The column within the column family where the cell
is located.
index (Optional[int]): The offset within the series of values. If
not specified, will return the first cell.
Returns:
~google.cloud.bigtable.row_data.Cell value: The cell value stored
in the specified column and specified index.
Raises:
KeyError: If ``column_family_id`` is not among the cells stored
in this row.
KeyError: If ``column`` is not among the cells stored in this row
for the given ``column_family_id``.
IndexError: If ``index`` cannot be found within the cells stored
in this row for the given ``column_family_id``, ``column``
pair.
"""
cells = self.find_cells(column_family_id, column)
try:
cell = cells[index]
except (TypeError, IndexError):
num_cells = len(cells)
msg = _MISSING_INDEX.format(index, column, column_family_id, num_cells)
raise IndexError(msg)
return cell.value
def cell_values(self, column_family_id, column, max_count=None):
"""Get a time series of cells stored on this instance.
For example:
.. literalinclude:: snippets_table.py
:start-after: [START bigtable_row_cell_values]
:end-before: [END bigtable_row_cell_values]
Args:
column_family_id (str): The ID of the column family. Must be of the
form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
column (bytes): The column within the column family where the cells
are located.
max_count (int): The maximum number of cells to use.
Returns:
A generator which provides: cell.value, cell.timestamp_micros
for each cell in the list of cells
Raises:
KeyError: If ``column_family_id`` is not among the cells stored
in this row.
KeyError: If ``column`` is not among the cells stored in this row
for the given ``column_family_id``.
"""
cells = self.find_cells(column_family_id, column)
if max_count is None:
max_count = len(cells)
for index, cell in enumerate(cells):
if index == max_count:
break
yield cell.value, cell.timestamp_micros
class InvalidReadRowsResponse(RuntimeError):
"""Exception raised to to invalid response data from back-end."""
class InvalidChunk(RuntimeError):
"""Exception raised to to invalid chunk data from back-end."""
def _retry_read_rows_exception(exc):
if isinstance(exc, grpc.RpcError):
exc = exceptions.from_grpc_error(exc)
return isinstance(exc, (exceptions.ServiceUnavailable, exceptions.DeadlineExceeded))
DEFAULT_RETRY_READ_ROWS = retry.Retry(
predicate=_retry_read_rows_exception,
initial=1.0,
maximum=15.0,
multiplier=2.0,
deadline=60.0, # 60 seconds
)
"""The default retry strategy to be used on retry-able errors.
Used by
:meth:`~google.cloud.bigtable.row_data.PartialRowsData._read_next_response`.
"""
class PartialRowsData(object):
"""Convenience wrapper for consuming a ``ReadRows`` streaming response.
:type read_method: :class:`client._table_data_client.read_rows`
:param read_method: ``ReadRows`` method.
:type request: :class:`data_messages_v2_pb2.ReadRowsRequest`
:param request: The ``ReadRowsRequest`` message used to create a
ReadRowsResponse iterator. If the iterator fails, a new
iterator is created, allowing the scan to continue from
the point just beyond the last successfully read row,
identified by self.last_scanned_row_key. The retry happens
inside of the Retry class, using a predicate for the
expected exceptions during iteration.
:type retry: :class:`~google.api_core.retry.Retry`
:param retry: (Optional) Retry delay and deadline arguments. To override,
the default value :attr:`DEFAULT_RETRY_READ_ROWS` can be
used and modified with the
:meth:`~google.api_core.retry.Retry.with_delay` method
or the
:meth:`~google.api_core.retry.Retry.with_deadline` method.
"""
NEW_ROW = "New row" # No cells yet complete for row
ROW_IN_PROGRESS = "Row in progress" # Some cells complete for row
CELL_IN_PROGRESS = "Cell in progress" # Incomplete cell for row
STATE_NEW_ROW = 1
STATE_ROW_IN_PROGRESS = 2
STATE_CELL_IN_PROGRESS = 3
read_states = {
STATE_NEW_ROW: NEW_ROW,
STATE_ROW_IN_PROGRESS: ROW_IN_PROGRESS,
STATE_CELL_IN_PROGRESS: CELL_IN_PROGRESS,
}
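    # Transitions (as implemented in _process_chunk below): a chunk that
    # completes a cell moves NEW_ROW -> ROW_IN_PROGRESS; a chunk with
    # value_size > 0 leaves the cell open (CELL_IN_PROGRESS); commit_row
    # and reset_row both return the machine to NEW_ROW.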
def __init__(self, read_method, request, retry=DEFAULT_RETRY_READ_ROWS):
# Counter for rows returned to the user
self._counter = 0
# In-progress row, unset until first response, after commit/reset
self._row = None
# Last complete row, unset until first commit
self._previous_row = None
# In-progress cell, unset until first response, after completion
self._cell = None
# Last complete cell, unset until first completion, after new row
self._previous_cell = None
# May be cached from previous response
self.last_scanned_row_key = None
self.read_method = read_method
self.request = request
self.retry = retry
self.response_iterator = read_method(request)
self.rows = {}
self._state = self.STATE_NEW_ROW
# Flag to stop iteration, for any reason not related to self.retry()
self._cancelled = False
@property
def state(self):
"""State machine state.
:rtype: str
:returns: name of state corresponding to current row / chunk
processing.
"""
return self.read_states[self._state]
def cancel(self):
"""Cancels the iterator, closing the stream."""
self._cancelled = True
self.response_iterator.cancel()
def consume_all(self, max_loops=None):
"""Consume the streamed responses until there are no more.
.. warning::
This method will be removed in future releases. Please use this
class as a generator instead.
:type max_loops: int
:param max_loops: (Optional) Maximum number of times to try to consume
an additional ``ReadRowsResponse``. You can use this
to avoid long wait times.
"""
for row in self:
self.rows[row.row_key] = row
def _create_retry_request(self):
"""Helper for :meth:`__iter__`."""
req_manager = _ReadRowsRequestManager(
self.request, self.last_scanned_row_key, self._counter
)
return req_manager.build_updated_request()
def _on_error(self, exc):
"""Helper for :meth:`__iter__`."""
# restart the read scan from AFTER the last successfully read row
retry_request = self.request
if self.last_scanned_row_key:
retry_request = self._create_retry_request()
self.response_iterator = self.read_method(retry_request)
def _read_next(self):
"""Helper for :meth:`__iter__`."""
return six.next(self.response_iterator)
def _read_next_response(self):
"""Helper for :meth:`__iter__`."""
return self.retry(self._read_next, on_error=self._on_error)()
def __iter__(self):
"""Consume the ``ReadRowsResponse`` s from the stream.
Read the rows and yield each to the reader
Parse the response and its chunks into a new/existing row in
:attr:`_rows`. Rows are returned in order by row key.
"""
while not self._cancelled:
try:
response = self._read_next_response()
except StopIteration:
if self.state != self.NEW_ROW:
raise ValueError("The row remains partial / is not committed.")
break
for chunk in response.chunks:
if self._cancelled:
break
self._process_chunk(chunk)
if chunk.commit_row:
self.last_scanned_row_key = self._previous_row.row_key
self._counter += 1
yield self._previous_row
resp_last_key = response.last_scanned_row_key
if resp_last_key and resp_last_key > self.last_scanned_row_key:
self.last_scanned_row_key = resp_last_key
def _process_chunk(self, chunk):
if chunk.reset_row:
self._validate_chunk_reset_row(chunk)
self._row = None
self._cell = self._previous_cell = None
self._state = self.STATE_NEW_ROW
return
self._update_cell(chunk)
if self._row is None:
if (
self._previous_row is not None
and self._cell.row_key <= self._previous_row.row_key
):
raise InvalidChunk()
self._row = PartialRowData(self._cell.row_key)
if chunk.value_size == 0:
self._state = self.STATE_ROW_IN_PROGRESS
self._save_current_cell()
else:
self._state = self.STATE_CELL_IN_PROGRESS
if chunk.commit_row:
if chunk.value_size > 0:
raise InvalidChunk()
self._previous_row = self._row
self._row = None
self._previous_cell = None
self._state = self.STATE_NEW_ROW
def _update_cell(self, chunk):
if self._cell is None:
qualifier = None
if chunk.HasField("qualifier"):
qualifier = chunk.qualifier.value
family = None
if chunk.HasField("family_name"):
family = chunk.family_name.value
self._cell = PartialCellData(
chunk.row_key,
family,
qualifier,
chunk.timestamp_micros,
chunk.labels,
chunk.value,
)
self._copy_from_previous(self._cell)
self._validate_cell_data_new_cell()
else:
self._cell.append_value(chunk.value)
def _validate_cell_data_new_cell(self):
cell = self._cell
if not cell.row_key or not cell.family_name or cell.qualifier is None:
raise InvalidChunk()
prev = self._previous_cell
if prev and prev.row_key != cell.row_key:
raise InvalidChunk()
def _validate_chunk_reset_row(self, chunk):
# No reset for new row
_raise_if(self._state == self.STATE_NEW_ROW)
# No reset with other keys
_raise_if(chunk.row_key)
_raise_if(chunk.HasField("family_name"))
_raise_if(chunk.HasField("qualifier"))
_raise_if(chunk.timestamp_micros)
_raise_if(chunk.labels)
_raise_if(chunk.value_size)
_raise_if(chunk.value)
_raise_if(chunk.commit_row)
def _save_current_cell(self):
"""Helper for :meth:`consume_next`."""
row, cell = self._row, self._cell
family = row._cells.setdefault(cell.family_name, {})
qualified = family.setdefault(cell.qualifier, [])
complete = Cell.from_pb(cell)
qualified.append(complete)
self._cell, self._previous_cell = None, cell
def _copy_from_previous(self, cell):
"""Helper for :meth:`consume_next`."""
previous = self._previous_cell
if previous is not None:
if not cell.row_key:
cell.row_key = previous.row_key
if not cell.family_name:
cell.family_name = previous.family_name
# NOTE: ``cell.qualifier`` **can** be empty string.
if cell.qualifier is None:
cell.qualifier = previous.qualifier
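# A minimal consumption sketch (hypothetical caller code; ``table_client``
# and ``process`` are stand-ins, not names from this module):
#
#     rows = PartialRowsData(table_client.read_rows, request)
#     for row in rows:
#         process(row.row_key, row.cells)
#
# Each yielded ``row`` is a committed :class:`PartialRowData`; retries and
# request rewinding happen transparently inside ``__iter__``.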
class _ReadRowsRequestManager(object):
""" Update the ReadRowsRequest message in case of failures by
filtering the already read keys.
:type message: class:`data_messages_v2_pb2.ReadRowsRequest`
:param message: Original ReadRowsRequest containing all of the parameters
of API call
:type last_scanned_key: bytes
:param last_scanned_key: last successfully scanned key
:type rows_read_so_far: int
    :param rows_read_so_far: total number of rows successfully read so far;
                             this will be used for updating rows_limit
"""
def __init__(self, message, last_scanned_key, rows_read_so_far):
self.message = message
self.last_scanned_key = last_scanned_key
self.rows_read_so_far = rows_read_so_far
def build_updated_request(self):
""" Updates the given message request as per last scanned key
"""
r_kwargs = {
"table_name": self.message.table_name,
"filter": self.message.filter,
}
if self.message.rows_limit != 0:
r_kwargs["rows_limit"] = max(
1, self.message.rows_limit - self.rows_read_so_far
)
# if neither RowSet.row_keys nor RowSet.row_ranges currently exist,
# add row_range that starts with last_scanned_key as start_key_open
# to request only rows that have not been returned yet
if not self.message.HasField("rows"):
row_range = data_v2_pb2.RowRange(start_key_open=self.last_scanned_key)
r_kwargs["rows"] = data_v2_pb2.RowSet(row_ranges=[row_range])
else:
row_keys = self._filter_rows_keys()
row_ranges = self._filter_row_ranges()
r_kwargs["rows"] = data_v2_pb2.RowSet(
row_keys=row_keys, row_ranges=row_ranges
)
return data_messages_v2_pb2.ReadRowsRequest(**r_kwargs)
def _filter_rows_keys(self):
""" Helper for :meth:`build_updated_request`"""
return [
row_key
for row_key in self.message.rows.row_keys
if row_key > self.last_scanned_key
]
def _filter_row_ranges(self):
""" Helper for :meth:`build_updated_request`"""
new_row_ranges = []
for row_range in self.message.rows.row_ranges:
# if current end_key (open or closed) is set, return its value,
# if not, set to empty string ('').
# NOTE: Empty string in end_key means "end of table"
end_key = self._end_key_set(row_range)
# if end_key is already read, skip to the next row_range
if end_key and self._key_already_read(end_key):
continue
# if current start_key (open or closed) is set, return its value,
# if not, then set to empty string ('')
# NOTE: Empty string in start_key means "beginning of table"
start_key = self._start_key_set(row_range)
# if start_key was already read or doesn't exist,
# create a row_range with last_scanned_key as start_key_open
# to be passed to retry request
retry_row_range = row_range
if self._key_already_read(start_key):
retry_row_range = copy.deepcopy(row_range)
retry_row_range.start_key_closed = _to_bytes("")
retry_row_range.start_key_open = self.last_scanned_key
new_row_ranges.append(retry_row_range)
return new_row_ranges
def _key_already_read(self, key):
""" Helper for :meth:`_filter_row_ranges`"""
return key <= self.last_scanned_key
@staticmethod
def _start_key_set(row_range):
""" Helper for :meth:`_filter_row_ranges`"""
return row_range.start_key_open or row_range.start_key_closed
@staticmethod
def _end_key_set(row_range):
""" Helper for :meth:`_filter_row_ranges`"""
return row_range.end_key_open or row_range.end_key_closed
def _raise_if(predicate, *args):
"""Helper for validation methods."""
if predicate:
raise InvalidChunk(*args)
|
{
"content_hash": "dbf3bf8f4e849b039f9996eb42fd9bab",
"timestamp": "",
"source": "github",
"line_count": 685,
"max_line_length": 88,
"avg_line_length": 35.25985401459854,
"alnum_prop": 0.5914379166149133,
"repo_name": "tswast/google-cloud-python",
"id": "24078b8496d891f79a605d04f74cbfad139ecfd6",
"size": "24728",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bigtable/google/cloud/bigtable/row_data.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1094"
},
{
"name": "Python",
"bytes": "33785371"
},
{
"name": "Shell",
"bytes": "9148"
}
],
"symlink_target": ""
}
|
"""Builds the eye network.
Summary of available functions:
# Compute input images and labels for training. If you would like to run
# evaluations, use inputs() instead.
inputs, labels = distorted_inputs()
# Compute inference on the model inputs to make a prediction.
predictions = inference(inputs)
# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)
# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import tensorflow as tf
import eye_input
FLAGS = tf.app.flags.FLAGS
# Basic model parameters.
tf.app.flags.DEFINE_integer('batch_size', 64,
"""Number of images to process in a batch.""")
tf.app.flags.DEFINE_string('data_dir', '/home/hp/Documents/DeepLearning/MyProjects/Data/eye/new96',
"""Path to the eye data directory.""")
tf.app.flags.DEFINE_boolean('use_fp16', False,
"""Train the model using fp16.""")
# Global constants describing the eye data set.
IMAGE_SIZE = eye_input.IMAGE_SIZE
NUM_CLASSES = eye_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = eye_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = eye_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 2000.0 # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1 # Initial learning rate.
# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
def _activation_summary(x):
"""Helper to create summaries for activations.
Creates a summary that provides a histogram of activations.
Creates a summary that measures the sparsity of activations.
Args:
x: Tensor
Returns:
nothing
"""
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
tf.histogram_summary(tensor_name + '/activations', x)
tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def _variable_on_cpu(name, shape, initializer):
"""Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
"""
with tf.device('/cpu:0'):
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
return var
def _variable_with_weight_decay(name, shape, stddev, wd):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
"""
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
var = _variable_on_cpu(
name,
shape,
tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
if wd is not None:
weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
def distorted_inputs():
"""Construct distorted input for eye training using the Reader ops.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
images, labels = eye_input.distorted_inputs(FLAGS.data_dir,
batch_size=FLAGS.batch_size)
if FLAGS.use_fp16:
images = tf.cast(images, tf.float16)
labels = tf.cast(labels, tf.float16)
return images, labels
def inputs(eval_data):
"""Construct input for eye evaluation using the Reader ops.
Args:
eval_data: bool, indicating if one should use the train or eval data set.
Returns:
images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
labels: Labels. 1D tensor of [batch_size] size.
Raises:
ValueError: If no data_dir
"""
if not FLAGS.data_dir:
raise ValueError('Please supply a data_dir')
images, labels = eye_input.inputs(eval_data=eval_data,
data_dir=FLAGS.data_dir,
batch_size=FLAGS.batch_size)
if FLAGS.use_fp16:
images = tf.cast(images, tf.float16)
labels = tf.cast(labels, tf.float16)
return images, labels
def inference(images):
"""Build the eye model.
Args:
images: Images returned from distorted_inputs() or inputs().
Returns:
Logits.
"""
# We instantiate all variables using tf.get_variable() instead of
# tf.Variable() in order to share variables across multiple GPU training runs.
# If we only ran this model on a single GPU, we could simplify this function
# by replacing all instances of tf.get_variable() with tf.Variable().
#
# conv1
with tf.variable_scope('conv1') as scope:
kernel = _variable_with_weight_decay('weights',
shape=[5, 5, 3, 64],
stddev=5e-2,
wd=0.0)
conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
bias = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(bias, name=scope.name)
_activation_summary(conv1)
# pool1
pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
padding='SAME', name='pool1')
# norm1
norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm1')
# conv2
with tf.variable_scope('conv2') as scope:
kernel = _variable_with_weight_decay('weights',
shape=[5, 5, 64, 64],
stddev=5e-2,
wd=0.0)
conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
bias = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(bias, name=scope.name)
_activation_summary(conv2)
# norm2
norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm2')
# pool2
pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1], padding='SAME', name='pool2')
# More conv and pooling
# conv3
with tf.variable_scope('conv3') as scope:
kernel = _variable_with_weight_decay('weights',
shape=[5, 5, 64, 64],
stddev=5e-2,
wd=0.0)
conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0))
bias = tf.nn.bias_add(conv, biases)
conv3 = tf.nn.relu(bias, name=scope.name)
_activation_summary(conv3)
    # pool3
pool3 = tf.nn.max_pool(conv3, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
padding='SAME', name='pool3')
# norm3
norm3 = tf.nn.lrn(pool3, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm3')
# conv4
with tf.variable_scope('conv4') as scope:
kernel = _variable_with_weight_decay('weights',
shape=[5, 5, 64, 128],
stddev=5e-2,
wd=0.0)
conv = tf.nn.conv2d(norm3, kernel, [1, 1, 1, 1], padding='SAME')
biases = _variable_on_cpu('biases', [128], tf.constant_initializer(0.1))
bias = tf.nn.bias_add(conv, biases)
conv4 = tf.nn.relu(bias, name=scope.name)
_activation_summary(conv4)
# norm4
norm4 = tf.nn.lrn(conv4, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm4')
# pool4
pool4 = tf.nn.max_pool(norm4, ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1], padding='SAME', name='pool4')
###
# local5
with tf.variable_scope('local5') as scope:
# Move everything into depth so we can perform a single matrix multiply.
reshape = tf.reshape(pool4, [FLAGS.batch_size, -1])
dim = reshape.get_shape()[1].value
weights = _variable_with_weight_decay('weights', shape=[dim, 384],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))
local5 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
_activation_summary(local5)
# local6
with tf.variable_scope('local6') as scope:
weights = _variable_with_weight_decay('weights', shape=[384, 192],
stddev=0.04, wd=0.004)
biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1))
local6 = tf.nn.relu(tf.matmul(local5, weights) + biases, name=scope.name)
_activation_summary(local6)
# softmax, i.e. softmax(WX + b)
with tf.variable_scope('softmax_linear') as scope:
weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES],
stddev=1 / 192.0, wd=0.0)
biases = _variable_on_cpu('biases', [NUM_CLASSES],
tf.constant_initializer(0.0))
softmax_linear = tf.add(tf.matmul(local6, weights), biases, name=scope.name)
_activation_summary(softmax_linear)
return softmax_linear
def loss(logits, labels):
"""Add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Labels from distorted_inputs or inputs(). 1-D tensor
of shape [batch_size]
Returns:
Loss tensor of type float.
"""
# Calculate the average cross entropy loss across the batch.
labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits, labels, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
# The total loss is defined as the cross entropy loss plus all of the weight
# decay terms (L2 loss).
return tf.add_n(tf.get_collection('losses'), name='total_loss')
def _add_loss_summaries(total_loss):
"""Add summaries for losses in eye model.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses.
"""
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.scalar_summary(l.op.name + ' (raw)', l)
tf.scalar_summary(l.op.name, loss_averages.average(l))
return loss_averages_op
def train(total_loss, global_step):
"""Train eye model.
Create an optimizer and apply to all trainable variables. Add moving
average for all trainable variables.
Args:
total_loss: Total loss from loss().
global_step: Integer Variable counting the number of training steps
processed.
Returns:
train_op: op for training.
"""
# Variables that affect learning rate.
num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
global_step,
decay_steps,
LEARNING_RATE_DECAY_FACTOR,
staircase=True)
tf.scalar_summary('learning_rate', lr)
# Generate moving averages of all losses and associated summaries.
loss_averages_op = _add_loss_summaries(total_loss)
# Compute gradients.
with tf.control_dependencies([loss_averages_op]):
opt = tf.train.GradientDescentOptimizer(lr)
grads = opt.compute_gradients(total_loss)
# Apply gradients.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
tf.histogram_summary(var.op.name, var)
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
tf.histogram_summary(var.op.name + '/gradients', grad)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
train_op = tf.no_op(name='train')
return train_op
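# A minimal driver sketch (hypothetical; the actual train script is separate
# from this file). It wires the functions above together under the same
# old-style TensorFlow API assumed throughout this module:
#
#     with tf.Graph().as_default():
#         global_step = tf.Variable(0, trainable=False)
#         images, labels = distorted_inputs()
#         logits = inference(images)
#         total_loss = loss(logits, labels)
#         train_op = train(total_loss, global_step)
#         # ...create a session, run tf.initialize_all_variables(), then
#         # repeatedly run train_op.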
|
{
"content_hash": "3b04fbb572e6604d141e4c0c82bfd19f",
"timestamp": "",
"source": "github",
"line_count": 389,
"max_line_length": 99,
"avg_line_length": 37.97943444730077,
"alnum_prop": 0.6082983619872749,
"repo_name": "callofdutyops/YXH2016724098982",
"id": "bc024592c453029dee9f2241e1aa2ec2b68f61c4",
"size": "14774",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eye_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "65344"
},
{
"name": "Shell",
"bytes": "100"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import django_mc.mixins
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
migrations.swappable_dependency(settings.MC_LAYOUT_MODEL),
]
operations = [
migrations.CreateModel(
name='ComponentBase',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('_poly_ct', models.ForeignKey(related_name='+', editable=False, to='contenttypes.ContentType')),
],
options={
'abstract': False,
'verbose_name': 'Component Base',
'swappable': 'MC_COMPONENT_BASE_MODEL',
'verbose_name_plural': 'Component Bases',
},
),
migrations.CreateModel(
name='Layout',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=50)),
('slug', models.SlugField(unique=True)),
('parent', models.ForeignKey(blank=True, to='self', help_text='Select a layout which shall be extended by this layout according to region extend rules.', null=True)),
],
options={
'abstract': False,
'verbose_name': 'Layout',
'swappable': 'MC_LAYOUT_MODEL',
'verbose_name_plural': 'Layouts',
},
bases=(django_mc.mixins.TemplateHintProvider, models.Model),
),
migrations.CreateModel(
name='Region',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=50)),
('slug', models.SlugField(unique=True)),
('component_extend_rule', models.CharField(help_text='Define how page components that is added to this region change the layout components.', max_length=16, choices=[(b'combine', 'Add to existing layout components'), (b'overwrite', 'Replace existing layout components')])),
('available_components', models.ManyToManyField(to='contenttypes.ContentType')),
],
options={
'verbose_name': 'Region',
'verbose_name_plural': 'Regions',
},
bases=(django_mc.mixins.TemplateHintProvider, models.Model),
),
]
|
{
"content_hash": "38f06a9a7edf6f2340932c0ada5ee59e",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 289,
"avg_line_length": 44.483333333333334,
"alnum_prop": 0.5680029973772949,
"repo_name": "team23/django_mc",
"id": "ef39ab9a34593e1f94d44fba7fe89e46f63b7a96",
"size": "2693",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_mc/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "54954"
}
],
"symlink_target": ""
}
|
"""
celery.canvas
~~~~~~~~~~~~~
Composing task workflows.
Documentation for some of these types are in :mod:`celery`.
You should import these from :mod:`celery` and not this module.
"""
from __future__ import absolute_import
from copy import deepcopy
from functools import partial as _partial, reduce
from operator import itemgetter
from itertools import chain as _chain
from kombu.utils import cached_property, fxrange, kwdict, reprcall, uuid
from celery._state import current_app
from celery.utils.functional import (
maybe_list, is_list, regen,
chunks as _chunks,
)
from celery.utils.text import truncate
__all__ = ['Signature', 'chain', 'xmap', 'xstarmap', 'chunks',
'group', 'chord', 'signature', 'maybe_signature']
class _getitem_property(object):
"""Attribute -> dict key descriptor.
The target object must support ``__getitem__``,
and optionally ``__setitem__``.
Example:
class Me(dict):
deep = defaultdict(dict)
foo = _getitem_property('foo')
deep_thing = _getitem_property('deep.thing')
>>> me = Me()
>>> me.foo
None
>>> me.foo = 10
>>> me.foo
10
>>> me['foo']
10
>>> me.deep_thing = 42
>>> me.deep_thing
42
    >>> me.deep
defaultdict(<type 'dict'>, {'thing': 42})
"""
def __init__(self, keypath):
path, _, self.key = keypath.rpartition('.')
self.path = path.split('.') if path else None
def _path(self, obj):
return (reduce(lambda d, k: d[k], [obj] + self.path) if self.path
else obj)
def __get__(self, obj, type=None):
if obj is None:
return type
return self._path(obj).get(self.key)
def __set__(self, obj, value):
self._path(obj)[self.key] = value
class Signature(dict):
"""Class that wraps the arguments and execution options
for a single task invocation.
Used as the parts in a :class:`group` and other constructs,
or to pass tasks around as callbacks while being compatible
with serializers with a strict type subset.
:param task: Either a task class/instance, or the name of a task.
:keyword args: Positional arguments to apply.
:keyword kwargs: Keyword arguments to apply.
:keyword options: Additional options to :meth:`Task.apply_async`.
Note that if the first argument is a :class:`dict`, the other
arguments will be ignored and the values in the dict will be used
instead.
>>> s = signature('tasks.add', args=(2, 2))
>>> signature(s)
    {'task': 'tasks.add', 'args': (2, 2), 'kwargs': {}, 'options': {}}
"""
TYPES = {}
_app = _type = None
@classmethod
def register_type(cls, subclass, name=None):
cls.TYPES[name or subclass.__name__] = subclass
return subclass
@classmethod
def from_dict(self, d, app=None):
typ = d.get('subtask_type')
if typ:
return self.TYPES[typ].from_dict(kwdict(d), app=app)
return Signature(d, app=app)
def __init__(self, task=None, args=None, kwargs=None, options=None,
type=None, subtask_type=None, immutable=False,
app=None, **ex):
self._app = app
init = dict.__init__
if isinstance(task, dict):
return init(self, task) # works like dict(d)
# Also supports using task class/instance instead of string name.
try:
task_name = task.name
except AttributeError:
task_name = task
else:
self._type = task
init(self,
task=task_name, args=tuple(args or ()),
kwargs=kwargs or {},
options=dict(options or {}, **ex),
subtask_type=subtask_type,
immutable=immutable)
def __call__(self, *partial_args, **partial_kwargs):
args, kwargs, _ = self._merge(partial_args, partial_kwargs, None)
return self.type(*args, **kwargs)
def delay(self, *partial_args, **partial_kwargs):
return self.apply_async(partial_args, partial_kwargs)
def apply(self, args=(), kwargs={}, **options):
"""Apply this task locally."""
# For callbacks: extra args are prepended to the stored args.
args, kwargs, options = self._merge(args, kwargs, options)
return self.type.apply(args, kwargs, **options)
def _merge(self, args=(), kwargs={}, options={}):
if self.immutable:
return (self.args, self.kwargs,
dict(self.options, **options) if options else self.options)
return (tuple(args) + tuple(self.args) if args else self.args,
dict(self.kwargs, **kwargs) if kwargs else self.kwargs,
dict(self.options, **options) if options else self.options)
def clone(self, args=(), kwargs={}, **opts):
        # need to deepcopy options so origin links etc. are not modified.
if args or kwargs or opts:
args, kwargs, opts = self._merge(args, kwargs, opts)
else:
args, kwargs, opts = self.args, self.kwargs, self.options
s = Signature.from_dict({'task': self.task, 'args': tuple(args),
'kwargs': kwargs, 'options': deepcopy(opts),
'subtask_type': self.subtask_type,
'immutable': self.immutable}, app=self._app)
s._type = self._type
return s
partial = clone
def freeze(self, _id=None):
opts = self.options
try:
tid = opts['task_id']
except KeyError:
tid = opts['task_id'] = _id or uuid()
if 'reply_to' not in opts:
opts['reply_to'] = self.app.oid
return self.AsyncResult(tid)
_freeze = freeze
def replace(self, args=None, kwargs=None, options=None):
s = self.clone()
if args is not None:
s.args = args
if kwargs is not None:
s.kwargs = kwargs
if options is not None:
s.options = options
return s
def set(self, immutable=None, **options):
if immutable is not None:
self.set_immutable(immutable)
self.options.update(options)
return self
def set_immutable(self, immutable):
self.immutable = immutable
def apply_async(self, args=(), kwargs={}, **options):
# For callbacks: extra args are prepended to the stored args.
if args or kwargs or options:
args, kwargs, options = self._merge(args, kwargs, options)
else:
args, kwargs, options = self.args, self.kwargs, self.options
return self._apply_async(args, kwargs, **options)
def append_to_list_option(self, key, value):
items = self.options.setdefault(key, [])
if value not in items:
items.append(value)
return value
def link(self, callback):
return self.append_to_list_option('link', callback)
def link_error(self, errback):
return self.append_to_list_option('link_error', errback)
def flatten_links(self):
return list(_chain.from_iterable(_chain(
[[self]],
(link.flatten_links()
for link in maybe_list(self.options.get('link')) or [])
)))
def __or__(self, other):
if not isinstance(self, chain) and isinstance(other, chain):
return chain((self, ) + other.tasks, app=self._app)
elif isinstance(other, chain):
return chain(*self.tasks + other.tasks, app=self._app)
elif isinstance(other, Signature):
if isinstance(self, chain):
return chain(*self.tasks + (other, ), app=self._app)
return chain(self, other, app=self._app)
return NotImplemented
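    # Example: ``__or__`` is what makes the pipe syntax work; for a
    # hypothetical task ``add``, ``add.s(2, 2) | add.s(4)`` builds
    # ``chain(add.s(2, 2), add.s(4))``.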
def __deepcopy__(self, memo):
memo[id(self)] = self
return dict(self)
def __invert__(self):
return self.apply_async().get()
def __reduce__(self):
# for serialization, the task type is lazily loaded,
# and not stored in the dict itself.
return subtask, (dict(self), )
def reprcall(self, *args, **kwargs):
args, kwargs, _ = self._merge(args, kwargs, {})
return reprcall(self['task'], args, kwargs)
def election(self):
type = self.type
app = type.app
tid = self.options.get('task_id') or uuid()
with app.producer_or_acquire(None) as P:
props = type.backend.on_task_call(P, tid)
app.control.election(tid, 'task', self.clone(task_id=tid, **props),
connection=P.connection)
return type.AsyncResult(tid)
def __repr__(self):
return self.reprcall()
@cached_property
def type(self):
return self._type or self.app.tasks[self['task']]
@cached_property
def app(self):
return self._app or current_app
@cached_property
def AsyncResult(self):
try:
return self.type.AsyncResult
except KeyError: # task not registered
return self.app.AsyncResult
@cached_property
def _apply_async(self):
try:
return self.type.apply_async
except KeyError:
return _partial(self.app.send_task, self['task'])
id = _getitem_property('options.task_id')
task = _getitem_property('task')
args = _getitem_property('args')
kwargs = _getitem_property('kwargs')
options = _getitem_property('options')
subtask_type = _getitem_property('subtask_type')
immutable = _getitem_property('immutable')
@Signature.register_type
class chain(Signature):
def __init__(self, *tasks, **options):
tasks = (regen(tasks[0]) if len(tasks) == 1 and is_list(tasks[0])
else tasks)
Signature.__init__(
self, 'celery.chain', (), {'tasks': tasks}, **options
)
self.tasks = tasks
self.subtask_type = 'chain'
def __call__(self, *args, **kwargs):
if self.tasks:
return self.apply_async(args, kwargs)
@classmethod
def from_dict(self, d, app=None):
tasks = d['kwargs']['tasks']
if d['args'] and tasks:
# partial args passed on to first task in chain (Issue #1057).
tasks[0]['args'] = tasks[0]._merge(d['args'])[0]
return chain(*d['kwargs']['tasks'], app=app, **kwdict(d['options']))
@property
def type(self):
try:
return self._type or self.tasks[0].type.app.tasks['celery.chain']
except KeyError:
return self.app.tasks['celery.chain']
def __repr__(self):
return ' | '.join(repr(t) for t in self.tasks)
class _basemap(Signature):
_task_name = None
_unpack_args = itemgetter('task', 'it')
def __init__(self, task, it, **options):
Signature.__init__(
self, self._task_name, (),
{'task': task, 'it': regen(it)}, immutable=True, **options
)
def apply_async(self, args=(), kwargs={}, **opts):
# need to evaluate generators
task, it = self._unpack_args(self.kwargs)
return self.type.apply_async(
(), {'task': task, 'it': list(it)}, **opts
)
@classmethod
def from_dict(cls, d, app=None):
return cls(*cls._unpack_args(d['kwargs']), app=app, **d['options'])
@Signature.register_type
class xmap(_basemap):
_task_name = 'celery.map'
def __repr__(self):
task, it = self._unpack_args(self.kwargs)
return '[{0}(x) for x in {1}]'.format(task.task,
truncate(repr(it), 100))
@Signature.register_type
class xstarmap(_basemap):
_task_name = 'celery.starmap'
def __repr__(self):
task, it = self._unpack_args(self.kwargs)
return '[{0}(*x) for x in {1}]'.format(task.task,
truncate(repr(it), 100))
@Signature.register_type
class chunks(Signature):
_unpack_args = itemgetter('task', 'it', 'n')
def __init__(self, task, it, n, **options):
Signature.__init__(
self, 'celery.chunks', (),
{'task': task, 'it': regen(it), 'n': n},
immutable=True, **options
)
@classmethod
def from_dict(self, d, app=None):
return chunks(*self._unpack_args(d['kwargs']), app=app, **d['options'])
def apply_async(self, args=(), kwargs={}, **opts):
return self.group().apply_async(args, kwargs, **opts)
def __call__(self, **options):
return self.group()(**options)
def group(self):
# need to evaluate generators
task, it, n = self._unpack_args(self.kwargs)
return group((xstarmap(task, part, app=self._app)
for part in _chunks(iter(it), n)),
app=self._app)
@classmethod
def apply_chunks(cls, task, it, n, app=None):
return cls(task, it, n, app=app)()
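# A small illustration (hypothetical ``add`` task): for
# ``chunks(add.s(), zip(range(100), range(100)), 10)``, calling ``group()``
# yields ten ``xstarmap`` signatures, each applying ``add`` over a ten-pair
# slice of the iterable.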
def _maybe_group(tasks):
if isinstance(tasks, group):
tasks = list(tasks.tasks)
elif isinstance(tasks, Signature):
tasks = [tasks]
else:
tasks = regen(tasks)
return tasks
def _maybe_clone(tasks, app):
return [s.clone() if isinstance(s, Signature) else signature(s, app=app)
for s in tasks]
@Signature.register_type
class group(Signature):
def __init__(self, *tasks, **options):
if len(tasks) == 1:
tasks = _maybe_group(tasks[0])
Signature.__init__(
self, 'celery.group', (), {'tasks': tasks}, **options
)
self.tasks, self.subtask_type = tasks, 'group'
@classmethod
def from_dict(self, d, app=None):
tasks = d['kwargs']['tasks']
if d['args'] and tasks:
# partial args passed on to all tasks in the group (Issue #1057).
for task in tasks:
task['args'] = task._merge(d['args'])[0]
return group(tasks, app=app, **kwdict(d['options']))
def apply_async(self, args=(), kwargs=None, **options):
tasks = _maybe_clone(self.tasks, app=self._app)
if not tasks:
return self.freeze()
# taking the app from the first task in the list,
# there may be a better solution to this, e.g.
# consolidate tasks with the same app and apply them in
# batches.
type = tasks[0].type.app.tasks[self['task']]
return type(*type.prepare(dict(self.options, **options),
tasks, args))
def set_immutable(self, immutable):
for task in self.tasks:
task.set_immutable(immutable)
def link(self, sig):
# Simply link to first task
sig = sig.clone().set(immutable=True)
return self.tasks[0].link(sig)
def link_error(self, sig):
sig = sig.clone().set(immutable=True)
return self.tasks[0].link_error(sig)
def apply(self, *args, **kwargs):
if not self.tasks:
return self.freeze() # empty group returns GroupResult
return Signature.apply(self, *args, **kwargs)
def __call__(self, *partial_args, **options):
return self.apply_async(partial_args, **options)
def freeze(self, _id=None):
opts = self.options
try:
gid = opts['task_id']
except KeyError:
gid = opts['task_id'] = uuid()
new_tasks, results = [], []
for task in self.tasks:
task = maybe_signature(task, app=self._app).clone()
results.append(task._freeze())
new_tasks.append(task)
self.tasks = self.kwargs['tasks'] = new_tasks
return self.app.GroupResult(gid, results)
_freeze = freeze
def skew(self, start=1.0, stop=None, step=1.0):
it = fxrange(start, stop, step, repeatlast=True)
for task in self.tasks:
task.set(countdown=next(it))
return self
def __iter__(self):
return iter(self.tasks)
def __repr__(self):
return repr(self.tasks)
@property
def type(self):
return self._type or self.tasks[0].type.app.tasks[self['task']]
@Signature.register_type
class chord(Signature):
def __init__(self, header, body=None, task='celery.chord',
args=(), kwargs={}, **options):
Signature.__init__(
self, task, args,
dict(kwargs, header=_maybe_group(header),
body=maybe_signature(body, app=self._app)), **options
)
self.subtask_type = 'chord'
@classmethod
def from_dict(self, d, app=None):
args, d['kwargs'] = self._unpack_args(**kwdict(d['kwargs']))
return self(*args, app=app, **kwdict(d))
@staticmethod
def _unpack_args(header=None, body=None, **kwargs):
# Python signatures are better at extracting keys from dicts
# than manually popping things off.
return (header, body), kwargs
@property
def type(self):
return self._type or self.tasks[0].type.app.tasks['celery.chord']
def apply_async(self, args=(), kwargs={}, task_id=None, **options):
body = kwargs.get('body') or self.kwargs['body']
kwargs = dict(self.kwargs, **kwargs)
body = body.clone(**options)
_chord = self._type or body.type.app.tasks['celery.chord']
if _chord.app.conf.CELERY_ALWAYS_EAGER:
return self.apply((), kwargs, task_id=task_id, **options)
res = body.freeze(task_id)
parent = _chord(self.tasks, body, args, **options)
res.parent = parent
return res
def __call__(self, body=None, **options):
return self.apply_async((), {'body': body} if body else {}, **options)
def clone(self, *args, **kwargs):
s = Signature.clone(self, *args, **kwargs)
# need to make copy of body
try:
s.kwargs['body'] = s.kwargs['body'].clone()
except (AttributeError, KeyError):
pass
return s
def link(self, callback):
self.body.link(callback)
return callback
def link_error(self, errback):
self.body.link_error(errback)
return errback
def set_immutable(self, immutable):
# changes mutability of header only, not callback.
for task in self.tasks:
task.set_immutable(immutable)
def __repr__(self):
if self.body:
return self.body.reprcall(self.tasks)
return '<chord without body: {0.tasks!r}>'.format(self)
tasks = _getitem_property('kwargs.header')
body = _getitem_property('kwargs.body')
def signature(varies, *args, **kwargs):
if not (args or kwargs) and isinstance(varies, dict):
if isinstance(varies, Signature):
return varies.clone()
return Signature.from_dict(varies)
return Signature(varies, *args, **kwargs)
subtask = signature # XXX compat
def maybe_signature(d, app=None):
if d is not None and isinstance(d, dict):
if not isinstance(d, Signature):
return signature(d, app=app)
if app is not None:
d._app = app
return d
maybe_subtask = maybe_signature # XXX compat
|
{
"content_hash": "47b63fb95c5561daf5602844ebfb835b",
"timestamp": "",
"source": "github",
"line_count": 607,
"max_line_length": 79,
"avg_line_length": 31.769357495881383,
"alnum_prop": 0.5698506533914126,
"repo_name": "sivaprakashniet/push_pull",
"id": "ef18dd976790751a82cf2c450f4277ea38a564c8",
"size": "19308",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "p2p/lib/python2.7/site-packages/celery/canvas.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "33347"
},
{
"name": "CSS",
"bytes": "111284"
},
{
"name": "CoffeeScript",
"bytes": "21"
},
{
"name": "HTML",
"bytes": "933220"
},
{
"name": "JavaScript",
"bytes": "260224"
},
{
"name": "Nginx",
"bytes": "4758"
},
{
"name": "Python",
"bytes": "9725308"
},
{
"name": "Roff",
"bytes": "17679"
},
{
"name": "Shell",
"bytes": "6008"
}
],
"symlink_target": ""
}
|
import logging
from experiment_setup import ExperimentSetup
from model_learn import ModelLearn
from plot_results import PlotOutput
from multiprocessing.pool import Pool
class ExperimentRunner:
def __init__(self):
pass
@staticmethod
def get_configs():
"""
        Builds an array of configurations for running (in parallel)
"""
configs = []
expr_names = ExperimentRunner.get_experiments()
methods = ['full', 'mix1']
sparse_factor = [1.0, 0.5, 0.2, 0.1]
run_ids = [1]
for e in expr_names:
for m in methods:
for s in sparse_factor:
for run_id in run_ids:
configs.append({'method': m,
'sparse_factor': s,
'method_to_run': e,
'run_id': run_id,
'log_level': ExperimentRunner.get_log_level()
})
return configs
@staticmethod
def get_experiments():
"""
Builds an array of experiments to run in parallel (the array can contain more than one experiment)
"""
# uncomment to run desired experiment
return [ExperimentSetup.boston_data.__name__]
# return [ExperimentSetup.wisconsin_breast_cancer_data.__name__]
# return [ExperimentSetup.USPS_data.__name__]
# return [ExperimentSetup.creep_data.__name__]
# return [ExperimentSetup.abalone_data.__name__]
# return [ExperimentSetup.mining_data.__name__]
#
@staticmethod
def run_parallel(n_process):
"""
        Creates a process for each element in the array returned by ``get_configs()`` and runs the experiment corresponding
        to each element. The maximum number of processes to run in parallel is determined by ``n_process``.
"""
p = Pool(n_process)
p.map(run_config, ExperimentRunner.get_configs())
@staticmethod
def get_log_level():
""" debug level """
return logging.DEBUG
# return logging.INFO
@staticmethod
def boston_experiment():
ExperimentSetup.boston_data({'method': 'mix2', 'sparse_factor': 0.8, 'run_id': 3, 'log_level': logging.DEBUG})
@staticmethod
def wisconsin_breast_experiment():
ExperimentSetup.wisconsin_breast_cancer_data(
{'method': 'full', 'sparse_factor': 1.0, 'run_id': 1, 'log_level': logging.DEBUG})
@staticmethod
def abalone_experiment():
ExperimentSetup.abalone_data({'method': 'full', 'sparse_factor': 1.0, 'run_id': 1, 'log_level': logging.DEBUG})
@staticmethod
def creep_experiment():
ExperimentSetup.creep_data({'method': 'full', 'sparse_factor': 1.0, 'run_id': 1, 'log_level': logging.DEBUG})
@staticmethod
def USPS_experiment():
ExperimentSetup.USPS_data({'method': 'full', 'sparse_factor': 0.1, 'run_id': 1, 'log_level': logging.DEBUG})
@staticmethod
def mining_experiment():
ExperimentSetup.mining_data({'method': 'mix1', 'sparse_factor': 1.0, 'run_id': 1, 'log_level': logging.DEBUG})
@staticmethod
def sarcos_experiment():
ExperimentSetup.sarcos_data({'method': 'full',
'sparse_factor': 0.04,
'run_id': 0,
'log_level': logging.DEBUG,
'n_thread': 15,
'partition_size': 2000,
# 'image': '../results/all/'
})
@staticmethod
def sarcos_all_joins_experiment():
ExperimentSetup.sarcos_all_joints_data({'method': 'full',
'sparse_factor': 0.04,
'run_id': 0,
'log_level': logging.DEBUG,
'n_thread': 15,
'partition_size': 3000,
#'image': '../results/sarcos_1/'
})
@staticmethod
def mnist_experiment():
ExperimentSetup.MNIST_data({'method': 'full',
'sparse_factor': 0.004,
'run_id': 1,
'log_level': logging.DEBUG,
'n_thread': 20,
'partition_size': 2000,
# 'image': '../results/mnist_1/'
})
@staticmethod
def mnist_binary_experiment():
ExperimentSetup.MNIST_binary_data({'method': 'full',
'sparse_factor': 200. / 60000,
'run_id': 1,
'log_level': logging.DEBUG,
'n_thread': 20,
'partition_size': 2000,
# 'image': '../results/mnist_1/'
})
@staticmethod
def mnist_binary_inducing_experiment():
ExperimentSetup.MNIST_binary_inducing_data({'method': 'full',
'sparse_factor': 200. / 60000,
'run_id': 1,
'log_level': logging.DEBUG,
'n_thread': 8,
'partition_size': 1000,
# 'image': '../results/mnist_1/'
})
@staticmethod
def plot():
PlotOutput.plot_output_all('boston', ModelLearn.get_output_path(),
lambda x: x['method'] == 'full', False)
# plots all the files
# PlotOutput.plot_output_all('boston', Experiments.get_output_path(),
# None, False)
# plots for an specific experiment
# PlotOutput.plot_output_all('abalone_graph', Experiments.get_output_path(),
# lambda x: x['experiment'] == 'abalone', False)
def run_config(config):
try:
logger.info('started config: ' + str(config))
getattr(ExperimentSetup, config['method_to_run'])(config)
logger.info('finished config: ' + str(config))
except Exception as e:
logger.exception(config)
if __name__ == '__main__':
logger = ModelLearn.get_logger(ModelLearn.get_logger_path(), 'general_' + ModelLearn.get_ID(), logging.DEBUG)
# uncomment to run experiments in parallel
# ExperimentRunner.run_parallel(3)
# runs an individual configuration
# ExperimentRunner.boston_experiment()
# ExperimentRunner.wisconsin_breast_experiment()
# ExperimentRunner.USPS_experiment()
# ExperimentRunner.mining_experiment()
# ExperimentRunner.abalone_experiment()
# ExperimentRunner.mnist_binary_inducing_experiment()
ExperimentRunner.mnist_binary_experiment()
# ExperimentRunner.sarcos_all_joins_experiment()
# ExperimentRunner.sarcos_experiment()
# uncomment to plots the outputs in results folder
# ExperimentRunner.plot()
|
{
"content_hash": "f925fa2e20738ae571a9d7f1c2c92881",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 119,
"avg_line_length": 37.04639175257732,
"alnum_prop": 0.5135661611242521,
"repo_name": "adezfouli/savigp",
"id": "bc9a29309063f20499b9d4de7b6a33571ff92185",
"size": "7187",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GP/experiment_run.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Matlab",
"bytes": "10305"
},
{
"name": "Python",
"bytes": "213689"
},
{
"name": "R",
"bytes": "11104"
},
{
"name": "Shell",
"bytes": "799"
}
],
"symlink_target": ""
}
|
"""Family module for Translate Wiki."""
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
from pywikibot import family
# The Wikimedia i18n family
class Family(family.SingleSiteFamily):
"""Family class for Translate Wiki."""
name = 'i18n'
domain = 'translatewiki.net'
def protocol(self, code):
"""Return https as the protocol for this family."""
return "https"
|
{
"content_hash": "5bfbbe04066fa45bdec0020c0dfb334e",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 59,
"avg_line_length": 22.526315789473685,
"alnum_prop": 0.6682242990654206,
"repo_name": "smalyshev/pywikibot-core",
"id": "6647623502ba3349622f7b3d56334fdcc4865744",
"size": "453",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pywikibot/families/i18n_family.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "97"
},
{
"name": "Python",
"bytes": "4195506"
}
],
"symlink_target": ""
}
|
from sqlalchemy import (
Column,
Integer,
Text,
)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import (
scoped_session,
sessionmaker,
)
from zope.sqlalchemy import ZopeTransactionExtension
DBSession = scoped_session(sessionmaker(extension=ZopeTransactionExtension()))
Base = declarative_base()
class TaskItem(Base):
__tablename__ = 'tasks'
id = Column(Integer, primary_key=True)
task = Column(Text, unique=True)
|
{
"content_hash": "9b81b16af8bd799fd833b5c65455df32",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 78,
"avg_line_length": 22.181818181818183,
"alnum_prop": 0.7213114754098361,
"repo_name": "miohtama/pyramid_celery",
"id": "0a232ffeb6cc405e197470b94842ee785fd43ff0",
"size": "488",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/long_running_with_tm/long_running_with_tm/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "479"
},
{
"name": "Python",
"bytes": "19638"
}
],
"symlink_target": ""
}
|
import re
calls="/lscr2/andersenlab/kml436/git_repos2/Transposons2/results/kinship/kin_C_matrix_full.txt"
CALLS=open(calls,"r")
absences={}
references={}
call={}
first_line=True
for line in CALLS:
line=line.rstrip('\n')
items=re.split("[\t]",line)
strain=items[0]
if first_line:
ab=items.index('absent_TRANS_CER1_C') #get index of CER1 absence column
ref=items.index('reference_TRANS_CER1_C') #get index of CER1 reference column
first_line=False
else:
absences[strain]=items[ab] #key:strain, value: absent_TRANS_CER1 column
references[strain]=items[ref] #key:strain, value: reference_TRANS_CER1 column
for i in absences.keys():
if absences[i] == references[i]:
print "ERROR: TE cannot be both present and absent" # not necessarily error, could both be NAs, adjust if needed
if absences[i]=="1":
call[i]="absent"
elif references[i]=="1":
call[i]="present"
else:
call[i]="NA"
CALLS.close()
#########################################################################################################
ng="/lscr2/andersenlab/kml436/git_repos2/Transposons2/files/cer_PA.txt"
NG=open(ng, "r")
truth_call={}
for line in NG:
line=line.rstrip('\n')
items=re.split("[\t]",line)
name=items[0]
pa=items[1]
if name in call.keys():
if pa=="1":
truth_call[name]="absent"
elif pa=="0":
truth_call[name]="present"
else:
truth_call[name]="NA"
#########################################################################################################
outfile="/lscr2/andersenlab/kml436/git_repos2/Transposons2/files/cer_comparison.txt"
OUTFILE=open(outfile, "w")
for i in sorted(call.keys()):
found=call[i]
if i in truth_call.keys():
truth=truth_call[i]
if call[i] =="NA" or truth_call[i]=="NA":
comparison="NA" #if either the TE callers or truth caller output an "NA" result, no comparison can be made
elif call[i]==truth_call[i]:
comparison="CORRECT" #if the TEMP/TELOCATE transposon caller result equals the truth call, it was correct
else:
comparison="INCORRECT" #if the TEMP/TELOCATE transposon caller result equals the truth call, it was incorrect
else:
truth="NA"
comparison="NA"
OUTFILE.write("{i}\t{found}\t{truth}\t{comparison}\n".format(**locals()))
|
{
"content_hash": "e3b252d3666525f042a3287b578b7f82",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 114,
"avg_line_length": 33.07462686567164,
"alnum_prop": 0.6358303249097473,
"repo_name": "klaricch/Transposons2",
"id": "1a6921fca2e74ace65435f08fadcceb5f7882e47",
"size": "2712",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/finds_cers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "731"
},
{
"name": "Python",
"bytes": "337144"
},
{
"name": "R",
"bytes": "68986"
},
{
"name": "Shell",
"bytes": "732668"
}
],
"symlink_target": ""
}
|
"""Defines access permissions for the API."""
from __future__ import absolute_import
from rest_framework import permissions
from readthedocs.core.permissions import AdminPermission
class IsOwner(permissions.BasePermission):
"""Custom permission to only allow owners of an object to edit it."""
def has_object_permission(self, request, view, obj):
# Write permissions are only allowed to the owner of the snippet
return request.user in obj.users.all()
class CommentModeratorOrReadOnly(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return True # TODO: Similar logic to #1084
return AdminPermission.is_admin(request.user, obj.node.project)
class RelatedProjectIsOwner(permissions.BasePermission):
"""Custom permission to only allow owners of an object to edit it."""
def has_permission(self, request, view):
return (request.method in permissions.SAFE_METHODS)
def has_object_permission(self, request, view, obj):
# Write permissions are only allowed to the owner of the snippet
return (
request.method in permissions.SAFE_METHODS or
(request.user in obj.project.users.all())
)
class APIPermission(permissions.IsAuthenticatedOrReadOnly):
"""
Control users access to the API.
This permission should allow authenticated users readonly access to the API,
and allow admin users write access. This should be used on API resources
that need to implement write operations to resources that were based on the
ReadOnlyViewSet
"""
def has_permission(self, request, view):
has_perm = super(APIPermission, self).has_permission(request, view)
return has_perm or (request.user and request.user.is_staff)
def has_object_permission(self, request, view, obj):
has_perm = super(APIPermission, self).has_object_permission(
request, view, obj)
return has_perm or (request.user and request.user.is_staff)
class APIRestrictedPermission(permissions.BasePermission):
"""
Allow admin write, authenticated and anonymous read only.
This differs from :py:class:`APIPermission` by not allowing for
    authenticated POSTs. This permission is used on endpoints like ``/api/v2/build/``,
which are used by admin users to coordinate build instance creation, but
only should be readable by end users.
"""
def has_permission(self, request, view):
return (
request.method in permissions.SAFE_METHODS or
(request.user and request.user.is_staff)
)
def has_object_permission(self, request, view, obj):
return (
request.method in permissions.SAFE_METHODS or
(request.user and request.user.is_staff)
)
|
{
"content_hash": "2d9ede7e8d34d25254909ba3dcae41e3",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 80,
"avg_line_length": 34.23809523809524,
"alnum_prop": 0.6981919332406119,
"repo_name": "safwanrahman/readthedocs.org",
"id": "615872d307e9af98d5d91660dbac03ab32e5e173",
"size": "2876",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "readthedocs/restapi/permissions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4515"
},
{
"name": "CSS",
"bytes": "63656"
},
{
"name": "HTML",
"bytes": "192701"
},
{
"name": "JavaScript",
"bytes": "425566"
},
{
"name": "Makefile",
"bytes": "4594"
},
{
"name": "Python",
"bytes": "1337480"
},
{
"name": "Shell",
"bytes": "358"
}
],
"symlink_target": ""
}
|
# Copyright (c) 2009-2014, Mario Vilas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Debugging API wrappers in ctypes.
"""
__revision__ = "$Id$"
from winappdbg.win32 import defines
from winappdbg.win32 import kernel32
from winappdbg.win32 import user32
from winappdbg.win32 import advapi32
from winappdbg.win32 import wtsapi32
from winappdbg.win32 import shell32
from winappdbg.win32 import shlwapi
from winappdbg.win32 import psapi
from winappdbg.win32 import dbghelp
from winappdbg.win32 import ntdll
from winappdbg.win32.defines import *
from winappdbg.win32.kernel32 import *
from winappdbg.win32.user32 import *
from winappdbg.win32.advapi32 import *
from winappdbg.win32.wtsapi32 import *
from winappdbg.win32.shell32 import *
from winappdbg.win32.shlwapi import *
from winappdbg.win32.psapi import *
from winappdbg.win32.dbghelp import *
from winappdbg.win32.ntdll import *
# This calculates the list of exported symbols.
_all = set()
_all.update(defines._all)
_all.update(kernel32._all)
_all.update(user32._all)
_all.update(advapi32._all)
_all.update(wtsapi32._all)
_all.update(shell32._all)
_all.update(shlwapi._all)
_all.update(psapi._all)
_all.update(dbghelp._all)
_all.update(ntdll._all)
__all__ = [_x for _x in _all if not _x.startswith('_')]
__all__.sort()
|
{
"content_hash": "5300b89e1ef425e6dbbb76afd49ee7e9",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 78,
"avg_line_length": 40.98571428571429,
"alnum_prop": 0.7434646218194493,
"repo_name": "SlicerRt/SlicerDebuggingTools",
"id": "e716bf3e96129db5507a1c44a7f36c684d77ae18",
"size": "2917",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "PyDevRemoteDebug/ptvsd-4.1.3/ptvsd/_vendored/pydevd/pydevd_attach_to_process/winappdbg/win32/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "406"
},
{
"name": "C",
"bytes": "13361"
},
{
"name": "C++",
"bytes": "105521"
},
{
"name": "CMake",
"bytes": "21408"
},
{
"name": "Cython",
"bytes": "69580"
},
{
"name": "Makefile",
"bytes": "2063"
},
{
"name": "Python",
"bytes": "3900091"
},
{
"name": "Shell",
"bytes": "737"
}
],
"symlink_target": ""
}
|
"""
Created on Mon Mar 9 14:58:39 2015
@author: jpk
This script produces a report of observations taken over the last 7 days at SALT
and prints it on the terminal and writes it to a file.
The script runs from today and queries the sdb for data going back 7 days.
"""
global __version__
__version__ = "1.0"
__date__ = "20 March 2015"
__author__ = "Paul Kotze"
__doc__="\nSALT Observing Summary Plots Generator, version "+__version__ +'''
This script uses queries generated by the report_queries.py script and plots
the relevant data in .png format
Usage: python weekly_summary_plots.py [OPTIONS]
OPTIONS are as follows, arguments are compulsory for both long and short forms.
Example formats are shown:
-h --help Prints this help
-s --startdate=20141220 Sets the start date for the query
-e --enddate=20150214 Sets the end date for the query
-d --date=20150215 Sets the date for the query; if this option
is used a start date is not required, but an
interval is required
-i --interval=7 Set the interval, in days, to go back in
history for the query
'''
__usage__='''Usage: python weekly_summary_plots.py [OPTIONS]
OPTIONS are as follows, arguments are compulsory for both long and short forms.
Example formats are shown:
-h --help Prints this help
Specifying a date range:
-s --startdate=20141220 Sets the start date for the query
-e --enddate=20150214 Sets the end date for the query
Specifying a date and a range in days to query in the past:
-d --date=20150215 Sets the date for the query; if this option
is used a start date is not required, but an
interval is required
-i --interval=7 Set the interval, in days, to go back in
history for the query
'''
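# A minimal usage sketch (the dates below are illustrative): report on the
# seven nights leading up to 15 February 2015, either with a date plus an
# interval or with an explicit range:
#
#   python weekly_summary_plots.py --date=20150215 --interval=7
#   python weekly_summary_plots.py --startdate=20150208 --enddate=20150215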
import os
import sys
import getopt
import pandas as pd
import pandas.io.sql as psql
import MySQLdb
import matplotlib.pyplot as pl
import numpy as np
from datetime import datetime
import saltefficiency.util.report_queries as rq
def usage():
print __usage__
raise SystemExit(2)
def validate_date(date_text):
'''
    this function validates the date provided
'''
try:
date = datetime.strptime(date_text, '%Y%m%d')
except ValueError:
raise ValueError("Incorrect data format, date should be YYYYMMDD")
return date
def validate_int(number):
try:
num = int(number)
except ValueError:
raise ValueError('The interval should be an integer')
return num
def parse_commandline(argv):
    # runs when the module is invoked from the command line;
    # check whether any command-line arguments were specified
if len(argv)==0:
usage()
sys.exit(2)
else:
pass
# read command line options
try:
opts,args = getopt.getopt(argv,"hs:e:d:i:",
["help","startdate=","enddate=", "date=","interval="])
except getopt.GetoptError, inst:
print inst
print 'Use --help to get a list of options'
sys.exit(2)
    # initialise the validation flags
s_val = False
e_val = False
d_val = False
i_val = False
    # parse the options into the relevant variables
for opt, arg in opts:
if opt in ('--help'):
usage()
elif opt in ('-s','--startdate'):
s_date = validate_date(arg)
s_val = True
elif opt in ('-e','--enddate'):
e_date = validate_date(arg)
e_val = True
elif opt in ('-d','--date'):
d_date = validate_date(arg)
d_val = True
elif opt in ('-i','--interval'):
inter = validate_int(arg)
i_val = True
else:
print 'Unknown option: ' + opt
usage()
# check all the flags and inform the user if there are missing values
if s_val + e_val == 2:
# check that the end date is after the start date
date_diff = (e_date - s_date).days
if date_diff < 0:
raise ValueError('The start date cannot be later than the end date')
else:
date = datetime.strftime(e_date, '%Y-%m-%d')
interval = date_diff
    # only one of the start/end dates was specified - ask for the missing one
elif s_val + e_val == 1:
if s_val:
raise ValueError('You have to specify an end date')
elif e_val:
raise ValueError('You have to specify a start date')
else:
pass
else:
pass
if d_val + i_val == 2:
date = datetime.strftime(d_date, '%Y-%m-%d')
interval = inter
    # only one of date/interval was specified - ask for the missing one
elif d_val + i_val == 1:
if d_val:
raise ValueError('You have to specify an interval with a date')
elif i_val:
raise ValueError('You have to specify a date with an interval')
else:
pass
return date, interval
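# For example (illustrative values): "-d 20150215 -i 7" parses to
# ('2015-02-15', 7), and "-s 20150208 -e 20150215" yields the same pair,
# since the interval is derived from the difference between the two dates.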
def string_header(dr):
'''
format the header to be printed and written to file
'''
s = dr.ix[0].to_string().split('\n')
txt = '''
*************** SALT Weekly Observing Stats *****************
A report for %s to
%s
''' %(s[0], s[1])
return txt
def string_weekly_total_time_breakdown(wttb):
    # determine the percentages of time broken down into categories
t = pd.Series(wttb.stack(), index = wttb.stack().index)
t.index = t.index.get_level_values(1)
per = pd.Series(np.zeros(len(t)), index = t.index)
per['Weather':'Science'] = t['Weather':'Science'] / t.Total * 100
per['TimeLostToWeather': 'ScienceTime'] = per['Weather':'Science']
# write out the string:
txt = '''
-------------------------------------------------------------
Time Breakdown:
---------------
Science time: {} ({:,.0f}%)
Engineering time: {} ({:,.0f}%)
Weather: {} ({:,.0f}%)
Problems: {} ({:,.0f}%)
--
Total: {}
'''.format(t.ScienceTime, per.Science,
t.EngineeringTime, per.Engineering,
t.TimeLostToWeather, per.Weather,
t.TimeLostToProblems, per.Problems,
t.NightLength)
return txt
def string_weekly_priority_breakdown(wpb):
# create a percentage column
wpb['per'] = pd.Series(np.zeros(len(wpb)), index = wpb.index)
# determine the percentage from the Time column which is in seconds
wpb.per = (wpb.Tsec / wpb.Tsec.sum()) * 100
txt = wpb.to_string(columns=['Priority', 'No. Blocks', 'per'],
index=False,
header=False,
formatters={'per':'({:,.0f}%)'.format,
'Priority':' {:>5} '.format,
'No. Blocks':' {0:,.0f} '.format})
hdr = '''
-------------------------------------------------------------
Priority Breakdown:
-------------------
Priority No. Blocks
'''
ftr = '''
--
Total {0:,.0f}
'''.format(wpb['No. Blocks'].sum())
return hdr + txt + ftr
def string_weekly_subsystem_breakdown(wsb):
# calculate the percentage of time breakdown
# create a new percentage column
wsb['per'] = pd.Series(np.zeros(len(wsb)), index = wsb.index)
# determine the percentage from the Time column which is in seconds
wsb.per = (wsb.Time / wsb.Time.sum()) * 100
# create a string object to be printed and written to file
txt = wsb.to_string(columns=['SaltSubsystem', 'TotalTime', 'per'],
index=False,
header=False,
formatters={'SaltSubsystem':' {:>11} '.format,
'per':'({:,.0f}%)'.format,
'TotalTime':' {} '.format })
hdr = '''
-------------------------------------------------------------
Problems Time Breakdown
-----------------------
SALT Subsystem Total Time
'''
return hdr + txt
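# A standalone sketch of the to_string/formatters pattern used above
# (made-up data; each formatter is applied to its own column only):
#
#   import pandas as pd
#   df = pd.DataFrame({'SaltSubsystem': ['RSS'],
#                      'TotalTime': ['01:30:00'],
#                      'per': [75.0]})
#   print df.to_string(columns=['SaltSubsystem', 'TotalTime', 'per'],
#                      index=False, header=False,
#                      formatters={'per': '({:,.0f}%)'.format})
#   # -> " RSS  01:30:00 (75%)" (modulo column spacing)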
def print_to_screen(txt):
'''
this function prints the formatted string to the terminal
'''
ftr = '''
****************** End of Weekly Report *********************
'''
print txt + ftr
return
def write_to_file(dr, txt, dirname='./logs/'):
'''
    this function writes the text to a file and names the report according
to the date range specified
'''
filename = 'weekly_report_' + datetime.strftime(dr.StartDate[0], '%Y%m%d') + \
'-' + datetime.strftime(dr.EndDate[0], '%Y%m%d') + '.txt'
ftr = '''
****************** End of Weekly Report *********************
'''
with open(dirname+filename, 'w') as f:
f.write(txt + ftr)
if __name__=='__main__':
# parse line arguments
date, interval = parse_commandline(sys.argv[1:])
# open mysql connection to the sdb
mysql_con = MySQLdb.connect(host='sdb.cape.saao.ac.za',
port=3306, user=os.environ['SDBUSER'],
passwd=os.environ['SDBPASS'], db='sdb')
# use the connection to get the required data: _d
dr_d = rq.date_range(mysql_con, date, interval=interval)
wpb_d = rq.weekly_priority_breakdown(mysql_con, date, interval=interval)
wtb_d = rq.weekly_time_breakdown(mysql_con, date, interval=interval)
wttb_d = rq.weekly_total_time_breakdown(mysql_con, date, interval=interval)
wsb_d = rq.weekly_subsystem_breakdown(mysql_con, date, interval=interval)
# TESTING: save the dataframes
# dr_d.save('dr_d')
# wpb_d.save('wpd_d')
# wtb_d.save('wtb_d')
# wttb_d.save('wttd_d')
# wsb_d.save('wsb_d')
# format the string needed to print and write to file: _t
dr_t = string_header(dr_d)
wpd_t = string_weekly_priority_breakdown(wpb_d)
wttb_t = string_weekly_total_time_breakdown(wttb_d)
wsb_t = string_weekly_subsystem_breakdown(wsb_d)
# print the report to the terminal
print_to_screen(dr_t + wpd_t + wttb_t + wsb_t)
# write the report to file
write_to_file(dr_d, dr_t + wpd_t + wttb_t + wsb_t)
mysql_con.close()
|
{
"content_hash": "e63403bea407d8329e7e72f6ada70e59",
"timestamp": "",
"source": "github",
"line_count": 343,
"max_line_length": 82,
"avg_line_length": 29.682215743440235,
"alnum_prop": 0.5596699734800118,
"repo_name": "hettlage/saltefficiency",
"id": "81c2cd31a85ebda0d00fea308d7d4d447532ccb5",
"size": "10205",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saltefficiency/plot/summary_report.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "108277"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('main', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='AmaraVideo',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('video_url', models.URLField()),
('video_id', models.CharField(max_length=100)),
('transcript', jsonfield.fields.JSONField(null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('event', models.ForeignKey(to='main.Event')),
],
options={
},
bases=(models.Model,),
),
]
|
{
"content_hash": "b520f3a2d2c03e443cb5a3c0db452173",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 114,
"avg_line_length": 31.620689655172413,
"alnum_prop": 0.5463467829880043,
"repo_name": "anjalymehla/airmozilla",
"id": "afc5b3ba8f1070161cebc35b19e0e75f4df173bd",
"size": "941",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "airmozilla/subtitles/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4527"
},
{
"name": "Brightscript",
"bytes": "67473"
},
{
"name": "CSS",
"bytes": "1715776"
},
{
"name": "HTML",
"bytes": "2419012"
},
{
"name": "JavaScript",
"bytes": "3175629"
},
{
"name": "Makefile",
"bytes": "13548"
},
{
"name": "Puppet",
"bytes": "6677"
},
{
"name": "Python",
"bytes": "1744030"
},
{
"name": "Ruby",
"bytes": "4978"
},
{
"name": "Shell",
"bytes": "3302"
},
{
"name": "Smarty",
"bytes": "2384"
}
],
"symlink_target": ""
}
|
"""
Module implementing an XYZ file object class.
"""
import re
from io import StringIO
import pandas as pd
from monty.io import zopen
from pymatgen.core.structure import Molecule
class XYZ:
"""
Basic class for importing and exporting Molecules or Structures in XYZ
format.
.. note::
Exporting periodic structures in the XYZ format will lose information
about the periodicity. Essentially, only cartesian coordinates are
written in this format and no information is retained about the
lattice.
"""
def __init__(self, mol: Molecule, coord_precision: int = 6):
"""
Args:
mol: Input molecule or list of molecules
coord_precision: Precision to be used for coordinates.
"""
if isinstance(mol, Molecule) or not isinstance(mol, list):
self._mols = [mol]
else:
self._mols = mol
self.precision = coord_precision
@property
def molecule(self) -> Molecule:
"""
Returns molecule associated with this XYZ. In case multiple frame
XYZ, returns the last frame.
"""
return self._mols[-1]
@property
def all_molecules(self):
"""
Returns all the frames of molecule associated with this XYZ.
"""
return self._mols
@staticmethod
def _from_frame_string(contents):
"""
Convert a single frame XYZ string to a molecule
"""
lines = contents.split("\n")
num_sites = int(lines[0])
coords = []
sp = []
coord_patt = re.compile(r"(\w+)\s+([0-9\-\+\.*^eEdD]+)\s+([0-9\-\+\.*^eEdD]+)\s+" r"([0-9\-\+\.*^eEdD]+)")
for i in range(2, 2 + num_sites):
m = coord_patt.search(lines[i])
if m:
                sp.append(m.group(1))  # regex groups are 1-indexed
                # m.groups() is a 0-indexed tuple, so [1:4] picks x, y, z
# in case of 0.0D+00 or 0.00d+01 old double precision writing
# replace d or D by e for ten power exponent,
# and some files use *^ convention in place of e
xyz = [val.lower().replace("d", "e").replace("*^", "e") for val in m.groups()[1:4]]
coords.append([float(val) for val in xyz])
return Molecule(sp, coords)
@staticmethod
def from_string(contents):
"""
Creates XYZ object from a string.
Args:
contents: String representing an XYZ file.
Returns:
XYZ object
"""
if contents[-1] != "\n":
contents += "\n"
white_space = r"[ \t\r\f\v]"
natoms_line = white_space + r"*\d+" + white_space + r"*\n"
comment_line = r"[^\n]*\n"
coord_lines = r"(\s*\w+\s+[0-9\-\+\.*^eEdD]+\s+[0-9\-\+\.*^eEdD]+" r"\s+[0-9\-\+\.*^eEdD]+.*\n)+"
frame_pattern_text = natoms_line + comment_line + coord_lines
pat = re.compile(frame_pattern_text, re.MULTILINE)
mols = []
for xyz_match in pat.finditer(contents):
xyz_text = xyz_match.group(0)
mols.append(XYZ._from_frame_string(xyz_text))
return XYZ(mols)
@staticmethod
def from_file(filename):
"""
Creates XYZ object from a file.
Args:
filename: XYZ filename
Returns:
XYZ object
"""
with zopen(filename, "rt") as f:
return XYZ.from_string(f.read())
def as_dataframe(self):
"""
Generates a coordinates data frame with columns: atom, x, y, and z
In case of multiple frame XYZ, returns the last frame.
Returns:
pandas.DataFrame
"""
lines = str(self)
sio = StringIO(lines)
df = pd.read_csv(
sio,
header=None,
skiprows=[0, 1],
comment="#",
delim_whitespace=True,
names=["atom", "x", "y", "z"],
)
df.index += 1
return df
def _frame_str(self, frame_mol):
output = [str(len(frame_mol)), frame_mol.composition.formula]
fmtstr = "{{}} {{:.{0}f}} {{:.{0}f}} {{:.{0}f}}".format(self.precision)
for site in frame_mol:
output.append(fmtstr.format(site.specie, site.x, site.y, site.z))
return "\n".join(output)
def __str__(self):
return "\n".join([self._frame_str(mol) for mol in self._mols])
def write_file(self, filename):
"""
Writes XYZ to file.
Args:
filename: File name of output file.
"""
with zopen(filename, "wt") as f:
f.write(self.__str__())
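# A minimal usage sketch (kept as a comment so nothing runs on import; the
# CO geometry below is illustrative):
#
#   from pymatgen.core.structure import Molecule
#   from pymatgen.io.xyz import XYZ
#
#   mol = Molecule(["C", "O"], [[0.0, 0.0, 0.0], [0.0, 0.0, 1.13]])
#   xyz = XYZ(mol)
#   print(xyz)                # atom count, formula line, one line per site
#   xyz.write_file("co.xyz")  # plain-text XYZ, readable by XYZ.from_file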
|
{
"content_hash": "506491294ba76722a1b672201f2679c6",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 114,
"avg_line_length": 29.954838709677418,
"alnum_prop": 0.5285375834589705,
"repo_name": "gmatteo/pymatgen",
"id": "2fa144a2de5fd80f950cb9cefbdbc4fd05eda76e",
"size": "4753",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pymatgen/io/xyz.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "277"
},
{
"name": "Python",
"bytes": "7840569"
},
{
"name": "Shell",
"bytes": "711"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.contrib.auth.models import User
# TODO: Create index on email
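# (A minimal sketch of one way to do that in Django: declare the field with
# db_index=True, e.g. email = models.CharField(max_length=200, db_index=True);
# the same approach applies to the in_reply_to index noted below.)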
class Contact(models.Model):
email = models.CharField(max_length=200)
validated = models.BooleanField()
def __unicode__(self):
return self.email
class Admin:
pass
# TODO: Create index on in_reply_to
class Video(models.Model):
user = models.ForeignKey(User)
contact = models.ForeignKey(Contact)
# Type 1: upload, 2: youtube url
type = models.IntegerField()
add_datetime = models.DateTimeField()
title = models.CharField(max_length=200)
short_description = models.TextField(null=True)
description = models.TextField(null=True)
duration_seconds = models.IntegerField(null=True)
duration_string = models.TextField(null=True)
viewcount = models.IntegerField()
votecount = models.IntegerField()
reportcount = models.IntegerField()
in_reply_to = models.IntegerField(null=True)
replycount = models.IntegerField()
validation_hash = models.CharField(max_length=100)
visible = models.BooleanField()
youtube_id = models.CharField(max_length=100)
def __unicode__(self):
return str(self.id) + "_" + self.title
class Admin:
pass
class Upload(models.Model):
video = models.ForeignKey(Video)
file = models.FileField(upload_to='videos/')
uploaded = models.BooleanField()
processed = models.BooleanField()
validated = models.BooleanField()
def __unicode__(self):
return str(self.id) + "_" + self.file
class Admin:
pass
|
{
"content_hash": "d62fa2d207f373f02ffe7fdf0a252bd6",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 51,
"avg_line_length": 22.723076923076924,
"alnum_prop": 0.7298578199052133,
"repo_name": "jeztek/youbama",
"id": "c3ef33f172b49450ed24c14c2bde550f4172c8b6",
"size": "1477",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PHP",
"bytes": "21937"
},
{
"name": "Python",
"bytes": "19228"
}
],
"symlink_target": ""
}
|
from opsgenie_base_test_case import OpsGenieBaseActionTestCase
from list_groups import ListGroupsAction
class ListGroupsTestCase(OpsGenieBaseActionTestCase):
__test__ = True
action_cls = ListGroupsAction
def test_run_api_404(self):
action, adapter = self._get_action_status_code(
'GET',
"mock://api.opsgenie.com/v1/json/group",
status_code=404)
self.assertRaises(ValueError,
action.run)
def test_run_invalid_json(self):
action, adapter = self._get_action_invalid_json(
'GET',
"mock://api.opsgenie.com/v1/json/group")
self.assertRaises(ValueError,
action.run)
def test_run_api_success(self):
expected = self.load_json("list_groups.json")
action, adapter = self._get_mocked_action()
adapter.register_uri('GET',
"mock://api.opsgenie.com/v1/json/group",
text=self.get_fixture_content("list_groups.json"))
result = action.run()
self.assertEqual(result, expected)
|
{
"content_hash": "c886fa812229055542e7e0bf973081c5",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 79,
"avg_line_length": 32.25714285714286,
"alnum_prop": 0.5863596102745793,
"repo_name": "StackStorm/st2contrib",
"id": "88109ee4c1981aa344183e8e0d4d082ccf287279",
"size": "1876",
"binary": false,
"copies": "4",
"ref": "refs/heads/st2contrib-deprecated-archive",
"path": "archive/packs/opsgenie/tests/test_action_list_groups.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "8532"
},
{
"name": "Makefile",
"bytes": "5581"
},
{
"name": "Python",
"bytes": "1362240"
},
{
"name": "Ruby",
"bytes": "3081"
},
{
"name": "Shell",
"bytes": "7781"
}
],
"symlink_target": ""
}
|
import simulation_job_config as config
import threading
import subprocess
import json
class SimulationJob:
@staticmethod
def simulate(job):
json_input = job.data
returncode = 1
stderr = ""
stdout = "" # mirror back input in case of errors
try:
process = subprocess.Popen(config.simulation_path_program_path, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = process.communicate(json.dumps(json_input))
returncode = process.returncode
except Exception as e:
stderr = stderr + "\n" + str(e)
import os
stderr = stderr + "\n" + "Filepath: " + os.path.dirname(os.path.realpath(__file__))
stderr = stderr + "\n" + "Working Directory: " + os.getcwd()
if not stdout:
stdout = json_input
job.data = {"stdout": stdout, "stderr": stderr, "code": returncode}
if returncode == 0:
job.complete("finished")
else:
job.complete("failed")
|
{
"content_hash": "fbd436dfa72bb8b36fb30a9546d17e04",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 150,
"avg_line_length": 26.305555555555557,
"alnum_prop": 0.6779303062302007,
"repo_name": "walachey/football-worldcup-simulator",
"id": "dde0c53e231e200cf2bb0f2ee90600c8a839f0df",
"size": "947",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "worker/simulation_job.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "598"
},
{
"name": "C++",
"bytes": "69299"
},
{
"name": "CSS",
"bytes": "13688"
},
{
"name": "HTML",
"bytes": "45292"
},
{
"name": "JavaScript",
"bytes": "34566"
},
{
"name": "Makefile",
"bytes": "659"
},
{
"name": "Python",
"bytes": "81841"
}
],
"symlink_target": ""
}
|
"""
A Python module that provides a simple object-oriented abstraction layer for creating command-line interfaces.
Read README.md for usage documentation
"""
##
## argcommand
## https://github.com/avinoamr/argcommand
##
## This module was originally developed at Win (win.com) by:
## Roi Avinoam <avinoamr@gmail.com>
## Oran Ben Zur <oranb83@gmail.com>
## Nir Naor <nirnaori@gmail.com>
##
##
## The MIT License
##
## Copyright (c) 2012-2013 argcommand authors
##
## Permission is hereby granted, free of charge, to any person obtaining a copy
## of this software and associated documentation files (the "Software"), to deal
## in the Software without restriction, including without limitation the rights
## to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
## copies of the Software, and to permit persons to whom the Software is
## furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in
## all copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
## THE SOFTWARE.
import argparse
import inspect
__version__ = "0.2"
##
class Command( object ):
"""
The Command class represents a single CLI command
"""
# list of Command classes that should be included in this Command
subcommands = []
# abstract method, needs to be implemented by the concrete Command classes
def run( self ):
raise NotImplementedError
#
def __init__( self, cli_args = None, **kargs ):
"""
Command constructor, replaces the instance's arguments with their values. You should never instantiate
Command classes on your own, but use the Command .execute() method instead.
"""
# support textual cli arguments
if cli_args is not None:
parser = self._configure()
kargs.update( vars( parser.parse_args( cli_args.split() ) ) )
self.args = kargs
for name, arg in self.__class__.getargs():
setattr( self, name, kargs[ arg.name ] )
#
@classmethod
def getargs( cls ):
""" Returns the sorted list of Arguments that were defined by this class """
members = inspect.getmembers( cls )
args = filter( lambda member: isinstance( member[ 1 ], Argument ), members )
args.sort( key = lambda arg: arg[ 1 ]._creation_order )
return args
# converts this class definition to an argparse ArgumentParser instance, including its entire subparsers tree
@classmethod
def _configure( cls, parser = None ):
# create the parser
if parser is None:
desc = getattr( cls, "command_description", cls.__doc__ )
parser = CommandParser( description = desc, data = { "command": cls } )
# add the command arguments to this parser
for name, arg in cls.getargs():
args, kargs = arg.params
parser.add_argument( *args, **kargs )
# create the subparsers that were defined in the Command's subcommands property
subcommands = getattr( cls, "subcommands", [] )
sub = None
for Subcommand in subcommands:
if sub is None:
sub = parser.add_subparsers( parser_class = CommandParser )
# create the subparser
name = getattr( Subcommand, "command_name", Subcommand.__name__.lower() )
desc = getattr( Subcommand, "command_description", Subcommand.__doc__ ) or ""
desc = desc.strip()
subparser = sub.add_parser( name, help = desc, description = desc, data = { "command": Subcommand } )
Subcommand._configure( subparser )
return parser
#
@classmethod
def execute( cls, *args ):
""" Process the command-line arguments and run the relevant Command """
parser = cls._configure()
parsed = parser.parse_args( *args )
Command = parsed.data[ "command" ]
Command( **vars( parsed ) ).run()
##
class Argument( object ):
"""
The Argument class provides a low-level of abstraction on top of argparse's add_argument method.
It's basically an interface for including command-line arguments in the Command classes in
an OOP native way
"""
    # trick for maintaining the order in which the Arguments were created in each class.
# inspired by Django's implementation of the same requirement
_creation_order = 0
# this constructor doesn't really do much except for storing the input for argparse's
# add_argument method which is later used by the Command classes in order to generate the parsers tree
def __init__( self, *args, **kwargs ):
"""
Creates a new command-line argument. See argparse add_argument() documentation
"""
Argument._creation_order += 1
self._creation_order = Argument._creation_order
self.name = args[ 0 ].replace( "-", "" )
self.params = ( args, kwargs )
##
class CommandParser( argparse.ArgumentParser ):
"""
    This class is a simple wrapper around argparse's ArgumentParser that provides a facility for
    storing arbitrary data for this parser, and adding this data to the parsed arguments
Usage:
parser = CommandParser( data = 5 )
args = parser.parse_args([])
print args.data # print 5
Note that unlike most of argparse's behavior, here the data argument will allow subparsers to override
their parent's data arguments.
"""
_data = None
# store the arbitrary data for this parser
def __init__( self, *args, **kargs ):
if "data" in kargs:
self._data = kargs[ "data" ]
del kargs[ "data" ]
super( CommandParser, self ).__init__( *args, **kargs )
    # attach the previously stored data to the returned namespace
def parse_known_args( self, *args, **kargs ):
namespace, arg_strings = super( CommandParser, self ).parse_known_args( *args, **kargs )
# assign the data only if it wasn't already assigned by a different Command
if not hasattr( namespace, "data" ):
setattr( namespace, "data", self._data )
return namespace, arg_strings
## convenient boolean conversion type that accepts boolean-like strings (yes, true, on)
def bool( value ):
value = str( value ).lower().strip()
return value in [ "true", "yes", "on", "1" ]
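## A minimal usage sketch (hypothetical command, not part of this module):
##
## class Greet( Command ):
##     """ Greets somebody by name """
##     name = Argument( "name", help = "who to greet" )
##
##     def run( self ):
##         print "Hello, %s" % self.name
##
## if __name__ == "__main__":
##     Greet.execute()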
|
{
"content_hash": "6371c8b5cbde1439a0c60b31b2464d29",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 113,
"avg_line_length": 37.10752688172043,
"alnum_prop": 0.6506809620399884,
"repo_name": "avinoamr/argcommand",
"id": "21433e320e7ec4eea7dc5c764cd63797e1a3eb87",
"size": "6902",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "argcommand.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7331"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ringapp', '0037_suggestion_response'),
]
operations = [
migrations.CreateModel(
name='News',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.CharField(blank=True, max_length=256, null=True)),
],
),
]
|
{
"content_hash": "42c3fc6edcee45a4b6e3385ee6644023",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 114,
"avg_line_length": 26.55,
"alnum_prop": 0.5800376647834274,
"repo_name": "rschwiebert/RingApp",
"id": "1cb7e82d99f9839508b9f08a82d623aa268d8ab6",
"size": "603",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ringapp/migrations/0038_news.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "645"
},
{
"name": "HTML",
"bytes": "112733"
},
{
"name": "JavaScript",
"bytes": "1823"
},
{
"name": "Procfile",
"bytes": "40"
},
{
"name": "Python",
"bytes": "275458"
}
],
"symlink_target": ""
}
|
from collections import Sequence # noqa
import logging
from django.conf import settings
from horizon import exceptions
import six
__all__ = ('APIResourceWrapper', 'APIDictWrapper',
'get_service_from_catalog', 'url_for',)
LOG = logging.getLogger(__name__)
class APIVersionManager(object):
"""Object to store and manage API versioning data and utility methods."""
SETTINGS_KEY = "OPENSTACK_API_VERSIONS"
def __init__(self, service_type, preferred_version=None):
self.service_type = service_type
self.preferred = preferred_version
self._active = None
self.supported = {}
# As a convenience, we can drop in a placeholder for APIs that we
# have not yet needed to version. This is useful, for example, when
# panels such as the admin metadata_defs wants to check the active
# version even though it's not explicitly defined. Previously
# this caused a KeyError.
if self.preferred:
self.supported[self.preferred] = {"version": self.preferred}
@property
def active(self):
if self._active is None:
self.get_active_version()
return self._active
def load_supported_version(self, version, data):
self.supported[version] = data
def get_active_version(self):
if self._active is not None:
return self.supported[self._active]
key = getattr(settings, self.SETTINGS_KEY, {}).get(self.service_type)
if key is None:
# TODO(gabriel): support API version discovery here; we'll leave
# the setting in as a way of overriding the latest available
# version.
key = self.preferred
        # Since we do a key lookup in the supported dict, the type matters;
        # make sure people know if they pass a string when the key isn't one.
if isinstance(key, six.string_types):
msg = ('The version "%s" specified for the %s service should be '
'either an integer or a float, not a string.' %
(key, self.service_type))
raise exceptions.ConfigurationError(msg)
# Provide a helpful error message if the specified version isn't in the
# supported list.
if key not in self.supported:
choices = ", ".join(str(k) for k in six.iterkeys(self.supported))
msg = ('%s is not a supported API version for the %s service, '
' choices are: %s' % (key, self.service_type, choices))
raise exceptions.ConfigurationError(msg)
self._active = key
return self.supported[self._active]
def clear_active_cache(self):
self._active = None
class APIResourceWrapper(object):
"""Simple wrapper for api objects.
Define _attrs on the child class and pass in the
api object as the only argument to the constructor
"""
_attrs = []
_apiresource = None # Make sure _apiresource is there even in __init__.
def __init__(self, apiresource):
self._apiresource = apiresource
def __getattribute__(self, attr):
try:
return object.__getattribute__(self, attr)
except AttributeError:
if attr not in self._attrs:
raise
# __getattr__ won't find properties
return getattr(self._apiresource, attr)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__,
dict((attr, getattr(self, attr))
for attr in self._attrs
if hasattr(self, attr)))
class APIDictWrapper(object):
"""Simple wrapper for api dictionaries
Some api calls return dictionaries. This class provides identical
    behavior to APIResourceWrapper, except that it will also behave as a
dictionary, in addition to attribute accesses.
Attribute access is the preferred method of access, to be
consistent with api resource objects from novaclient.
"""
_apidict = {} # Make sure _apidict is there even in __init__.
def __init__(self, apidict):
self._apidict = apidict
def __getattribute__(self, attr):
try:
return object.__getattribute__(self, attr)
except AttributeError:
if attr not in self._apidict:
raise
return self._apidict[attr]
def __getitem__(self, item):
try:
return getattr(self, item)
except (AttributeError, TypeError) as e:
# caller is expecting a KeyError
raise KeyError(e)
def __contains__(self, item):
try:
return hasattr(self, item)
except TypeError:
return False
def get(self, item, default=None):
try:
return getattr(self, item)
except (AttributeError, TypeError):
return default
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self._apidict)
class Quota(object):
"""Wrapper for individual limits in a quota."""
def __init__(self, name, limit):
self.name = name
self.limit = limit
def __repr__(self):
return "<Quota: (%s, %s)>" % (self.name, self.limit)
class QuotaSet(Sequence):
"""Wrapper for client QuotaSet objects which turns the individual quotas
into Quota objects for easier handling/iteration.
`QuotaSet` objects support a mix of `list` and `dict` methods; you can use
    the bracket notation (`qs["my_quota"] = 0`) to add new quota values, and
use the `get` method to retrieve a specific quota, but otherwise it
behaves much like a list or tuple, particularly in supporting iteration.
"""
def __init__(self, apiresource=None):
self.items = []
if apiresource:
if hasattr(apiresource, '_info'):
items = apiresource._info.items()
else:
items = apiresource.items()
for k, v in items:
if k == 'id':
continue
self[k] = v
def __setitem__(self, k, v):
v = int(v) if v is not None else v
q = Quota(k, v)
self.items.append(q)
def __getitem__(self, index):
return self.items[index]
def __add__(self, other):
"""Merge another QuotaSet into this one. Existing quotas are
not overridden.
"""
if not isinstance(other, QuotaSet):
msg = "Can only add QuotaSet to QuotaSet, " \
"but received %s instead" % type(other)
raise ValueError(msg)
for item in other:
if self.get(item.name).limit is None:
self.items.append(item)
return self
def __len__(self):
return len(self.items)
def __repr__(self):
return repr(self.items)
def get(self, key, default=None):
match = [quota for quota in self.items if quota.name == key]
return match.pop() if len(match) else Quota(key, default)
def add(self, other):
return self.__add__(other)
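# For example (illustrative quotas): on merge, existing limits win and new
# names are appended:
#
#   qs = QuotaSet()
#   qs['instances'] = 10
#   qs.get('instances').limit                     # -> 10
#   qs = qs + QuotaSet({'instances': 20, 'cores': 4})
#   [(q.name, q.limit) for q in qs]               # -> instances 10, cores 4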
def get_service_from_catalog(catalog, service_type):
if catalog:
for service in catalog:
if service['type'] == service_type:
return service
return None
def get_version_from_service(service):
if service and service.get('endpoints'):
endpoint = service['endpoints'][0]
if 'interface' in endpoint:
return 3
else:
return 2.0
return 2.0
# Mapping of V2 Catalog Endpoint_type to V3 Catalog Interfaces
ENDPOINT_TYPE_TO_INTERFACE = {
'publicURL': 'public',
'internalURL': 'internal',
'adminURL': 'admin',
}
def get_url_for_service(service, region, endpoint_type):
identity_version = get_version_from_service(service)
available_endpoints = [endpoint for endpoint in service['endpoints']
if region == endpoint['region']]
"""if we are dealing with the identity service and there is no endpoint
in the current region, it is okay to use the first endpoint for any
identity service endpoints and we can assume that it is global
"""
if service['type'] == 'identity' and not available_endpoints:
available_endpoints = [endpoint for endpoint in service['endpoints']]
for endpoint in available_endpoints:
try:
if identity_version < 3:
return endpoint[endpoint_type]
else:
interface = \
ENDPOINT_TYPE_TO_INTERFACE.get(endpoint_type, '')
if endpoint['interface'] == interface:
return endpoint['url']
except (IndexError, KeyError):
"""it could be that the current endpoint just doesn't match the
type, continue trying the next one
"""
pass
return None
def url_for(request, service_type, endpoint_type=None, region=None):
endpoint_type = endpoint_type or getattr(settings,
'OPENSTACK_ENDPOINT_TYPE',
'publicURL')
fallback_endpoint_type = getattr(settings, 'SECONDARY_ENDPOINT_TYPE', None)
catalog = request.user.service_catalog
service = get_service_from_catalog(catalog, service_type)
if service:
if not region:
region = request.user.services_region
url = get_url_for_service(service,
region,
endpoint_type)
if not url and fallback_endpoint_type:
url = get_url_for_service(service,
region,
fallback_endpoint_type)
if url:
return url
raise exceptions.ServiceCatalogException(service_type)
def is_service_enabled(request, service_type, service_name=None):
service = get_service_from_catalog(request.user.service_catalog,
service_type)
if service:
region = request.user.services_region
for endpoint in service['endpoints']:
# ignore region for identity
if service['type'] == 'identity' or \
endpoint['region'] == region:
if service_name:
return service['name'] == service_name
else:
return True
return False
|
{
"content_hash": "0b1dbcd38bd492033ad0c93edcc0a95f",
"timestamp": "",
"source": "github",
"line_count": 308,
"max_line_length": 79,
"avg_line_length": 34.09090909090909,
"alnum_prop": 0.5815238095238096,
"repo_name": "orbitfp7/horizon",
"id": "675c9b5d7c46384fc54465c5b78fc40b18cac366",
"size": "11264",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openstack_dashboard/api/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "1536"
},
{
"name": "CSS",
"bytes": "70531"
},
{
"name": "HTML",
"bytes": "420092"
},
{
"name": "JavaScript",
"bytes": "277460"
},
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "4417610"
},
{
"name": "Shell",
"bytes": "18318"
}
],
"symlink_target": ""
}
|
def reverse(data):
for index in range(len(data)-1, -1, -1):
yield data[index]
if __name__ == "__main__":
import sys
for char in reverse('golf'):
print char
print "gtest"
|
{
"content_hash": "c5fec4dd21b32d020c7e8fde7e0fca90",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 44,
"avg_line_length": 21.6,
"alnum_prop": 0.5138888888888888,
"repo_name": "sarpulas/idLog",
"id": "5b87c5a71cd2a9ca101f810e20249a76e2ce3d4e",
"size": "216",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37728"
}
],
"symlink_target": ""
}
|
import json
import six
import webob
import wsgiservice
def test_mount():
"""mount decorator adds the path as an attribute _path."""
@wsgiservice.mount('/{id}')
class Document(wsgiservice.Resource):
pass
assert Document.__name__ == 'Document'
assert Document._path == '/{id}'
def test_validate_resource():
"""validate decorator adds rules to the _validations attribute list."""
@wsgiservice.validate('id', re=r'[-0-9a-zA-Z]{36}',
doc='Document ID, must be a valid UUID.')
class User(wsgiservice.Resource):
pass
print(User._validations)
assert User.__name__ == 'User'
assert User._validations['id'] == {'re': r'[-0-9a-zA-Z]{36}',
'convert': None, 'doc': 'Document ID, must be a valid UUID.'}
def test_validate_method():
"""validate decorator adds rules to the _validations attribute list."""
class User(wsgiservice.Resource):
@wsgiservice.validate('password', doc="User's password")
@wsgiservice.validate('username', re='[a-z]+')
def PUT(self, password):
pass
print(User.PUT._validations)
assert User.PUT.__name__ == 'PUT'
assert User.PUT._validations['password'] == {'re': None,
'convert': None, 'doc': "User's password"}
assert User.PUT._validations['username'] == {'re': '[a-z]+',
'convert': None, 'doc': None}
def test_default_value():
"""Request parameters can have default values."""
class User(wsgiservice.Resource):
def GET(self, foo, bar, id=5):
return {'id': id, 'bar': bar, 'foo': foo}
req = webob.Request.blank('/?foo=baz1&bar=baz2',
headers={'Accept': 'application/json'})
res = webob.Response()
usr = User(request=req, response=res, path_params={})
res = usr()
print(res)
obj = json.loads(res.body)
print(obj)
assert obj == {'id': 5, 'foo': 'baz1', 'bar': 'baz2'}
def test_default_value_overwrite():
"""Parameters with default values can be overwritten in the request."""
class User(wsgiservice.Resource):
def GET(self, foo, id=5):
return {'id': id, 'foo': foo}
req = webob.Request.blank('/?id=8&foo=bar',
headers={'Accept': 'application/json'})
res = webob.Response()
usr = User(request=req, response=res, path_params={})
res = usr()
print(res)
obj = json.loads(res.body)
print(obj)
assert obj == {'id': '8', 'foo': 'bar'}
def test_default_value_validate_novalue():
"""Default parameters are validated correctly when not passed in."""
class User(wsgiservice.Resource):
@wsgiservice.validate('id', doc='Foo')
def GET(self, foo, id=5):
return {'id': id, 'foo': foo}
req = webob.Request.blank('/?foo=bar',
headers={'Accept': 'application/json'})
res = webob.Response()
usr = User(request=req, response=res, path_params={})
res = usr()
print(res)
obj = json.loads(res.body)
print(obj)
assert obj == {'id': 5, 'foo': 'bar'}
def test_default_value_validate():
"""Default parameters are validated correctly when passed in."""
class User(wsgiservice.Resource):
@wsgiservice.validate('id', doc='Foo')
def GET(self, foo, id=5):
return {'id': id, 'foo': foo}
req = webob.Request.blank('/?id=&foo=bar',
headers={'Accept': 'application/json'})
res = webob.Response()
usr = User(request=req, response=res, path_params={})
res = usr()
print(res)
assert res.status_int == 400
obj = json.loads(res.body)
print(obj)
assert obj == {"error": "Value for id must not be empty."}
def test_convert_params():
"""Convert parameters using the function given."""
class User(wsgiservice.Resource):
@wsgiservice.validate('foo', convert=int)
@wsgiservice.validate('bar', convert=repr)
def GET(self, foo, bar):
return {'foo': foo, 'foo_type': str(type(foo)),
'bar': bar, 'bar_type': str(type(bar))}
req = webob.Request.blank('/?foo=193&bar=testing',
headers={'Accept': 'application/json'})
res = webob.Response()
usr = User(request=req, response=res, path_params={})
res = usr()
print(res)
assert res.status_int == 200
obj = json.loads(res.body)
assert obj['foo'] == 193
if six.PY2:
assert obj['bar'] == "u'testing'"
assert obj['foo_type'] == "<type 'int'>"
assert obj['bar_type'] == "<type 'str'>"
else:
assert obj['bar'] == "'testing'"
assert obj['foo_type'] == "<class 'int'>"
assert obj['bar_type'] == "<class 'str'>"
def test_latin1_submit():
"""Don't access request.POST magically if method doesn't expect params.
    This way if a web service wants to handle unexpected data (WebOb only
allows UTF-8), it can do so manually inside the method.
"""
class User(wsgiservice.Resource):
def POST(self):
return {'body': repr(self.request.body)}
req = webob.Request.blank('/test', {'REQUEST_METHOD': 'POST'},
headers={'Accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded'})
req.body = u'Fühler'.encode('latin1')
res = webob.Response()
usr = User(request=req, response=res, path_params={})
res = usr()
print(res)
assert res.status_int == 200
obj = json.loads(res.body)
if six.PY2:
assert obj == {'body': "'F\\xfchler'"}
else:
assert obj == {'body': "b'F\\xfchler'"}
def test_convert_params_validate():
"""Use the conversion function to validate as well."""
class User(wsgiservice.Resource):
@wsgiservice.validate('a', convert=int)
def GET(self, a):
return {'a': a}
req = webob.Request.blank('/?a=b', headers={'Accept': 'application/json'})
res = webob.Response()
usr = User(request=req, response=res, path_params={})
res = usr()
print(res)
assert res.status_int == 400
obj = json.loads(res.body)
assert obj == {"error": "a value b does not validate."}
def test_ignore_robotstxt():
"""Ignore the robots.txt resource on root resources."""
class Dummy(wsgiservice.Resource):
_path = '/{id}'
def GET(self, id):
return id
req = webob.Request.blank('/robots.txt')
res = webob.Response()
usr = Dummy(request=req, response=res, path_params={})
res = usr()
print(res)
assert res.status_int == 404
def test_ignore_favicon():
"""Ignore the favicon.ico resource on root resources."""
class Dummy(wsgiservice.Resource):
_path = '/{id}'
def GET(self, id):
return id
req = webob.Request.blank('/favicon.ico')
res = webob.Response()
usr = Dummy(request=req, response=res, path_params={})
res = usr()
print(res)
assert res.status_int == 404
def test_ignore_favicon_overwrite():
"""Don't ignore favicon.ico when IGNORED_PATHS is empty."""
class Dummy(wsgiservice.Resource):
_path = '/{id}'
IGNORED_PATHS = ()
def GET(self, id):
return id
req = webob.Request.blank('/favicon.ico')
res = webob.Response()
usr = Dummy(request=req, response=res, path_params={})
res = usr()
print(res)
assert res.status_int == 200
def test_ignore_favicon_not_root():
"""Don't ignore favicon.ico on non-root requests."""
class Dummy(wsgiservice.Resource):
_path = '/foo/{id}'
def GET(self, id):
return id
req = webob.Request.blank('/foo/favicon.ico')
res = webob.Response()
usr = Dummy(request=req, response=res, path_params={})
res = usr()
print(res)
assert res.status_int == 200
def test_ignore_favicon_query_param():
"""Don't ignore favicon.ico with query parameters"""
class Dummy(wsgiservice.Resource):
_path = '/{id}'
def GET(self, id):
return id
req = webob.Request.blank('/favicon.ico?x=1')
res = webob.Response()
usr = Dummy(request=req, response=res, path_params={})
res = usr()
print(res)
assert res.status_int == 200
def test_ignore_favicon_post():
"""Only ignore favicon.ico for GET requests."""
class Dummy(wsgiservice.Resource):
_path = '/{id}'
def POST(self, id):
return id
req = webob.Request.blank('/favicon.ico?', {'REQUEST_METHOD': 'POST'})
res = webob.Response()
usr = Dummy(request=req, response=res, path_params={})
res = usr()
print(res)
assert res.status_int == 200
def test_default_mimetype():
"""Use the first item of EXTENSION_MAP as the default."""
class Dummy(wsgiservice.Resource):
EXTENSION_MAP = [
('.txt', 'text/plain'),
('.xml', 'text/xml'),
]
_path = '/status'
def GET(self, id):
return 'OK'
def to_text_plain(self, raw):
return raw
req = webob.Request.blank('/status')
res = webob.Response()
usr = Dummy(request=req, response=res, path_params={})
res = usr()
print(res)
assert res.headers['Content-Type'] == 'text/plain; charset=UTF-8'
def test_invalid_accept():
"""Again use the first item of EXTENSION_MAP as the default if the
`Accept` header has an unknown value.
"""
class Dummy(wsgiservice.Resource):
_path = '/test'
def GET(self, id):
return {'status': 'success'}
req = webob.Request.blank('/test', headers={'Accept': 'text/json'})
res = webob.Response()
usr = Dummy(request=req, response=res, path_params={})
res = usr()
print(res)
assert res.headers['Content-Type'] == 'text/xml; charset=UTF-8'
def test_raise_404():
"""Use NotFoundResource when a 404 response is raised."""
class Dummy(wsgiservice.Resource):
_path = '/test'
def GET(self):
wsgiservice.raise_404(self)
req = webob.Request.blank('/test')
res = webob.Response()
usr = Dummy(request=req, response=res, path_params={})
res = usr()
print(res)
assert res.headers['Content-Type'] == 'text/xml; charset=UTF-8'
|
{
"content_hash": "b764b681b203379f31a985fd631f9dfb",
"timestamp": "",
"source": "github",
"line_count": 363,
"max_line_length": 78,
"avg_line_length": 28.046831955922865,
"alnum_prop": 0.5901188488360671,
"repo_name": "pneff/wsgiservice",
"id": "f3a1035cbcd9c2f5a23b9db60222610870c2895a",
"size": "10206",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_resource.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "129935"
}
],
"symlink_target": ""
}
|
""" This module contains the base class for handlers as used by the
Dispatcher """
class Handler(object):
"""
The base class for all update handlers. You can create your own handlers
by inheriting from this class.
Args:
callback (function): A function that takes ``bot, update`` as
positional arguments. It will be called when the ``check_update``
has determined that an update should be processed by this handler.
pass_update_queue (optional[bool]): If set to ``True``, a keyword argument called
``update_queue`` will be passed to the callback function. It will be the ``Queue``
            instance used by the ``Updater`` and ``Dispatcher`` that holds new updates
            and can also be used to insert updates. Default is ``False``.
pass_job_queue (optional[bool]): If set to ``True``, a keyword argument called
``job_queue`` will be passed to the callback function. It will be a ``JobQueue``
instance created by the ``Updater`` which can be used to schedule new jobs.
Default is ``False``.
pass_user_data (optional[bool]): If set to ``True``, a keyword argument called
``user_data`` will be passed to the callback function. It will be a ``dict`` you
can use to keep any data related to the user that sent the update. For each update of
the same user, it will be the same ``dict``. Default is ``False``.
pass_chat_data (optional[bool]): If set to ``True``, a keyword argument called
``chat_data`` will be passed to the callback function. It will be a ``dict`` you
can use to keep any data related to the chat that the update was sent in.
For each update in the same chat, it will be the same ``dict``. Default is ``False``.
"""
def __init__(self,
callback,
pass_update_queue=False,
pass_job_queue=False,
pass_user_data=False,
pass_chat_data=False):
self.callback = callback
self.pass_update_queue = pass_update_queue
self.pass_job_queue = pass_job_queue
self.pass_user_data = pass_user_data
self.pass_chat_data = pass_chat_data
def check_update(self, update):
"""
This method is called to determine if an update should be handled by
this handler instance. It should always be overridden.
Args:
update (object): The update to be tested
Returns:
bool
"""
raise NotImplementedError
def handle_update(self, update, dispatcher):
"""
This method is called if it was determined that an update should indeed
        be handled by this instance. It should also be overridden; in most
        cases it should call ``self.callback(dispatcher.bot, update)``, possibly along with
optional arguments. To work with the ``ConversationHandler``, this method should return the
value returned from ``self.callback``
Args:
update (object): The update to be handled
dispatcher (telegram.ext.Dispatcher): The dispatcher to collect optional args
"""
raise NotImplementedError
def collect_optional_args(self, dispatcher, update=None):
"""
Prepares the optional arguments that are the same for all types of
handlers
Args:
dispatcher (telegram.ext.Dispatcher):
"""
optional_args = dict()
if self.pass_update_queue:
optional_args['update_queue'] = dispatcher.update_queue
if self.pass_job_queue:
optional_args['job_queue'] = dispatcher.job_queue
if self.pass_user_data or self.pass_chat_data:
chat = update.effective_chat
user = update.effective_user
if self.pass_user_data:
optional_args['user_data'] = dispatcher.user_data[user.id if user else None]
if self.pass_chat_data:
optional_args['chat_data'] = dispatcher.chat_data[chat.id if chat else None]
return optional_args
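# A minimal sketch of a concrete handler (hypothetical, not part of this
# module): accept every update and delegate to the callback, forwarding
# the optional arguments collected above.
#
#     class CatchAllHandler(Handler):
#         def check_update(self, update):
#             return True
#
#         def handle_update(self, update, dispatcher):
#             optional_args = self.collect_optional_args(dispatcher, update)
#             return self.callback(dispatcher.bot, update, **optional_args)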
|
{
"content_hash": "1cf2e4bd95c3c0d461cf952a7385c583",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 99,
"avg_line_length": 43.395833333333336,
"alnum_prop": 0.6178588574171867,
"repo_name": "thonkify/thonkify",
"id": "97a544f1a61851cb16c0b02112bbf41134dc2e91",
"size": "4975",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/lib/telegram/ext/handler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10460214"
},
{
"name": "Shell",
"bytes": "1470"
}
],
"symlink_target": ""
}
|
"""HTTP Client for asyncio."""
import asyncio
import base64
import hashlib
import os
import sys
import traceback
import warnings
from multidict import CIMultiDict, MultiDict, MultiDictProxy, istr
from yarl import URL
import aiohttp
from . import hdrs, helpers
from ._ws_impl import WS_KEY, WebSocketParser, WebSocketWriter
from .client_reqrep import ClientRequest, ClientResponse
from .client_ws import ClientWebSocketResponse
from .cookiejar import CookieJar
from .errors import WSServerHandshakeError
from .helpers import Timeout
__all__ = ('ClientSession', 'request', 'get', 'options', 'head',
'delete', 'post', 'put', 'patch', 'ws_connect')
PY_35 = sys.version_info >= (3, 5)
class ClientSession:
"""First-class interface for making HTTP requests."""
_source_traceback = None
_connector = None
def __init__(self, *, connector=None, loop=None, cookies=None,
headers=None, skip_auto_headers=None,
auth=None, request_class=ClientRequest,
response_class=ClientResponse,
ws_response_class=ClientWebSocketResponse,
version=aiohttp.HttpVersion11,
cookie_jar=None):
if connector is None:
connector = aiohttp.TCPConnector(loop=loop)
loop = connector._loop # never None
else:
if loop is None:
loop = connector._loop # never None
elif connector._loop is not loop:
raise ValueError("loop argument must agree with connector")
self._loop = loop
if loop.get_debug():
self._source_traceback = traceback.extract_stack(sys._getframe(1))
if cookie_jar is None:
cookie_jar = CookieJar(loop=loop)
self._cookie_jar = cookie_jar
if cookies is not None:
self._cookie_jar.update_cookies(cookies)
self._connector = connector
self._default_auth = auth
self._version = version
        # Normalize the default headers to a CIMultiDict
if headers:
headers = CIMultiDict(headers)
else:
headers = CIMultiDict()
self._default_headers = headers
if skip_auto_headers is not None:
self._skip_auto_headers = frozenset([istr(i)
for i in skip_auto_headers])
else:
self._skip_auto_headers = frozenset()
self._request_class = request_class
self._response_class = response_class
self._ws_response_class = ws_response_class
def __del__(self, _warnings=warnings):
if not self.closed:
self.close()
_warnings.warn("Unclosed client session {!r}".format(self),
ResourceWarning)
context = {'client_session': self,
'message': 'Unclosed client session'}
if self._source_traceback is not None:
context['source_traceback'] = self._source_traceback
self._loop.call_exception_handler(context)
def request(self, method, url, *,
params=None,
data=None,
headers=None,
skip_auto_headers=None,
auth=None,
allow_redirects=True,
max_redirects=10,
encoding='utf-8',
version=None,
compress=None,
chunked=None,
expect100=False,
read_until_eof=True,
proxy=None,
proxy_auth=None,
timeout=5*60):
"""Perform HTTP request."""
return _RequestContextManager(
self._request(
method,
url,
params=params,
data=data,
headers=headers,
skip_auto_headers=skip_auto_headers,
auth=auth,
allow_redirects=allow_redirects,
max_redirects=max_redirects,
encoding=encoding,
version=version,
compress=compress,
chunked=chunked,
expect100=expect100,
read_until_eof=read_until_eof,
proxy=proxy,
proxy_auth=proxy_auth,
timeout=timeout))
@asyncio.coroutine
def _request(self, method, url, *,
params=None,
data=None,
headers=None,
skip_auto_headers=None,
auth=None,
allow_redirects=True,
max_redirects=10,
encoding='utf-8',
version=None,
compress=None,
chunked=None,
expect100=False,
read_until_eof=True,
proxy=None,
proxy_auth=None,
timeout=5*60):
if version is not None:
warnings.warn("HTTP version should be specified "
"by ClientSession constructor", DeprecationWarning)
else:
version = self._version
if self.closed:
raise RuntimeError('Session is closed')
redirects = 0
history = []
# Merge with default headers and transform to CIMultiDict
headers = self._prepare_headers(headers)
if auth is None:
auth = self._default_auth
        # It would be confusing if we supported an explicit Authorization
        # header together with the `auth` argument
if (headers is not None and
auth is not None and
hdrs.AUTHORIZATION in headers):
raise ValueError("Can't combine `Authorization` header with "
"`auth` argument")
skip_headers = set(self._skip_auto_headers)
if skip_auto_headers is not None:
for i in skip_auto_headers:
skip_headers.add(istr(i))
if proxy is not None:
proxy = URL(proxy)
while True:
url = URL(url).with_fragment(None)
cookies = self._cookie_jar.filter_cookies(url)
req = self._request_class(
method, url, params=params, headers=headers,
skip_auto_headers=skip_headers, data=data,
cookies=cookies, encoding=encoding,
auth=auth, version=version, compress=compress, chunked=chunked,
expect100=expect100,
loop=self._loop, response_class=self._response_class,
proxy=proxy, proxy_auth=proxy_auth, timeout=timeout)
with Timeout(timeout, loop=self._loop):
conn = yield from self._connector.connect(req)
try:
resp = req.send(conn.writer, conn.reader)
try:
yield from resp.start(conn, read_until_eof)
except:
resp.close()
conn.close()
raise
except (aiohttp.HttpProcessingError,
aiohttp.ServerDisconnectedError) as exc:
raise aiohttp.ClientResponseError() from exc
except OSError as exc:
raise aiohttp.ClientOSError(*exc.args) from exc
self._cookie_jar.update_cookies(resp.cookies, resp.url_obj)
# redirects
if resp.status in (301, 302, 303, 307) and allow_redirects:
redirects += 1
history.append(resp)
if max_redirects and redirects >= max_redirects:
resp.close()
break
else:
                    # TODO: close the connection if the BODY is large enough.
                    # A redirect with a big BODY is forbidden by the HTTP
                    # protocol, but a malformed server may send an illegal
                    # response. Small BODIES with text like "Not Found" are
                    # still perfectly fine and should be accepted.
yield from resp.release()
# For 301 and 302, mimic IE behaviour, now changed in RFC.
# Details: https://github.com/kennethreitz/requests/pull/269
if (resp.status == 303 and resp.method != hdrs.METH_HEAD) \
or (resp.status in (301, 302) and
resp.method == hdrs.METH_POST):
method = hdrs.METH_GET
data = None
if headers.get(hdrs.CONTENT_LENGTH):
headers.pop(hdrs.CONTENT_LENGTH)
r_url = URL(resp.headers.get(hdrs.LOCATION) or
resp.headers.get(hdrs.URI))
scheme = r_url.scheme
if scheme not in ('http', 'https', ''):
resp.close()
raise ValueError('Can redirect only to http or https')
elif not scheme:
r_url = url.join(r_url)
url = r_url
params = None
yield from resp.release()
continue
break
resp._history = tuple(history)
return resp
def ws_connect(self, url, *,
protocols=(),
timeout=10.0,
autoclose=True,
autoping=True,
auth=None,
origin=None,
headers=None,
proxy=None,
proxy_auth=None):
"""Initiate websocket connection."""
return _WSRequestContextManager(
self._ws_connect(url,
protocols=protocols,
timeout=timeout,
autoclose=autoclose,
autoping=autoping,
auth=auth,
origin=origin,
headers=headers,
proxy=proxy,
proxy_auth=proxy_auth))
@asyncio.coroutine
def _ws_connect(self, url, *,
protocols=(),
timeout=10.0,
autoclose=True,
autoping=True,
auth=None,
origin=None,
headers=None,
proxy=None,
proxy_auth=None):
sec_key = base64.b64encode(os.urandom(16))
if headers is None:
headers = CIMultiDict()
default_headers = {
hdrs.UPGRADE: hdrs.WEBSOCKET,
hdrs.CONNECTION: hdrs.UPGRADE,
hdrs.SEC_WEBSOCKET_VERSION: '13',
hdrs.SEC_WEBSOCKET_KEY: sec_key.decode(),
}
for key, value in default_headers.items():
if key not in headers:
headers[key] = value
if protocols:
headers[hdrs.SEC_WEBSOCKET_PROTOCOL] = ','.join(protocols)
if origin is not None:
headers[hdrs.ORIGIN] = origin
# send request
resp = yield from self.get(url, headers=headers,
read_until_eof=False,
auth=auth,
proxy=proxy,
proxy_auth=proxy_auth)
try:
# check handshake
if resp.status != 101:
raise WSServerHandshakeError(
message='Invalid response status',
code=resp.status,
headers=resp.headers)
if resp.headers.get(hdrs.UPGRADE, '').lower() != 'websocket':
raise WSServerHandshakeError(
message='Invalid upgrade header',
code=resp.status,
headers=resp.headers)
if resp.headers.get(hdrs.CONNECTION, '').lower() != 'upgrade':
raise WSServerHandshakeError(
message='Invalid connection header',
code=resp.status,
headers=resp.headers)
# key calculation
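            # Per RFC 6455 the server must echo
            # base64(sha1(client_key + magic GUID)), where WS_KEY is the
            # fixed GUID "258EAFA5-E914-47DA-95CA-C5AB0DC85B11".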
key = resp.headers.get(hdrs.SEC_WEBSOCKET_ACCEPT, '')
match = base64.b64encode(
hashlib.sha1(sec_key + WS_KEY).digest()).decode()
if key != match:
raise WSServerHandshakeError(
message='Invalid challenge response',
code=resp.status,
headers=resp.headers)
# websocket protocol
protocol = None
if protocols and hdrs.SEC_WEBSOCKET_PROTOCOL in resp.headers:
resp_protocols = [
proto.strip() for proto in
resp.headers[hdrs.SEC_WEBSOCKET_PROTOCOL].split(',')]
for proto in resp_protocols:
if proto in protocols:
protocol = proto
break
reader = resp.connection.reader.set_parser(WebSocketParser)
resp.connection.writer.set_tcp_nodelay(True)
writer = WebSocketWriter(resp.connection.writer, use_mask=True)
except Exception:
resp.close()
raise
else:
return self._ws_response_class(reader,
writer,
protocol,
resp,
timeout,
autoclose,
autoping,
self._loop)
def _prepare_headers(self, headers):
""" Add default headers and transform it to CIMultiDict
"""
# Convert headers to MultiDict
result = CIMultiDict(self._default_headers)
if headers:
if not isinstance(headers, (MultiDictProxy, MultiDict)):
headers = CIMultiDict(headers)
added_names = set()
for key, value in headers.items():
if key in added_names:
result.add(key, value)
else:
result[key] = value
added_names.add(key)
return result
def get(self, url, *, allow_redirects=True, **kwargs):
"""Perform HTTP GET request."""
return _RequestContextManager(
self._request(hdrs.METH_GET, url,
allow_redirects=allow_redirects,
**kwargs))
def options(self, url, *, allow_redirects=True, **kwargs):
"""Perform HTTP OPTIONS request."""
return _RequestContextManager(
self._request(hdrs.METH_OPTIONS, url,
allow_redirects=allow_redirects,
**kwargs))
def head(self, url, *, allow_redirects=False, **kwargs):
"""Perform HTTP HEAD request."""
return _RequestContextManager(
self._request(hdrs.METH_HEAD, url,
allow_redirects=allow_redirects,
**kwargs))
def post(self, url, *, data=None, **kwargs):
"""Perform HTTP POST request."""
return _RequestContextManager(
self._request(hdrs.METH_POST, url,
data=data,
**kwargs))
def put(self, url, *, data=None, **kwargs):
"""Perform HTTP PUT request."""
return _RequestContextManager(
self._request(hdrs.METH_PUT, url,
data=data,
**kwargs))
def patch(self, url, *, data=None, **kwargs):
"""Perform HTTP PATCH request."""
return _RequestContextManager(
self._request(hdrs.METH_PATCH, url,
data=data,
**kwargs))
def delete(self, url, **kwargs):
"""Perform HTTP DELETE request."""
return _RequestContextManager(
self._request(hdrs.METH_DELETE, url,
**kwargs))
def close(self):
"""Close underlying connector.
Release all acquired resources.
"""
if not self.closed:
self._connector.close()
self._connector = None
ret = helpers.create_future(self._loop)
ret.set_result(None)
return ret
@property
def closed(self):
"""Is client session closed.
A readonly property.
"""
return self._connector is None or self._connector.closed
@property
def connector(self):
"""Connector instance used for the session."""
return self._connector
@property
def cookie_jar(self):
"""The session cookies."""
return self._cookie_jar
@property
def version(self):
"""The session HTTP protocol version."""
return self._version
@property
def loop(self):
"""Session's loop."""
return self._loop
def detach(self):
"""Detach connector from session without closing the former.
Session is switched to closed state anyway.
"""
self._connector = None
def __enter__(self):
warnings.warn("Use async with instead", DeprecationWarning)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
if PY_35:
@asyncio.coroutine
def __aenter__(self):
return self
@asyncio.coroutine
def __aexit__(self, exc_type, exc_val, exc_tb):
yield from self.close()
if PY_35:
from collections.abc import Coroutine
base = Coroutine
else:
base = object
class _BaseRequestContextManager(base):
__slots__ = ('_coro', '_resp', 'send', 'throw', 'close')
def __init__(self, coro):
self._coro = coro
self._resp = None
self.send = coro.send
self.throw = coro.throw
self.close = coro.close
@property
def gi_frame(self):
return self._coro.gi_frame
@property
def gi_running(self):
return self._coro.gi_running
@property
def gi_code(self):
return self._coro.gi_code
def __next__(self):
return self.send(None)
@asyncio.coroutine
def __iter__(self):
resp = yield from self._coro
return resp
if PY_35:
def __await__(self):
resp = yield from self._coro
return resp
@asyncio.coroutine
def __aenter__(self):
self._resp = yield from self._coro
return self._resp
if not PY_35:
try:
from asyncio import coroutines
coroutines._COROUTINE_TYPES += (_BaseRequestContextManager,)
except: # pragma: no cover
pass # Python 3.4.2 and 3.4.3 has no coroutines._COROUTINE_TYPES
class _RequestContextManager(_BaseRequestContextManager):
if PY_35:
@asyncio.coroutine
def __aexit__(self, exc_type, exc, tb):
if exc_type is not None:
self._resp.close()
else:
yield from self._resp.release()
class _WSRequestContextManager(_BaseRequestContextManager):
if PY_35:
@asyncio.coroutine
def __aexit__(self, exc_type, exc, tb):
yield from self._resp.close()
class _DetachedRequestContextManager(_RequestContextManager):
__slots__ = _RequestContextManager.__slots__ + ('_session', )
def __init__(self, coro, session):
super().__init__(coro)
self._session = session
@asyncio.coroutine
def __iter__(self):
try:
return (yield from self._coro)
except:
yield from self._session.close()
raise
if PY_35:
def __await__(self):
try:
return (yield from self._coro)
except:
yield from self._session.close()
raise
def __del__(self):
self._session.detach()
class _DetachedWSRequestContextManager(_WSRequestContextManager):
__slots__ = _WSRequestContextManager.__slots__ + ('_session', )
def __init__(self, coro, session):
super().__init__(coro)
self._session = session
def __del__(self):
self._session.detach()
def request(method, url, *,
params=None,
data=None,
headers=None,
skip_auto_headers=None,
cookies=None,
auth=None,
allow_redirects=True,
max_redirects=10,
encoding='utf-8',
version=None,
compress=None,
chunked=None,
expect100=False,
connector=None,
loop=None,
read_until_eof=True,
request_class=None,
response_class=None,
proxy=None,
proxy_auth=None):
"""Constructs and sends a request. Returns response object.
method - HTTP method
url - request url
params - (optional) Dictionary or bytes to be sent in the query
string of the new request
data - (optional) Dictionary, bytes, or file-like object to
send in the body of the request
headers - (optional) Dictionary of HTTP Headers to send with
the request
cookies - (optional) Dict object to send with the request
    auth - (optional) aiohttp.helpers.BasicAuth named tuple
       representing HTTP Basic Auth
allow_redirects - (optional) If set to False, do not follow
redirects
version - Request HTTP version.
compress - Set to True if request has to be compressed
with deflate encoding.
chunked - Set to chunk size for chunked transfer encoding.
expect100 - Expect 100-continue response from server.
connector - BaseConnector sub-class instance to support
connection pooling.
read_until_eof - Read response until eof if response
does not have Content-Length header.
request_class - (optional) Custom Request class implementation.
response_class - (optional) Custom Response class implementation.
loop - Optional event loop.
Usage::
>>> import aiohttp
>>> resp = yield from aiohttp.request('GET', 'http://python.org/')
>>> resp
<ClientResponse(python.org/) [200]>
>>> data = yield from resp.read()
"""
warnings.warn("Use ClientSession().request() instead", DeprecationWarning)
if connector is None:
connector = aiohttp.TCPConnector(loop=loop, force_close=True)
kwargs = {}
if request_class is not None:
kwargs['request_class'] = request_class
if response_class is not None:
kwargs['response_class'] = response_class
session = ClientSession(loop=loop,
cookies=cookies,
connector=connector,
**kwargs)
return _DetachedRequestContextManager(
session._request(method, url,
params=params,
data=data,
headers=headers,
skip_auto_headers=skip_auto_headers,
auth=auth,
allow_redirects=allow_redirects,
max_redirects=max_redirects,
encoding=encoding,
version=version,
compress=compress,
chunked=chunked,
expect100=expect100,
read_until_eof=read_until_eof,
proxy=proxy,
proxy_auth=proxy_auth,),
session=session)
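# Preferred replacement per the DeprecationWarning above (sketch):
#   session = aiohttp.ClientSession(loop=loop)
#   resp = yield from session.request('GET', 'http://python.org/')
#   ...
#   yield from session.close()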
def get(url, **kwargs):
warnings.warn("Use ClientSession().get() instead", DeprecationWarning)
return request(hdrs.METH_GET, url, **kwargs)
def options(url, **kwargs):
warnings.warn("Use ClientSession().options() instead", DeprecationWarning)
return request(hdrs.METH_OPTIONS, url, **kwargs)
def head(url, **kwargs):
warnings.warn("Use ClientSession().head() instead", DeprecationWarning)
return request(hdrs.METH_HEAD, url, **kwargs)
def post(url, **kwargs):
warnings.warn("Use ClientSession().post() instead", DeprecationWarning)
return request(hdrs.METH_POST, url, **kwargs)
def put(url, **kwargs):
warnings.warn("Use ClientSession().put() instead", DeprecationWarning)
return request(hdrs.METH_PUT, url, **kwargs)
def patch(url, **kwargs):
warnings.warn("Use ClientSession().patch() instead", DeprecationWarning)
return request(hdrs.METH_PATCH, url, **kwargs)
def delete(url, **kwargs):
warnings.warn("Use ClientSession().delete() instead", DeprecationWarning)
return request(hdrs.METH_DELETE, url, **kwargs)
def ws_connect(url, *, protocols=(), timeout=10.0, connector=None, auth=None,
ws_response_class=ClientWebSocketResponse, autoclose=True,
autoping=True, loop=None, origin=None, headers=None):
warnings.warn("Use ClientSession().ws_connect() instead",
DeprecationWarning)
if loop is None:
loop = asyncio.get_event_loop()
if connector is None:
connector = aiohttp.TCPConnector(loop=loop, force_close=True)
session = aiohttp.ClientSession(loop=loop, connector=connector, auth=auth,
ws_response_class=ws_response_class,
headers=headers)
return _DetachedWSRequestContextManager(
session._ws_connect(url,
protocols=protocols,
timeout=timeout,
autoclose=autoclose,
autoping=autoping,
origin=origin),
session=session)
|
{
"content_hash": "5940175baa4d7608d58abec52064e17f",
"timestamp": "",
"source": "github",
"line_count": 778,
"max_line_length": 79,
"avg_line_length": 33.2814910025707,
"alnum_prop": 0.5255474452554745,
"repo_name": "moden-py/aiohttp",
"id": "94e5e41742332bb4826f7f799032d27d2fbbeb56",
"size": "25893",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aiohttp/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "CSS",
"bytes": "112"
},
{
"name": "HTML",
"bytes": "4885"
},
{
"name": "Makefile",
"bytes": "3042"
},
{
"name": "PLpgSQL",
"bytes": "765"
},
{
"name": "Python",
"bytes": "1096434"
},
{
"name": "Shell",
"bytes": "2298"
}
],
"symlink_target": ""
}
|
try:
import urllib.request as urllib_request
import urllib.error as urllib_error
except ImportError:
import urllib2 as urllib_request
import urllib2 as urllib_error
try:
from cStringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
from .twitter_globals import POST_ACTIONS
from .auth import NoAuth
import re
import gzip
try:
import http.client as http_client
except ImportError:
import httplib as http_client
import json
class _DEFAULT(object):
pass
class TwitterError(Exception):
"""
Base Exception thrown by the Twitter object when there is a
general error interacting with the API.
"""
pass
class TwitterHTTPError(TwitterError):
"""
Exception thrown by the Twitter object when there is an
HTTP error interacting with twitter.com.
"""
def __init__(self, e, uri, format, uriparts):
self.e = e
self.uri = uri
self.format = format
self.uriparts = uriparts
try:
data = self.e.fp.read()
except http_client.IncompleteRead as e:
# can't read the error text
# let's try some of it
data = e.partial
if self.e.headers.get('Content-Encoding') == 'gzip':
buf = StringIO(data)
f = gzip.GzipFile(fileobj=buf)
self.response_data = f.read()
else:
self.response_data = data
def __str__(self):
fmt = ("." + self.format) if self.format else ""
return (
"Twitter sent status %i for URL: %s%s using parameters: "
"(%s)\ndetails: %s" %(
self.e.code, self.uri, fmt, self.uriparts,
self.response_data))
class TwitterResponse(object):
"""
Response from a twitter request. Behaves like a list or a string
(depending on requested format) but it has a few other interesting
attributes.
`headers` gives you access to the response headers as an
httplib.HTTPHeaders instance. You can do
`response.headers.get('h')` to retrieve a header.
"""
def __init__(self, headers):
self.headers = headers
@property
def rate_limit_remaining(self):
"""
Remaining requests in the current rate-limit.
"""
return int(self.headers.get('X-Rate-Limit-Remaining', "0"))
@property
def rate_limit_limit(self):
"""
The rate limit ceiling for that given request.
"""
return int(self.headers.get('X-Rate-Limit-Limit', "0"))
@property
def rate_limit_reset(self):
"""
Time in UTC epoch seconds when the rate limit will reset.
"""
return int(self.headers.get('X-Rate-Limit-Reset', "0"))
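    # Illustrative read of these properties (header values hypothetical):
    #   resp = twitter.search.tweets(q="#pycon")
    #   resp.rate_limit_remaining  # e.g. 179 requests left in the window
    #   resp.rate_limit_reset      # e.g. 1377606300 (UTC epoch seconds)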
def wrap_response(response, headers):
response_typ = type(response)
if response_typ is bool:
        # bool can't be subclassed, so fall back to its base class, int.
response_typ = int
elif response_typ is str:
return response
class WrappedTwitterResponse(response_typ, TwitterResponse):
__doc__ = TwitterResponse.__doc__
def __init__(self, response, headers):
response_typ.__init__(self, response)
TwitterResponse.__init__(self, headers)
def __new__(cls, response, headers):
return response_typ.__new__(cls, response)
return WrappedTwitterResponse(response, headers)
class TwitterCall(object):
def __init__(
self, auth, format, domain, callable_cls, uri="",
uriparts=None, secure=True):
self.auth = auth
self.format = format
self.domain = domain
self.callable_cls = callable_cls
self.uri = uri
self.uriparts = uriparts
self.secure = secure
def __getattr__(self, k):
try:
            return object.__getattribute__(self, k)
except AttributeError:
def extend_call(arg):
return self.callable_cls(
auth=self.auth, format=self.format, domain=self.domain,
callable_cls=self.callable_cls, uriparts=self.uriparts \
+ (arg,),
secure=self.secure)
if k == "_":
return extend_call
else:
return extend_call(k)
def __call__(self, **kwargs):
# Build the uri.
uriparts = []
for uripart in self.uriparts:
# If this part matches a keyword argument, use the
# supplied value otherwise, just use the part.
uriparts.append(str(kwargs.pop(uripart, uripart)))
uri = '/'.join(uriparts)
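        # e.g. uriparts ('1.1', 'user', 'list', 'members') called with
        # user="tamtar", list="things-that-are-rad" substitutes the matching
        # segments, yielding '1.1/tamtar/things-that-are-rad/members'.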
method = kwargs.pop('_method', None)
if not method:
method = "GET"
for action in POST_ACTIONS:
                if re.search(r"%s(/\d+)?$" % action, uri):
method = "POST"
break
# If an id kwarg is present and there is no id to fill in in
# the list of uriparts, assume the id goes at the end.
id = kwargs.pop('id', None)
if id:
uri += "/%s" %(id)
# If an _id kwarg is present, this is treated as id as a CGI
# param.
_id = kwargs.pop('_id', None)
if _id:
kwargs['id'] = _id
# If an _timeout is specified in kwargs, use it
_timeout = kwargs.pop('_timeout', None)
secure_str = ''
if self.secure:
secure_str = 's'
dot = ""
if self.format:
dot = "."
uriBase = "http%s://%s/%s%s%s" %(
secure_str, self.domain, uri, dot, self.format)
headers = {'Accept-Encoding': 'gzip'}
if self.auth:
headers.update(self.auth.generate_headers())
arg_data = self.auth.encode_params(uriBase, method, kwargs)
if method == 'GET':
uriBase += '?' + arg_data
body = None
else:
body = arg_data.encode('utf8')
req = urllib_request.Request(uriBase, body, headers)
return self._handle_response(req, uri, arg_data, _timeout)
def _handle_response(self, req, uri, arg_data, _timeout=None):
kwargs = {}
if _timeout:
kwargs['timeout'] = _timeout
try:
handle = urllib_request.urlopen(req, **kwargs)
if handle.headers['Content-Type'] in ['image/jpeg', 'image/png']:
return handle
try:
data = handle.read()
except http_client.IncompleteRead as e:
# Even if we don't get all the bytes we should have there
# may be a complete response in e.partial
data = e.partial
if handle.info().get('Content-Encoding') == 'gzip':
# Handle gzip decompression
buf = StringIO(data)
f = gzip.GzipFile(fileobj=buf)
data = f.read()
if "json" == self.format:
res = json.loads(data.decode('utf8'))
return wrap_response(res, handle.headers)
else:
return wrap_response(
data.decode('utf8'), handle.headers)
except urllib_error.HTTPError as e:
if (e.code == 304):
return []
else:
raise TwitterHTTPError(e, uri, self.format, arg_data)
class Twitter(TwitterCall):
"""
The minimalist yet fully featured Twitter API class.
Get RESTful data by accessing members of this class. The result
is decoded python objects (lists and dicts).
The Twitter API is documented at:
http://dev.twitter.com/doc
Examples::
t = Twitter(
auth=OAuth(token, token_key, con_secret, con_secret_key)))
# Get your "home" timeline
t.statuses.home_timeline()
# Get a particular friend's timeline
t.statuses.friends_timeline(id="billybob")
# Also supported (but totally weird)
t.statuses.friends_timeline.billybob()
# Update your status
t.statuses.update(
status="Using @sixohsix's sweet Python Twitter Tools.")
# Send a direct message
t.direct_messages.new(
user="billybob",
text="I think yer swell!")
# Get the members of tamtar's list "Things That Are Rad"
t._("tamtar")._("things-that-are-rad").members()
# Note how the magic `_` method can be used to insert data
# into the middle of a call. You can also use replacement:
t.user.list.members(user="tamtar", list="things-that-are-rad")
    # An *optional* `_timeout` parameter can also be used for API
    # calls which take much more time than normal, or when twitter
    # stops responding for some reason
t.users.lookup(
screen_name=','.join(A_LIST_OF_100_SCREEN_NAMES), \
_timeout=1)
Searching Twitter::
# Search for the latest tweets about #pycon
t.search.tweets(q="#pycon")
Using the data returned
-----------------------
Twitter API calls return decoded JSON. This is converted into
a bunch of Python lists, dicts, ints, and strings. For example::
x = twitter.statuses.home_timeline()
# The first 'tweet' in the timeline
x[0]
# The screen name of the user who wrote the first 'tweet'
x[0]['user']['screen_name']
Getting raw XML data
--------------------
If you prefer to get your Twitter data in XML format, pass
format="xml" to the Twitter object when you instantiate it::
twitter = Twitter(format="xml")
The output will not be parsed in any way. It will be a raw string
of XML.
"""
def __init__(
self, format="json",
domain="api.twitter.com", secure=True, auth=None,
api_version=_DEFAULT):
"""
Create a new twitter API connector.
Pass an `auth` parameter to use the credentials of a specific
user. Generally you'll want to pass an `OAuth`
instance::
twitter = Twitter(auth=OAuth(
token, token_secret, consumer_key, consumer_secret))
    `domain` lets you change the domain you are connecting to. By
default it's `api.twitter.com` but `search.twitter.com` may be
useful too.
If `secure` is False you will connect with HTTP instead of
HTTPS.
`api_version` is used to set the base uri. By default it's
'1'. If you are using "search.twitter.com" set this to None.
"""
if not auth:
auth = NoAuth()
if (format not in ("json", "xml", "")):
raise ValueError("Unknown data format '%s'" %(format))
if api_version is _DEFAULT:
api_version = '1.1'
uriparts = ()
if api_version:
uriparts += (str(api_version),)
TwitterCall.__init__(
self, auth=auth, format=format, domain=domain,
callable_cls=TwitterCall,
secure=secure, uriparts=uriparts)
__all__ = ["Twitter", "TwitterError", "TwitterHTTPError", "TwitterResponse"]
|
{
"content_hash": "fc262bcb975d6cd730b07bccfbba961d",
"timestamp": "",
"source": "github",
"line_count": 365,
"max_line_length": 77,
"avg_line_length": 30.676712328767124,
"alnum_prop": 0.5648834509243548,
"repo_name": "atpaino/socialsonar",
"id": "0ebc814e1954d5881dfefbdf65393cbc77d9a5d2",
"size": "11197",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "twitter-1.10.0/build/lib.linux-x86_64-2.7/twitter/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8326"
}
],
"symlink_target": ""
}
|
import requests_unixsocket
import json
from utils.utils import Utils
u = Utils()
#https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/
class Ids():
def __init__(self):
self.base = "http+unix://%2Fvar%2Frun%2Fdocker.sock"
self.url = "/containers/json"
self.session = requests_unixsocket.Session()
try:
self.resp = self.session.get( self.base + self.url)
except Exception as ex:
            template = "An exception of type {0} occurred. Arguments:\n{1!r}"
            message = template.format(type(ex).__name__, ex.args)
            print(message)
def ids(self):
resp = self.resp
url = self.url
resp_status_code = resp.status_code
u.check_resp(resp_status_code, url)
for item in resp.json():
yield('{}'.format(item["Id"]))
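# Minimal usage sketch (assumes a local Docker daemon listening on the
# default unix socket and this repo's utils package on the path):
#   for container_id in Ids().ids():
#       print(container_id)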
|
{
"content_hash": "556cc96d6746d694e6c3fb3a5e06e507",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 76,
"avg_line_length": 29.333333333333332,
"alnum_prop": 0.5772727272727273,
"repo_name": "eon01/DoMonit",
"id": "54030b286ba9893a88cf285f929dc2441015852f",
"size": "880",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "domonit/ids.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "73392"
}
],
"symlink_target": ""
}
|
import re
from BeautifulSoup import BeautifulSoup
def YesNo(answer):
while (re.search(r'([Yy][Ee]*[Ss]*)|([Nn][Oo]*)', answer) is None):
answer = raw_input("Please Enter a Valid Answer: ")
if re.search(r'[Yy][Ee]*[Ss]*', answer):
return "Yes"
else:
return "No"
class ErrorCheck():
def obviousChecks(self, crn="", term="09", year="2013", subj="", crse=""):
errors = []
if crn: # If a CRN is given
if (len(crn) == 5 and crn.isdigit() is True):
pass
else:
errors.append("Please enter a valid CRN")
if subj: # If a Subject is given
if (subj.isalpha() is True):
pass
else:
errors.append("Please enter a valid Subject")
if crse: # If a Course Number is given
if (len(crse) == 4 and crse.isdigit() is True):
pass
else:
errors.append("Please enter a valid Course Number")
return errors
def harderChecks(self, crn="", term="09", year="2013", subj="", crse=""):
pass
class Cleaner():
data = ""
def __init__(self, contents):
soup = BeautifulSoup(contents)
self.data = soup.findAll("td", {"class":"pldefault"})
self.data = self.toList(self.data)
self.data = self.parseList(self.data)
self.data = self.pretty(self.data)
return None
def __str__(self):
return self.data
def toList(self, classinfo):
classinfo = str(classinfo)
        classinfo = re.sub(r'(&nbsp;){0,}', '', classinfo) # Replace &nbsp; with ''
        classinfo = re.sub(r'(&amp;)', '&', classinfo) # Replace &amp; with &
classinfo = re.sub(r'[\[\]]', '', classinfo) # Replace [ and ] with ''
        classinfo = re.sub(r'<.+?>', ' ', classinfo) # Replace html tags with ' '
classinfo = classinfo.split(' , ') # Split the String Into a List
classinfo = [string.strip() for string in classinfo] # Remove White Space Characters from Each Element
classinfo = filter(None, classinfo) # Remove Empty Entries
return classinfo # Returns a List
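    # Illustrative (hypothetical) toList output for one section row:
    #   ['12345', 'CS 2114', 'Softw Des & Data Structures', 'L', '3', ...]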
def parseList(self, data):
all_sections = []
section_info = []
current_element = data.pop(0) # Let's pop the first element since this is the first CRN
section_info.append(current_element) # Add it to section info
while len(data) > 0: # While we have at least one element
current_element = data[0] # Let's look at the first element
if (len(current_element) == 5 and current_element.isdigit()): # If the element's length is 5 and is a digit, then we have a CRN number
all_sections.append(section_info)
section_info = []
current_element = data.pop(0) # Let's pop the first element
section_info.append(current_element) # and add it to section info
all_sections.append(section_info) # For the last section
return all_sections
def pretty(self, course_info):
string = ""
for course in course_info:
info = """
CRN: {}
Course: {}
Description: {}
Type: {}
Credit Hours: {}
Capacity: {}
Instructor: {}
Day(s): {}
Begin: {}
End: {}
Location: {}
Exam: {}
"""
if "* Additional Times *" in course_info:
info += """
{}
            Day(s): {}
Begin: {}
End: {}
Location: {}
"""
string += info.format(*course)
return string
class CourseWatch():
pass
|
{
"content_hash": "636a4a7387cec9ff45290604c4b57adb",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 147,
"avg_line_length": 31.496124031007753,
"alnum_prop": 0.485601772089589,
"repo_name": "ajn123/VT-Python-Class-Add",
"id": "1a6bca0a8d3b5c75da29b863990b395120a9e11b",
"size": "4081",
"binary": false,
"copies": "1",
"ref": "refs/heads/testing",
"path": "Helper.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
'''OpenGL extension NV.float_buffer
Automatically generated by the get_gl_extensions script, do not edit!
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_NV_float_buffer'
_DEPRECATED = False
GL_FLOAT_R_NV = constant.Constant( 'GL_FLOAT_R_NV', 0x8880 )
GL_FLOAT_RG_NV = constant.Constant( 'GL_FLOAT_RG_NV', 0x8881 )
GL_FLOAT_RGB_NV = constant.Constant( 'GL_FLOAT_RGB_NV', 0x8882 )
GL_FLOAT_RGBA_NV = constant.Constant( 'GL_FLOAT_RGBA_NV', 0x8883 )
GL_FLOAT_R16_NV = constant.Constant( 'GL_FLOAT_R16_NV', 0x8884 )
GL_FLOAT_R32_NV = constant.Constant( 'GL_FLOAT_R32_NV', 0x8885 )
GL_FLOAT_RG16_NV = constant.Constant( 'GL_FLOAT_RG16_NV', 0x8886 )
GL_FLOAT_RG32_NV = constant.Constant( 'GL_FLOAT_RG32_NV', 0x8887 )
GL_FLOAT_RGB16_NV = constant.Constant( 'GL_FLOAT_RGB16_NV', 0x8888 )
GL_FLOAT_RGB32_NV = constant.Constant( 'GL_FLOAT_RGB32_NV', 0x8889 )
GL_FLOAT_RGBA16_NV = constant.Constant( 'GL_FLOAT_RGBA16_NV', 0x888A )
GL_FLOAT_RGBA32_NV = constant.Constant( 'GL_FLOAT_RGBA32_NV', 0x888B )
GL_TEXTURE_FLOAT_COMPONENTS_NV = constant.Constant( 'GL_TEXTURE_FLOAT_COMPONENTS_NV', 0x888C )
GL_FLOAT_CLEAR_COLOR_VALUE_NV = constant.Constant( 'GL_FLOAT_CLEAR_COLOR_VALUE_NV', 0x888D )
glget.addGLGetConstant( GL_FLOAT_CLEAR_COLOR_VALUE_NV, (4,) )
GL_FLOAT_RGBA_MODE_NV = constant.Constant( 'GL_FLOAT_RGBA_MODE_NV', 0x888E )
glget.addGLGetConstant( GL_FLOAT_RGBA_MODE_NV, (1,) )
def glInitFloatBufferNV():
'''Return boolean indicating whether this extension is available'''
return extensions.hasGLExtension( EXTENSION_NAME )
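# Usage sketch (assumes a current GL context created elsewhere, e.g. by
# GLUT or a windowing toolkit):
#   if glInitFloatBufferNV():
#       pass  # float formats such as GL_FLOAT_RGBA32_NV are available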
|
{
"content_hash": "662fa895180332b0e68c942d4685fdc4",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 94,
"avg_line_length": 51.09375,
"alnum_prop": 0.7412844036697248,
"repo_name": "Universal-Model-Converter/UMC3.0a",
"id": "49a8cae34454c257f6e849b62b7db00eb679107a",
"size": "1635",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "data/Python/x86/Lib/site-packages/OpenGL/raw/GL/NV/float_buffer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "226"
},
{
"name": "C",
"bytes": "1082640"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "3621086"
},
{
"name": "CSS",
"bytes": "6226"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "FORTRAN",
"bytes": "7795"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "Groff",
"bytes": "5943"
},
{
"name": "HTML",
"bytes": "1196266"
},
{
"name": "Java",
"bytes": "5793"
},
{
"name": "Makefile",
"bytes": "1109"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "33351557"
},
{
"name": "R",
"bytes": "1370"
},
{
"name": "Shell",
"bytes": "6931"
},
{
"name": "Tcl",
"bytes": "2084458"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
}
|
"""
Cocktail Sort
-------------
A bidirectional bubble sort. Walks the elements bidirectionally, swapping
neighbors if one should come before/after the other.
Time Complexity: O(n**2)
Space Complexity: O(1) Auxiliary
Stable: Yes
Pseudo Code: http://en.wikipedia.org/wiki/Cocktail_sort
"""
def sort(seq):
"""
Takes a list of integers and sorts them in ascending order. This sorted
list is then returned.
:param seq: A list of integers
:rtype: A list of sorted integers
"""
lower_bound = -1
upper_bound = len(seq) - 1
swapped = True
while swapped:
swapped = False
lower_bound += 1
for i in range(lower_bound, upper_bound):
if seq[i] > seq[i + 1]:
seq[i], seq[i + 1] = seq[i + 1], seq[i]
swapped = True
if not swapped:
break
swapped = False
upper_bound -= 1
for i in range(upper_bound, lower_bound, -1):
if seq[i] < seq[i - 1]:
seq[i], seq[i - 1] = seq[i - 1], seq[i]
swapped = True
return seq
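if __name__ == "__main__":
    # Quick demonstration (not part of the original module):
    print(sort([5, 1, 4, 2, 8, 0, 2]))  # -> [0, 1, 2, 2, 4, 5, 8]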
|
{
"content_hash": "4c1c28fadb73f54ab7b1fc348424ec8d",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 77,
"avg_line_length": 25.155555555555555,
"alnum_prop": 0.5432862190812721,
"repo_name": "rexshihaoren/algorithms",
"id": "635da03a1c8ab82f8ef890ef90c2844729e54f54",
"size": "1132",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "algorithms/sorting/cocktail_sort.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "109247"
}
],
"symlink_target": ""
}
|
import logging
from dplace_app.models import Variable, Category, CodeDescription
from sources import get_source
def load_vars(repos):
categories, count = {}, 0
for ds in repos.datasets:
for var in ds.variables:
count += load_var(ds, var, categories)
return count
def load_var(ds, var, categories):
variable = Variable.objects.create(
name=var.title,
type=ds.type,
codebook_info=var.definition,
data_type=var.type,
units=var.units,
label=var.id,
source=get_source(ds))
for c in var.category:
index_category = categories.get((ds.type, c))
if not index_category:
index_category = categories[(ds.type, c)] = Category.objects.create(
name=c, type=ds.type)
logging.info("Created %s category: %s" % (ds.type, c))
if index_category not in variable.index_categories.all():
variable.index_categories.add(index_category)
for code in var.codes:
code_description, created = CodeDescription.objects.get_or_create(
variable=variable, code=code.code)
code_description.description = code.description
code_description.short_description = code.name
code_description.save()
    logging.info(
        ("Created CodeDescription: %s" % code_description).decode('utf8'))
variable.save()
logging.info("Created Variable: %s" % variable.label)
return 1
|
{
"content_hash": "a3f91f62f1f7c96798226837de9ee79d",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 86,
"avg_line_length": 32.23913043478261,
"alnum_prop": 0.6331759946055293,
"repo_name": "stefelisabeth/dplace",
"id": "729d6b1fdf0210a1a4f9fa67f8e45da947436ace",
"size": "1507",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dplace_app/loader/variables.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10863"
},
{
"name": "HTML",
"bytes": "149761"
},
{
"name": "JavaScript",
"bytes": "156539"
},
{
"name": "Makefile",
"bytes": "189"
},
{
"name": "Python",
"bytes": "124392"
},
{
"name": "Shell",
"bytes": "826"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import weakref
import six
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras import combinations
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import input_layer
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.optimizer_v2 import adam
from tensorflow.python.module import module
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import template
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import training_util
from tensorflow.python.training.tracking import graph_view
from tensorflow.python.training.tracking import util as trackable_utils
# pylint: disable=not-callable
class MyModel(training.Model):
"""A concrete Model for testing."""
def __init__(self):
super(MyModel, self).__init__()
self._named_dense = core.Dense(1, use_bias=True)
self._second = core.Dense(1, use_bias=False)
# We can still track Trackables which aren't Layers.
self._non_layer = NonLayerTrackable()
def call(self, values):
ret = self._second(self._named_dense(values))
return ret
class NonLayerTrackable(module.Module):
def __init__(self):
super(NonLayerTrackable, self).__init__()
self.a_variable = trackable_utils.add_variable(
self, name="a_variable", shape=[])
class InterfaceTests(test.TestCase):
def testLayerDeduplication(self):
model = training.Model()
layer_one = core.Dense(1)
layer_two = core.Dense(1)
model.other_path = [layer_one, layer_two]
model.l2 = layer_two
model.l1 = layer_one
self.assertEqual([layer_one, layer_two], model.layers)
def testSaveWithOnlyKerasSession(self):
with ops.Graph().as_default(), self.cached_session():
inp = input_layer.Input([1])
dense = core.Dense(1)(inp)
model = training.Model(inp, dense)
model.compile(optimizer="sgd", loss="mse")
model.fit([1.], [2.])
checkpoint = trackable_utils.Checkpoint(model=model)
checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
def testObjectMetadata(self):
if not context.executing_eagerly():
self.skipTest("Run in eager mode only.")
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
dense = core.Dense(1)
checkpoint = trackable_utils.Checkpoint(dense=dense)
dense(constant_op.constant([[1.]]))
save_path = checkpoint.save(checkpoint_prefix)
objects = trackable_utils.object_metadata(save_path)
all_variable_names = []
for obj in objects.nodes:
for attribute in obj.attributes:
all_variable_names.append(attribute.full_name)
self.assertIn("dense/kernel", all_variable_names)
class CheckpointingTests(keras_parameterized.TestCase):
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testNamingWithOptimizer(self):
input_value = constant_op.constant([[3.]])
model = MyModel()
# A nuisance Model using the same optimizer. Its slot variables should not
# go in the checkpoint, since it is never depended on.
other_model = MyModel()
optimizer = adam.Adam(0.001)
step = training_util.get_or_create_global_step()
root_trackable = trackable_utils.Checkpoint(
optimizer=optimizer, model=model, step=step)
with backprop.GradientTape() as tape:
loss = model(input_value)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
train_op = control_flow_ops.group(
optimizer.apply_gradients(zip(gradients, variables)),
step.assign_add(1))
with backprop.GradientTape() as tape:
loss = other_model(input_value)
variables = other_model.trainable_variables
gradients = tape.gradient(loss, variables)
optimizer.apply_gradients(zip(gradients, variables))
self.evaluate(trackable_utils.gather_initializers(
root_trackable))
self.evaluate(train_op)
named_variables, serialized_graph, _ = graph_view.ObjectGraphView(
root_trackable).serialize_object_graph()
expected_slot_keys = (
"model/_second/kernel/.OPTIMIZER_SLOT/optimizer/m",
"model/_second/kernel/.OPTIMIZER_SLOT/optimizer/v",
"model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/m",
"model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/v",
"model/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/m",
"model/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/v",
)
expected_checkpoint_names = (
# Created in the root node, so no prefix.
"step",
"model/_second/kernel",
"model/_named_dense/kernel",
"model/_named_dense/bias",
# non-Layer dependency of the model
"model/_non_layer/a_variable",
"optimizer/learning_rate",
"optimizer/beta_1",
"optimizer/beta_2",
"optimizer/iter",
"optimizer/decay",
) + expected_slot_keys
suffix = "/.ATTRIBUTES/VARIABLE_VALUE"
expected_checkpoint_names = [
name + suffix for name in expected_checkpoint_names]
named_variables = {v.name: v for v in named_variables}
six.assertCountEqual(self, expected_checkpoint_names,
named_variables.keys())
# Check that we've mapped to the right variable objects (not exhaustive)
self.assertEqual(
"global_step",
named_variables["step" + suffix].full_name)
self.assertEqual(
"my_model/dense_1/kernel",
named_variables["model/_second/kernel" + suffix].full_name)
self.assertEqual(
"my_model/dense/kernel",
named_variables["model/_named_dense/kernel" + suffix].full_name)
self.assertEqual("Adam/beta_1",
named_variables["optimizer/beta_1" + suffix].full_name)
self.assertEqual("Adam/beta_2",
named_variables["optimizer/beta_2" + suffix].full_name)
# Spot check the generated protocol buffers.
self.assertEqual("optimizer",
serialized_graph.nodes[0].children[1].local_name)
optimizer_node = serialized_graph.nodes[
serialized_graph.nodes[0].children[1].node_id]
children = [node.local_name for node in optimizer_node.children]
six.assertCountEqual(
self,
# hyper variable dependencies
["beta_1", "beta_2", "iter", "decay", "learning_rate"],
children)
serialized_slot_keys = []
for slot in optimizer_node.slot_variables:
for attribute in (
serialized_graph.nodes[slot.slot_variable_node_id].attributes):
serialized_slot_keys.append(attribute.checkpoint_key)
six.assertCountEqual(
self,
[key + suffix for key in expected_slot_keys],
serialized_slot_keys)
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
def testSaveRestore(self):
with self.test_session():
model = MyModel()
optimizer = adam.Adam(0.001)
root_trackable = trackable_utils.Checkpoint(
optimizer=optimizer, model=model)
input_value = constant_op.constant([[3.]])
with backprop.GradientTape() as tape:
loss = model(input_value)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
train_op = optimizer.apply_gradients(zip(gradients, variables))
self.assertFalse(root_trackable.save_counter.trainable)
self.evaluate(trackable_utils.gather_initializers(
root_trackable))
self.evaluate(train_op)
prefix = os.path.join(self.get_temp_dir(), "ckpt")
self.evaluate(state_ops.assign(model._named_dense.variables[1], [42.]))
m_bias_slot = optimizer.get_slot(model._named_dense.variables[1], "m")
self.evaluate(state_ops.assign(m_bias_slot, [1.5]))
save_path = root_trackable.save(file_prefix=prefix)
self.evaluate(state_ops.assign(model._named_dense.variables[1], [43.]))
self.evaluate(state_ops.assign(root_trackable.save_counter, 3))
optimizer_variables = self.evaluate(
sorted(optimizer.variables(), key=lambda v: v.name))
self.evaluate(state_ops.assign(m_bias_slot, [-2.]))
# Immediate restoration
status = root_trackable.restore(save_path=save_path).assert_consumed()
status.run_restore_ops()
self.assertAllEqual([42.], self.evaluate(model._named_dense.variables[1]))
self.assertAllEqual(1, self.evaluate(root_trackable.save_counter))
self.assertAllEqual([1.5], self.evaluate(m_bias_slot))
if not context.executing_eagerly():
return # Restore-on-create is only supported when executing eagerly
on_create_model = MyModel()
on_create_optimizer = adam.Adam(0.001)
on_create_root = trackable_utils.Checkpoint(
optimizer=on_create_optimizer, model=on_create_model)
# Deferred restoration
status = on_create_root.restore(save_path=save_path)
status.assert_nontrivial_match()
status.assert_existing_objects_matched()
with self.assertRaises(AssertionError):
status.assert_consumed()
on_create_model(constant_op.constant([[3.]])) # create variables
self.assertAllEqual(1, self.evaluate(on_create_root.save_counter))
self.assertAllEqual([42.],
self.evaluate(
on_create_model._named_dense.variables[1]))
on_create_m_bias_slot = on_create_optimizer.get_slot(
on_create_model._named_dense.variables[1], "m")
status.assert_existing_objects_matched()
if not context.executing_eagerly():
with self.assertRaises(AssertionError):
status.assert_consumed()
# Optimizer slot variables are created when the original variable is
# restored.
self.assertAllEqual([1.5], self.evaluate(on_create_m_bias_slot))
dummy_var = resource_variable_ops.ResourceVariable([1.])
on_create_optimizer.minimize(loss=dummy_var.read_value,
var_list=[dummy_var])
status.assert_existing_objects_matched()
status.assert_consumed()
self.assertAllEqual(
optimizer_variables,
# Creation order is different, so .variables() needs to be re-sorted.
self.evaluate(sorted(optimizer.variables(), key=lambda v: v.name)))
# TODO(allenl): Debug garbage created by this test in python3.
def testDeferredRestorationUsageEager(self):
"""An idiomatic eager execution example."""
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
model = MyModel()
optimizer = adam.Adam(0.001)
root = trackable_utils.Checkpoint(
optimizer=optimizer, model=model)
root.restore(checkpoint_management.latest_checkpoint(
checkpoint_directory))
for _ in range(num_training_steps):
# TODO(allenl): Use a Dataset and serialize/checkpoint it.
input_value = constant_op.constant([[3.]])
with backprop.GradientTape() as tape:
loss = model(input_value)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
optimizer.apply_gradients(zip(gradients, variables))
root.save(file_prefix=checkpoint_prefix)
self.assertEqual((training_continuation + 1) * num_training_steps,
root.optimizer.iterations.numpy())
def testUsageGraph(self):
"""Expected usage when graph building."""
with context.graph_mode():
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
with ops.Graph().as_default():
model = MyModel()
optimizer = adam.Adam(0.001)
root = trackable_utils.CheckpointV1(
optimizer=optimizer, model=model)
input_value = constant_op.constant([[3.]])
with backprop.GradientTape() as tape:
loss = model(input_value)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
train_op = optimizer.apply_gradients(zip(gradients, variables))
checkpoint_path = checkpoint_management.latest_checkpoint(
checkpoint_directory)
with self.session(graph=ops.get_default_graph()) as session:
status = root.restore(save_path=checkpoint_path)
status.initialize_or_restore(session=session)
if checkpoint_path is None:
self.assertEqual(0, training_continuation)
with self.assertRaises(AssertionError):
status.assert_consumed()
with self.assertRaises(AssertionError):
status.assert_existing_objects_matched()
else:
status.assert_consumed()
status.assert_existing_objects_matched()
for _ in range(num_training_steps):
session.run(train_op)
root.save(file_prefix=checkpoint_prefix, session=session)
self.assertEqual((training_continuation + 1) * num_training_steps,
session.run(root.optimizer.iterations))
self.assertEqual(training_continuation + 1,
session.run(root.save_counter))
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
def testAgnosticUsage(self):
"""Graph/eager agnostic usage."""
# Does create garbage when executing eagerly due to ops.Graph() creation.
with self.test_session():
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
optimizer = adam.Adam(0.001)
def _train_fn(model, input_value):
with backprop.GradientTape() as tape:
loss = model(input_value)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
return optimizer.apply_gradients(zip(gradients, variables))
for training_continuation in range(3):
with testing_utils.device(should_use_gpu=True):
model = MyModel()
root = trackable_utils.Checkpoint(
optimizer=optimizer, model=model)
manager = checkpoint_management.CheckpointManager(
root, checkpoint_directory, max_to_keep=1)
status = root.restore(save_path=manager.latest_checkpoint)
input_value = constant_op.constant([[3.]])
train_fn = functools.partial(_train_fn, model, input_value)
if not context.executing_eagerly():
train_fn = functools.partial(self.evaluate, train_fn())
status.initialize_or_restore()
for _ in range(num_training_steps):
train_fn()
manager.save()
self.assertEqual((training_continuation + 1) * num_training_steps,
self.evaluate(root.optimizer.iterations))
self.assertEqual(training_continuation + 1,
self.evaluate(root.save_counter))
@combinations.generate(combinations.combine(mode=["eager"]))
def testPartialRestoreWarningObject(self):
optimizer = adam.Adam(0.0)
original_root = trackable_utils.Checkpoint(v1=variables_lib.Variable(2.),
v2=variables_lib.Variable(3.),
optimizer=optimizer)
# Create a slot variable to save
optimizer.minimize(original_root.v1.read_value, [original_root.v1])
prefix = os.path.join(self.get_temp_dir(), "ckpt")
save_path = original_root.save(prefix)
partial_root = trackable_utils.Checkpoint(v1=variables_lib.Variable(0.))
weak_partial_root = weakref.ref(partial_root)
weak_v1 = weakref.ref(partial_root.v1)
partial_root.restore(save_path)
self.assertEqual(2., partial_root.v1.numpy())
with test.mock.patch.object(logging, "warning") as mock_log:
del partial_root
self.assertIsNone(weak_partial_root())
self.assertIsNone(weak_v1())
messages = str(mock_log.call_args_list)
self.assertIn("(root).v2'", messages)
self.assertIn("(root).optimizer's state 'm' for (root).v1", messages)
self.assertNotIn("(root).v1'", messages)
self.assertIn("expect_partial()", messages)
# pylint: disable=cell-var-from-loop
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
def testWithDefun(self):
with self.test_session():
num_training_steps = 2
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
with testing_utils.device(should_use_gpu=True):
model = MyModel()
# Don't actually train so we can test variable values
optimizer = adam.Adam(0.)
root = trackable_utils.Checkpoint(
optimizer=optimizer, model=model)
checkpoint_path = checkpoint_management.latest_checkpoint(
checkpoint_directory)
status = root.restore(save_path=checkpoint_path)
def train_fn():
@def_function.function
def _call_model(x):
return model(x)
with backprop.GradientTape() as tape:
loss = _call_model(constant_op.constant([[3.]]))
gradients = tape.gradient(loss, model.variables)
return optimizer.apply_gradients(zip(gradients, model.variables))
if not context.executing_eagerly():
train_fn = functools.partial(
self.evaluate, train_fn())
status.initialize_or_restore()
for _ in range(num_training_steps):
train_fn()
if training_continuation > 0:
status.assert_consumed()
self.assertAllClose([[42.]], self.evaluate(model.variables[0]))
else:
self.evaluate(model.variables[0].assign([[42.]]))
root.save(file_prefix=checkpoint_prefix)
self.assertEqual((training_continuation + 1) * num_training_steps,
self.evaluate(optimizer.iterations))
self.assertEqual(training_continuation + 1,
self.evaluate(root.save_counter))
# pylint: enable=cell-var-from-loop
@combinations.generate(combinations.combine(mode=["eager"]))
def testAnonymousVarsInInit(self):
class Model(training.Model):
def __init__(self):
super(Model, self).__init__()
self.w = resource_variable_ops.ResourceVariable(0.0)
self.b = resource_variable_ops.ResourceVariable(0.0)
self.vars = [self.w, self.b]
def call(self, x):
return x * self.w + self.b
model = Model()
optimizer = adam.Adam(learning_rate=0.05)
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
checkpoint = trackable_utils.Checkpoint(
model=model, optimizer=optimizer)
for _ in range(2):
checkpoint.save(checkpoint_prefix)
with backprop.GradientTape() as tape:
loss = (constant_op.constant(1.)
- model(constant_op.constant(1.))) ** 2
grad = tape.gradient(loss, model.vars)
optimizer.apply_gradients(
[(g, v) for g, v in zip(grad, model.vars)])
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
def testDeferredSlotRestoration(self):
with self.test_session():
checkpoint_directory = self.get_temp_dir()
root = trackable_utils.Checkpoint()
root.var = trackable_utils.add_variable(
root, name="var", initializer=0.)
optimizer = adam.Adam(0.1)
variables = [root.var]
gradients = [1.]
train_op = optimizer.apply_gradients(zip(gradients, variables))
# Note that `optimizer` has not been added as a dependency of
# `root`. Create a one-off grouping so that slot variables for `root.var`
# get initialized too.
self.evaluate(trackable_utils.gather_initializers(
trackable_utils.Checkpoint(root=root, optimizer=optimizer)))
self.evaluate(train_op)
self.evaluate(state_ops.assign(root.var, 12.))
no_slots_path = root.save(os.path.join(checkpoint_directory, "no_slots"))
root.optimizer = optimizer
self.evaluate(state_ops.assign(root.var, 13.))
self.evaluate(state_ops.assign(
optimizer.get_slot(slot_name="m", var=root.var),
14.))
slots_path = root.save(os.path.join(checkpoint_directory, "with_slots"))
new_root = trackable_utils.Checkpoint()
# Load the slot-containing checkpoint (deferred), then immediately
# overwrite the non-slot variable (also deferred).
slot_status = new_root.restore(slots_path)
no_slot_status = new_root.restore(no_slots_path)
with self.assertRaises(AssertionError):
no_slot_status.assert_consumed()
new_root.var = trackable_utils.add_variable(
new_root, name="var", shape=[])
no_slot_status.assert_consumed()
no_slot_status.run_restore_ops()
self.assertEqual(12., self.evaluate(new_root.var))
new_root.optimizer = adam.Adam(0.1)
slot_status.assert_existing_objects_matched()
if not context.executing_eagerly():
with self.assertRaisesRegex(AssertionError, "Unresolved object"):
slot_status.assert_consumed()
self.assertEqual(12., self.evaluate(new_root.var))
if context.executing_eagerly():
# Slot variables are only created with restoring initializers when
# executing eagerly.
self.assertEqual(14., self.evaluate(
new_root.optimizer.get_slot(slot_name="m", var=new_root.var)))
else:
# Slot variables are not created eagerly when graph building.
with self.assertRaises(KeyError):
new_root.optimizer.get_slot(slot_name="m", var=new_root.var)
variables = [new_root.var]
gradients = [1.]
train_op = new_root.optimizer.apply_gradients(zip(gradients, variables))
# The slot variable now exists; restore() didn't create it, but we should
# now have a restore op for it.
slot_status.run_restore_ops()
if not context.executing_eagerly():
# The train op hasn't run when graph building, so the slot variable has
# its restored value. It has run in eager, so the value will
# be different.
self.assertEqual(14., self.evaluate(
new_root.optimizer.get_slot(slot_name="m", var=new_root.var)))
self.evaluate(train_op)
slot_status.assert_consumed()
def testManySavesGraph(self):
"""Saves after the first should not modify the graph."""
with context.graph_mode():
graph = ops.Graph()
with graph.as_default(), self.session(graph):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
obj = trackable_utils.Checkpoint()
obj.var = variables_lib.Variable(0., name="v")
obj.opt = adam.Adam(0.1)
variables = [obj.var]
gradients = [1.]
obj.opt.apply_gradients(zip(gradients, variables))
self.evaluate(trackable_utils.gather_initializers(obj))
obj.save(checkpoint_prefix)
graph.finalize()
obj.save(checkpoint_prefix)
def testManyRestoresGraph(self):
"""Restores after the first should not modify the graph."""
with context.graph_mode():
graph = ops.Graph()
with graph.as_default(), self.session(graph):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
obj = trackable_utils.Checkpoint()
obj.var = variables_lib.Variable(0., name="v")
obj.opt = adam.Adam(0.1)
variables = [obj.var]
gradients = [1.]
obj.opt.apply_gradients(zip(gradients, variables))
self.evaluate(trackable_utils.gather_initializers(obj))
save_path = obj.save(checkpoint_prefix)
obj.restore(save_path)
graph.finalize()
obj.restore(save_path)
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
def test_sequential(self):
with self.test_session():
model = sequential.Sequential()
checkpoint = trackable_utils.Checkpoint(model=model)
model.add(core.Dense(4))
second_dense = core.Dense(5)
model.add(second_dense)
model(constant_op.constant([[1.]]))
checkpoint.restore(None).initialize_or_restore()
self.evaluate(second_dense.bias.assign(
constant_op.constant([1., 2., 3., 4., 5.])))
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_path = checkpoint.save(checkpoint_prefix)
self.evaluate(second_dense.bias.assign(
constant_op.constant([5., 6., 7., 8., 9.])))
checkpoint.restore(save_path).assert_consumed().run_restore_ops()
self.assertAllEqual([1., 2., 3., 4., 5.],
self.evaluate(second_dense.bias))
deferred_sequential = sequential.Sequential()
deferred_sequential_checkpoint = trackable_utils.Checkpoint(
model=deferred_sequential)
status = deferred_sequential_checkpoint.restore(save_path)
deferred_sequential.add(core.Dense(4))
deferred_second_dense = core.Dense(5)
deferred_sequential.add(deferred_second_dense)
deferred_sequential(constant_op.constant([[1.]]))
status.run_restore_ops()
self.assertAllEqual([1., 2., 3., 4., 5.],
self.evaluate(deferred_second_dense.bias))
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
def test_initialize_if_not_restoring(self):
with self.test_session():
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
optimizer_only_prefix = os.path.join(checkpoint_directory, "opt")
with testing_utils.device(should_use_gpu=True):
model = MyModel()
optimizer = adam.Adam(0.001)
root = trackable_utils.Checkpoint(
model=model) # Do not save the optimizer with the checkpoint.
optimizer_checkpoint = trackable_utils.Checkpoint(
optimizer=optimizer)
checkpoint_path = checkpoint_management.latest_checkpoint(
checkpoint_directory)
status = root.restore(save_path=checkpoint_path)
input_value = constant_op.constant([[3.]])
def train_fn():
with backprop.GradientTape() as tape:
loss = model(input_value)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
return optimizer.apply_gradients(zip(gradients, variables))
if not context.executing_eagerly():
train_fn = functools.partial(self.evaluate, train_fn())
status.initialize_or_restore()
# TODO(tanzheny): Add hyper variables to .variables(), and set them with
# set_weights etc.
variables_not_in_the_variables_property = [
obj for obj in optimizer._hyper.values()
if isinstance(obj, variables_lib.Variable)]
self.evaluate([v.initializer for v
in optimizer.variables()
+ variables_not_in_the_variables_property])
train_fn()
model_save_path = root.save(file_prefix=checkpoint_prefix)
self.evaluate(optimizer.beta_1.assign(42.))
optimizer_save_path = optimizer_checkpoint.save(optimizer_only_prefix)
del train_fn
# Restore into a graph with the optimizer
with testing_utils.device(should_use_gpu=True):
model = MyModel()
optimizer = adam.Adam(0.001)
root = trackable_utils.Checkpoint(
optimizer=optimizer, model=model)
status = root.restore(save_path=model_save_path)
input_value = constant_op.constant([[3.]])
def train_fn1():
with backprop.GradientTape() as tape:
loss = model(input_value)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
return optimizer.apply_gradients(zip(gradients, variables))
if not context.executing_eagerly():
train_fn1 = functools.partial(self.evaluate, train_fn1())
status.initialize_or_restore()
train_fn1()
with self.assertRaises(AssertionError):
status.assert_existing_objects_matched()
with self.assertRaises(AssertionError):
status.assert_consumed()
del train_fn1
# Make sure initialization doesn't clobber later restores
with testing_utils.device(should_use_gpu=True):
model = MyModel()
optimizer = adam.Adam(0.001, beta_1=1.0)
root = trackable_utils.Checkpoint(
optimizer=optimizer, model=model)
opt_root = trackable_utils.Checkpoint(
optimizer=optimizer)
status = root.restore(save_path=model_save_path)
init_only_optimizer_status = opt_root.restore(save_path=None)
optimizer_status = opt_root.restore(save_path=optimizer_save_path)
input_value = constant_op.constant([[3.]])
def train_fn2():
with backprop.GradientTape() as tape:
loss = model(input_value)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
return optimizer.apply_gradients(zip(gradients, variables))
if not context.executing_eagerly():
train_fn2 = functools.partial(self.evaluate, train_fn2())
optimizer_status.run_restore_ops()
status.initialize_or_restore()
init_only_optimizer_status.initialize_or_restore()
train_fn2()
self.assertEqual(42., self.evaluate(optimizer.beta_1))
class _ManualScope(module.Module):
def __call__(self):
with variable_scope.variable_scope("ManualScope") as vs:
self.variable_scope = vs
with trackable_utils.capture_dependencies(template=self):
return self._build()
def _build(self):
return variable_scope.get_variable(name="in_manual_scope", shape=[])
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
class TemplateTests(keras_parameterized.TestCase):
def test_trackable_save_restore(self):
with self.test_session():
def _templated():
v = variable_scope.get_variable(
"v", shape=[1], initializer=init_ops.zeros_initializer(),
use_resource=True)
v2 = variable_scope.get_variable(
"v2", shape=[1], initializer=init_ops.zeros_initializer(),
use_resource=True)
manual = _ManualScope()
return v, v + 1., v2, manual, manual()
save_template = template.make_template("s1", _templated)
v1_save, _, v2_save, manual_scope, manual_scope_v = save_template()
six.assertCountEqual(
self,
[id(v1_save), id(v2_save), id(manual_scope),
id(manual_scope_v), id(save_template)],
map(id, trackable_utils.list_objects(save_template)))
manual_dep, = manual_scope._checkpoint_dependencies
self.assertEqual("in_manual_scope", manual_dep.name)
self.assertIs(manual_scope_v, manual_dep.ref)
optimizer = adam.Adam(0.0)
save_root = trackable_utils.Checkpoint(
my_template=save_template, optimizer=optimizer)
optimizer.minimize(v1_save.read_value,
var_list=[v1_save])
self.evaluate([v.initializer for v in save_template.variables])
optimizer_variables = optimizer.variables() + list(
optimizer._hyper.values())
self.evaluate([v.initializer for v in optimizer_variables])
self.evaluate(v1_save.assign([12.]))
self.evaluate(v2_save.assign([14.]))
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_path = save_root.save(checkpoint_prefix)
load_template = template.make_template("s2", _templated)
load_optimizer = adam.Adam(0.0)
load_root = trackable_utils.Checkpoint(
my_template=load_template, optimizer=load_optimizer)
status = load_root.restore(save_path)
var, var_plus_one, var2, _, _ = load_template()
load_optimizer.minimize(var.read_value, var_list=[var])
self.assertLen(load_template._checkpoint_dependencies, 3)
self.assertEqual("v", load_template._checkpoint_dependencies[0].name)
self.assertEqual("v2", load_template._checkpoint_dependencies[1].name)
self.assertEqual("ManualScope",
load_template._checkpoint_dependencies[2].name)
status.assert_consumed().run_restore_ops()
self.assertAllEqual([12.], self.evaluate(var))
self.assertAllEqual([13.], self.evaluate(var_plus_one))
self.assertAllEqual([14.], self.evaluate(var2))
class CheckpointCompatibilityTests(keras_parameterized.TestCase):
def _initialized_model(self):
input_value = constant_op.constant([[3.]])
model = MyModel()
optimizer = adam.Adam(0.001)
root_trackable = trackable_utils.Checkpoint(
optimizer=optimizer, model=model)
with backprop.GradientTape() as tape:
loss = model(input_value)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
train_op = optimizer.apply_gradients(zip(gradients, variables))
self.evaluate(trackable_utils.gather_initializers(
root_trackable))
self.evaluate(train_op)
# A regular variable, a slot variable, and a non-slot Optimizer variable
# with known values to check when loading.
self.evaluate(model._named_dense.bias.assign([1.]))
self.evaluate(optimizer.get_slot(
var=model._named_dense.bias, slot_name="m").assign([2.]))
self.evaluate(optimizer.beta_1.assign(3.))
return root_trackable
def _set_sentinels(self, root_trackable):
self.evaluate(root_trackable.model._named_dense.bias.assign([101.]))
self.evaluate(
root_trackable.optimizer.get_slot(
var=root_trackable.model._named_dense.bias, slot_name="m")
.assign([102.]))
self.evaluate(root_trackable.optimizer.beta_1.assign(103.))
def _check_sentinels(self, root_trackable):
self.assertAllEqual(
[1.], self.evaluate(root_trackable.model._named_dense.bias))
self.assertAllEqual([2.], self.evaluate(
root_trackable.optimizer.get_slot(
var=root_trackable.model._named_dense.bias, slot_name="m")))
self.assertAllEqual(3.,
self.evaluate(root_trackable.optimizer.beta_1))
def _write_name_based_checkpoint(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with context.graph_mode():
save_graph = ops.Graph()
with save_graph.as_default(), self.session(
graph=save_graph) as session:
root = self._initialized_model()
name_saver = saver_lib.Saver()
return name_saver.save(
sess=session,
save_path=checkpoint_prefix,
global_step=root.optimizer.iterations)
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
def testLoadFromNameBasedSaver(self):
"""Save a name-based checkpoint, load it using the object-based API."""
with testing_utils.device(should_use_gpu=True):
with self.test_session():
save_path = self._write_name_based_checkpoint()
root = self._initialized_model()
self._set_sentinels(root)
with self.assertRaises(AssertionError):
self._check_sentinels(root)
object_saver = trackable_utils.TrackableSaver(
graph_view.ObjectGraphView(root))
self._set_sentinels(root)
status = object_saver.restore(save_path)
if context.executing_eagerly():
self._check_sentinels(root)
if context.executing_eagerly():
status.assert_consumed()
status.assert_existing_objects_matched()
status.assert_nontrivial_match()
else:
# When graph building, we haven't read any keys, so we don't know
# whether the restore will be complete.
with self.assertRaisesRegex(AssertionError, "not restored"):
status.assert_consumed()
with self.assertRaisesRegex(AssertionError, "not restored"):
status.assert_existing_objects_matched()
with self.assertRaisesRegex(AssertionError, "not restored"):
status.assert_nontrivial_match()
status.run_restore_ops()
self._check_sentinels(root)
self._set_sentinels(root)
status = object_saver.restore(save_path)
status.initialize_or_restore()
status.assert_nontrivial_match()
self._check_sentinels(root)
# Check that there is no error when keys are missing from the name-based
# checkpoint.
root.not_in_name_checkpoint = resource_variable_ops.ResourceVariable(
[1.])
status = object_saver.restore(save_path)
with self.assertRaises(AssertionError):
status.assert_existing_objects_matched()
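  # Summary of the compatibility contract exercised above (a sketch, not API
  # documentation): a checkpoint written by the name-based saver_lib.Saver can
  # be read back through the object-based TrackableSaver; in eager mode values
  # land immediately, while in graph mode status.run_restore_ops() must run
  # before the assert_* checks can pass.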
def testSaveGraphLoadEager(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with context.graph_mode():
save_graph = ops.Graph()
with save_graph.as_default(), self.session(
graph=save_graph):
root = self._initialized_model()
save_path = root.save(file_prefix=checkpoint_prefix)
with context.eager_mode():
root = self._initialized_model()
self._set_sentinels(root)
root.restore(save_path).assert_consumed()
self._check_sentinels(root)
def testSaveEagerLoadGraph(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with context.eager_mode():
root = self._initialized_model()
save_path = root.save(file_prefix=checkpoint_prefix)
with context.graph_mode():
save_graph = ops.Graph()
with save_graph.as_default(), self.session(
graph=save_graph):
root = self._initialized_model()
self._set_sentinels(root)
root.restore(save_path).assert_consumed().run_restore_ops()
self._check_sentinels(root)
def testIgnoreSaveCounter(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with self.cached_session() as session:
# Create and save a model using Saver() before using a Checkpoint. This
# generates a snapshot without the Checkpoint's `save_counter`.
model = sequential.Sequential()
model.add(core.Flatten(input_shape=(1,)))
model.add(core.Dense(1))
name_saver = saver_lib.Saver(model.trainable_variables)
save_path = name_saver.save(
sess=session, save_path=checkpoint_prefix, global_step=1)
# Checkpoint.restore must successfully load that checkpoint.
ckpt = trackable_utils.Checkpoint(model=model)
status = ckpt.restore(save_path)
status.assert_existing_objects_matched()
# It should, however, refuse to load a checkpoint where an unrelated
# `save_counter` variable is missing.
model.layers[1].var = variables_lib.Variable(0., name="save_counter")
status = ckpt.restore(save_path)
with self.assertRaises(AssertionError):
status.assert_existing_objects_matched()
if __name__ == "__main__":
ops.enable_eager_execution()
test.main()
|
{
"content_hash": "9a93de68c0a7ee51526e157c0cf8dbd2",
"timestamp": "",
"source": "github",
"line_count": 925,
"max_line_length": 80,
"avg_line_length": 43.97081081081081,
"alnum_prop": 0.6565289012366926,
"repo_name": "freedomtan/tensorflow",
"id": "ed0bb17adbd523bd6b299e78473d0584cf5ed1a0",
"size": "41362",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/tests/tracking_util_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "32479"
},
{
"name": "Batchfile",
"bytes": "38366"
},
{
"name": "C",
"bytes": "1035837"
},
{
"name": "C#",
"bytes": "13395"
},
{
"name": "C++",
"bytes": "99324075"
},
{
"name": "CMake",
"bytes": "107781"
},
{
"name": "Dockerfile",
"bytes": "283435"
},
{
"name": "Go",
"bytes": "2013128"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "928595"
},
{
"name": "Jupyter Notebook",
"bytes": "981916"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "4489624"
},
{
"name": "Makefile",
"bytes": "97500"
},
{
"name": "NASL",
"bytes": "8048"
},
{
"name": "Objective-C",
"bytes": "141623"
},
{
"name": "Objective-C++",
"bytes": "360423"
},
{
"name": "PHP",
"bytes": "20570"
},
{
"name": "Pawn",
"bytes": "32277"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "42762396"
},
{
"name": "RobotFramework",
"bytes": "2661"
},
{
"name": "Roff",
"bytes": "2515"
},
{
"name": "Ruby",
"bytes": "6723"
},
{
"name": "Shell",
"bytes": "647623"
},
{
"name": "Smarty",
"bytes": "52687"
},
{
"name": "Starlark",
"bytes": "4632847"
},
{
"name": "Swift",
"bytes": "56924"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
import pytest
from api.base.settings.defaults import API_BASE
from osf_tests.factories import (
AuthUserFactory,
PreprintProviderFactory,
)
from osf.utils import permissions
@pytest.mark.enable_quickfiles_creation
class ProviderModeratorDetailTestClass:
@pytest.fixture()
def admin(self, provider):
user = AuthUserFactory()
provider.get_group(permissions.ADMIN).user_set.add(user)
return user
@pytest.fixture()
def moderator(self, provider):
user = AuthUserFactory()
provider.get_group('moderator').user_set.add(user)
return user
@pytest.fixture()
def nonmoderator(self):
return AuthUserFactory()
def update_payload(self, user_id, permission_group, full_name=None):
data = {
'data': {
'attributes': {
'permission_group': permission_group,
},
'type': 'moderators',
'id': user_id
}
}
if full_name:
data['data']['attributes']['full_name'] = full_name
return data
def test_detail_not_authorized(self, app, url, nonmoderator, moderator, admin, provider):
# Must be logged in
res = app.get(url.format(admin._id), expect_errors=True)
assert res.status_code == 401
# Must be mod to get
res = app.get(url.format(admin._id), auth=nonmoderator.auth, expect_errors=True)
assert res.status_code == 403
# Must be admin to edit
res = app.patch_json_api(url.format(moderator._id),
self.update_payload(user_id=moderator._id, permission_group=permissions.ADMIN),
auth=nonmoderator.auth,
expect_errors=True)
assert res.status_code == 403
# Must be logged in
res = app.patch_json_api(url.format(moderator._id),
self.update_payload(user_id=moderator._id, permission_group=permissions.ADMIN),
expect_errors=True)
assert res.status_code == 401
# Must be admin to edit
res = app.patch_json_api(url.format(moderator._id),
self.update_payload(user_id=moderator._id, permission_group=permissions.ADMIN),
auth=moderator.auth,
expect_errors=True)
assert res.status_code == 403
def test_detail_successful_gets(self, app, url, moderator, admin, provider):
res = app.get(url.format(moderator._id), auth=moderator.auth)
assert res.status_code == 200
assert res.json['data']['id'] == moderator._id
assert res.json['data']['attributes']['permission_group'] == 'moderator'
res = app.get(url.format(admin._id), auth=moderator.auth)
assert res.status_code == 200
assert res.json['data']['id'] == admin._id
assert res.json['data']['attributes']['permission_group'] == permissions.ADMIN
res = app.get(url.format(moderator._id), auth=admin.auth)
assert res.status_code == 200
assert res.json['data']['id'] == moderator._id
assert res.json['data']['attributes']['permission_group'] == 'moderator'
res = app.get(url.format(admin._id), auth=admin.auth)
assert res.status_code == 200
assert res.json['data']['id'] == admin._id
assert res.json['data']['attributes']['permission_group'] == permissions.ADMIN
def test_detail_updates(self, app, url, nonmoderator, moderator, admin, provider):
# Admin makes moderator a new admin
res = app.patch_json_api(url.format(moderator._id),
self.update_payload(user_id=moderator._id, permission_group=permissions.ADMIN),
auth=admin.auth)
assert res.status_code == 200
assert res.json['data']['attributes']['permission_group'] == permissions.ADMIN
# Admin makes new admin a moderator again
res = app.patch_json_api(url.format(moderator._id),
self.update_payload(user_id=moderator._id, permission_group='moderator'),
auth=admin.auth)
assert res.status_code == 200
assert res.json['data']['attributes']['permission_group'] == 'moderator'
# Admin makes mod a mod -- No changes
res = app.patch_json_api(url.format(moderator._id),
self.update_payload(user_id=moderator._id, permission_group='moderator'),
auth=admin.auth)
assert res.status_code == 200
assert res.json['data']['attributes']['permission_group'] == 'moderator'
# Mod has no perm, even though request would make no changes
res = app.patch_json_api(url.format(moderator._id),
self.update_payload(user_id=moderator._id, permission_group='moderator'),
auth=moderator.auth,
expect_errors=True)
assert res.status_code == 403
# Admin can't patch non-mod
res = app.patch_json_api(url.format(nonmoderator._id),
self.update_payload(user_id=nonmoderator._id, permission_group='moderator'),
auth=admin.auth,
expect_errors=True)
assert res.status_code == 404
def test_detail_cannot_remove_last_admin(self, app, url, admin, provider):
res = app.patch_json_api(url.format(admin._id),
self.update_payload(user_id=admin._id, permission_group='moderator'),
auth=admin.auth,
expect_errors=True)
assert res.status_code == 400
assert 'last admin' in res.json['errors'][0]['detail']
res = app.delete_json_api(url.format(admin._id), auth=admin.auth, expect_errors=True)
assert res.status_code == 400
assert 'last admin' in res.json['errors'][0]['detail']
def test_moderator_deletes(self, app, url, moderator, admin, provider):
res = app.delete_json_api(url.format(admin._id), auth=moderator.auth, expect_errors=True)
assert res.status_code == 403
res = app.delete_json_api(url.format(moderator._id), auth=moderator.auth)
assert res.status_code in [200, 204]
if res.status_code == 200:
assert 'meta' in res.json
else:
assert not res.body
def test_admin_delete_moderator(self, app, url, moderator, admin, provider):
res = app.delete_json_api(url.format(moderator._id), auth=admin.auth)
assert res.status_code in [200, 204]
if res.status_code == 200:
assert 'meta' in res.json
else:
assert not res.body
def test_admin_delete_admin(self, app, url, moderator, admin, provider):
# Make mod an admin
res = app.patch_json_api(url.format(moderator._id),
self.update_payload(user_id=moderator._id, permission_group=permissions.ADMIN),
auth=admin.auth)
assert res.json['data']['attributes']['permission_group'] == permissions.ADMIN # Sanity check
# Admin delete admin
res = app.delete_json_api(url.format(moderator._id), auth=admin.auth)
assert res.status_code in [200, 204]
if res.status_code == 200:
assert 'meta' in res.json
else:
assert not res.body
@pytest.mark.django_db
class TestPreprintProviderModeratorDetail(ProviderModeratorDetailTestClass):
@pytest.fixture()
def provider(self):
pp = PreprintProviderFactory(name='ModArxiv')
pp.update_group_permissions()
return pp
@pytest.fixture(params=['/{}preprint_providers/{}/moderators/{{}}/', '/{}providers/preprints/{}/moderators/{{}}/'])
def url(self, provider, request):
url = request.param
return url.format(API_BASE, provider._id)
|
{
"content_hash": "d819477e64107cf7f40d4cc0fb38fe66",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 119,
"avg_line_length": 43.10582010582011,
"alnum_prop": 0.578617896158095,
"repo_name": "baylee-d/osf.io",
"id": "b6379f17b6af6507993037451eaaf07e912ebd6b",
"size": "8147",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "api_tests/providers/preprints/views/test_preprint_provider_moderator_detail.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "92773"
},
{
"name": "Dockerfile",
"bytes": "5721"
},
{
"name": "HTML",
"bytes": "318459"
},
{
"name": "JavaScript",
"bytes": "1792442"
},
{
"name": "Jupyter Notebook",
"bytes": "41326"
},
{
"name": "Mako",
"bytes": "654930"
},
{
"name": "Python",
"bytes": "10662092"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
}
|
"""
Windows Command Prompt (DOS) shell.
"""
from rez.config import config
from rez.rex import RexExecutor, expandable, OutputStyle, EscapedString
from rez.shells import Shell
from rez.system import system
from rez.utils.execution import Popen
from rez.utils.platform_ import platform_
from rez.vendor.six import six
from ._utils.windows import to_windows_path, get_syspaths_from_registry
from functools import partial
import os
import re
import subprocess
basestring = six.string_types[0]
class CMD(Shell):
# For reference, the ss64 web page provides useful documentation on builtin
    # commands for the Windows Command Prompt (cmd). It can be found here:
# http://ss64.com/nt/cmd.html
syspaths = None
_doskey = None
expand_env_vars = True
_env_var_regex = re.compile("%([A-Za-z0-9_]+)%") # %ENVVAR%
# Regex to aid with escaping of Windows-specific special chars:
# http://ss64.com/nt/syntax-esc.html
_escape_re = re.compile(r'(?<!\^)[&<>]|(?<!\^)\^(?![&<>\^])|(\|)')
_escaper = partial(_escape_re.sub, lambda m: '^' + m.group(0))
def __init__(self):
super(CMD, self).__init__()
self._doskey_aliases = {}
@classmethod
def name(cls):
return 'cmd'
@classmethod
def file_extension(cls):
return 'bat'
@classmethod
def startup_capabilities(cls, rcfile=False, norc=False, stdin=False,
command=False):
cls._unsupported_option('rcfile', rcfile)
rcfile = False
cls._unsupported_option('norc', norc)
norc = False
cls._unsupported_option('stdin', stdin)
stdin = False
return (rcfile, norc, stdin, command)
@classmethod
def get_startup_sequence(cls, rcfile, norc, stdin, command):
rcfile, norc, stdin, command = \
cls.startup_capabilities(rcfile, norc, stdin, command)
return dict(
stdin=stdin,
command=command,
do_rcfile=False,
envvar=None,
files=[],
bind_files=[],
source_bind_files=(not norc)
)
@classmethod
def get_syspaths(cls):
if cls.syspaths is not None:
return cls.syspaths
if config.standard_system_paths:
cls.syspaths = config.standard_system_paths
return cls.syspaths
cls.syspaths = get_syspaths_from_registry()
return cls.syspaths
def _bind_interactive_rez(self):
if config.set_prompt and self.settings.prompt:
stored_prompt = os.getenv("REZ_STORED_PROMPT_CMD")
curr_prompt = stored_prompt or os.getenv("PROMPT", "")
if not stored_prompt:
self.setenv("REZ_STORED_PROMPT_CMD", curr_prompt)
new_prompt = "%%REZ_ENV_PROMPT%%"
new_prompt = (new_prompt + " %s") if config.prefix_prompt \
else ("%s " + new_prompt)
new_prompt = new_prompt % curr_prompt
self._addline('set PROMPT=%s' % new_prompt)
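    # For example (illustrative values): with a stored prompt of "$P$G" and
    # config.prefix_prompt enabled, the line emitted above is:
    #   set PROMPT=%REZ_ENV_PROMPT% $P$G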
def spawn_shell(self, context_file, tmpdir, rcfile=None, norc=False,
stdin=False, command=None, env=None, quiet=False,
pre_command=None, add_rez=True, **Popen_args):
command = self._expand_alias(command)
startup_sequence = self.get_startup_sequence(rcfile, norc, bool(stdin), command)
shell_command = None
def _record_shell(ex, files, bind_rez=True, print_msg=False):
ex.source(context_file)
if startup_sequence["envvar"]:
ex.unsetenv(startup_sequence["envvar"])
if add_rez and bind_rez:
ex.interpreter._bind_interactive_rez()
if print_msg and add_rez and not quiet:
ex.info('')
ex.info('You are now in a rez-configured environment.')
ex.info('')
if system.is_production_rez_install:
# previously this was called with the /K flag, however
# that would leave spawn_shell hung on a blocked call
# waiting for the user to type "exit" into the shell that
# was spawned to run the rez context printout
ex.command("cmd /Q /C rez context")
def _create_ex():
return RexExecutor(interpreter=self.new_shell(),
parent_environ={},
add_default_namespaces=False)
executor = _create_ex()
if self.settings.prompt:
executor.interpreter._saferefenv('REZ_ENV_PROMPT')
executor.env.REZ_ENV_PROMPT = \
expandable("%REZ_ENV_PROMPT%").literal(self.settings.prompt)
# Make .py launch within cmd without extension.
if self.settings.additional_pathext:
# Ensure that the PATHEXT does not append duplicates.
fmt = (
'echo %PATHEXT%|C:\\Windows\\System32\\findstr.exe /i /c:"{0}">nul '
'|| set PATHEXT=%PATHEXT%;{0}'
)
for pathext in self.settings.additional_pathext:
executor.command(fmt.format(pathext))
# This resets the errorcode, which is tainted by the code above
executor.command("(call )")
if startup_sequence["command"] is not None:
_record_shell(executor, files=startup_sequence["files"])
shell_command = startup_sequence["command"]
else:
_record_shell(executor, files=startup_sequence["files"], print_msg=(not quiet))
if shell_command:
# Launch the provided command in the configured shell and wait
# until it exits.
executor.command(shell_command)
# Test for None specifically because resolved_context.execute_rex_code
# passes '' and we do NOT want to keep a shell open during a rex code
# exec operation.
elif shell_command is None:
# Launch the configured shell itself and wait for user interaction
# to exit.
executor.command('cmd /Q /K')
# Exit the configured shell.
executor.command('exit %errorlevel%')
code = executor.get_output()
target_file = os.path.join(tmpdir, "rez-shell.%s"
% self.file_extension())
with open(target_file, 'w') as f:
f.write(code)
if startup_sequence["stdin"] and stdin and (stdin is not True):
Popen_args["stdin"] = stdin
cmd = []
if pre_command:
if isinstance(pre_command, basestring):
cmd = pre_command.strip().split()
else:
cmd = pre_command
# Test for None specifically because resolved_context.execute_rex_code
# passes '' and we do NOT want to keep a shell open during a rex code
# exec operation.
if shell_command is None:
cmd_flags = ['/Q', '/K']
else:
cmd_flags = ['/Q', '/C']
cmd += [self.executable]
cmd += cmd_flags
cmd += ['call {}'.format(target_file)]
is_detached = (cmd[0] == 'START')
p = Popen(cmd, env=env, shell=is_detached, **Popen_args)
return p
def get_output(self, style=OutputStyle.file):
if style == OutputStyle.file:
script = '\n'.join(self._lines) + '\n'
else: # eval style
lines = []
for line in self._lines:
if not line.startswith('REM'): # strip comments
line = line.rstrip()
lines.append(line)
script = '&& '.join(lines)
return script
def escape_string(self, value, is_path=False):
"""Escape the <, >, ^, and & special characters reserved by Windows.
Args:
value (str/EscapedString): String or already escaped string.
Returns:
str: The value escaped for Windows.
"""
value = EscapedString.promote(value)
value = value.expanduser()
result = ''
for is_literal, txt in value.strings:
if is_literal:
txt = self._escaper(txt)
# Note that cmd uses ^% while batch files use %% to escape %
txt = self._env_var_regex.sub(r"%%\1%%", txt)
else:
if is_path:
txt = self.normalize_paths(txt)
txt = self._escaper(txt)
result += txt
return result
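    # Illustrative examples (not drawn from the rez test suite) of the
    # escaping performed above:
    #   escape_string('a & b > c')  ->  'a ^& b ^> c'
    #   escape_string('x | y')      ->  'x ^| y'
    # %VAR% references in literal text are doubled to %%VAR%% so the
    # generated batch script does not expand them at run time.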
def normalize_path(self, path):
return to_windows_path(path)
def _saferefenv(self, key):
pass
def shebang(self):
pass
def setenv(self, key, value):
value = self.escape_string(value, is_path=self._is_pathed_key(key))
self._addline('set %s=%s' % (key, value))
def unsetenv(self, key):
self._addline("set %s=" % key)
def resetenv(self, key, value, friends=None):
        self.setenv(key, value)
def alias(self, key, value):
# find doskey, falling back to system paths if not in $PATH. Fall back
# to unqualified 'doskey' if all else fails
if self._doskey is None:
try:
self.__class__._doskey = \
self.find_executable("doskey", check_syspaths=True)
            except Exception:
self._doskey = "doskey"
self._doskey_aliases[key] = value
self._addline("%s %s=%s $*" % (self._doskey, key, value))
def comment(self, value):
for line in value.split('\n'):
self._addline('REM %s' % line)
def info(self, value):
for line in value.split('\n'):
line = self.escape_string(line)
line = self.convert_tokens(line)
if line:
self._addline('echo %s' % line)
else:
self._addline('echo.')
def error(self, value):
for line in value.split('\n'):
line = self.escape_string(line)
line = self.convert_tokens(line)
self._addline('echo "%s" 1>&2' % line)
def source(self, value):
self._addline("call %s" % value)
def command(self, value):
self._addline(value)
@classmethod
def get_all_key_tokens(cls, key):
return ["%{}%".format(key)]
@classmethod
def join(cls, command):
# TODO: This needs to be properly fixed, see other shell impls
# at https://github.com/AcademySoftwareFoundation/rez/pull/1130
#
# TODO: This may disappear in future [1]
# [1] https://bugs.python.org/issue10838
return subprocess.list2cmdline(command)
@classmethod
def line_terminator(cls):
return "\r\n"
def _expand_alias(self, command):
"""Expand `command` if alias is being presented
This is important for Windows CMD shell because the doskey.exe isn't
executed yet when the alias is being passed in `command`. This means we
cannot rely on doskey.exe to execute alias in first run. So here we
lookup alias that were just parsed from package, replace it with full
command if matched.
"""
if command:
word = command.split()[0]
resolved_alias = self._doskey_aliases.get(word)
if resolved_alias:
command = command.replace(word, resolved_alias, 1)
return command
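    # Illustrative example (hypothetical alias): after alias('ll', 'dir /b'),
    # _expand_alias('ll src') returns 'dir /b src', because doskey has not yet
    # run when the startup command is launched.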
def register_plugin():
if platform_.name == "windows":
return CMD
|
{
"content_hash": "8d7a15b4883e653b8cb7bb0952651b43",
"timestamp": "",
"source": "github",
"line_count": 339,
"max_line_length": 91,
"avg_line_length": 34.07079646017699,
"alnum_prop": 0.5631168831168831,
"repo_name": "nerdvegas/rez",
"id": "c3afdcc01e48d57592a467ff649cf267cfaab48c",
"size": "11634",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/rezplugins/shell/cmd.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13"
},
{
"name": "CMake",
"bytes": "61812"
},
{
"name": "Dockerfile",
"bytes": "3668"
},
{
"name": "PowerShell",
"bytes": "1390"
},
{
"name": "Python",
"bytes": "1950470"
},
{
"name": "Shell",
"bytes": "3185"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from __future__ import absolute_import
# System modules
import argparse
import sys
import multiprocessing
import os
import textwrap
# Third-party modules
# LLDB modules
from . import configuration
class ArgParseNamespace(object):
pass
def parse_args(parser, argv):
""" Returns an argument object. LLDB_TEST_ARGUMENTS environment variable can
be used to pass additional arguments.
"""
args = ArgParseNamespace()
if ('LLDB_TEST_ARGUMENTS' in os.environ):
print(
"Arguments passed through environment: '%s'" %
os.environ['LLDB_TEST_ARGUMENTS'])
        args = parser.parse_args(
            [sys.argv[0]] + os.environ['LLDB_TEST_ARGUMENTS'].split(),
            namespace=args)
return parser.parse_args(args=argv, namespace=args)
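# A note on precedence (summarizing parse_args above, not extra behavior):
# environment-supplied arguments are parsed into the namespace first and argv
# afterwards, so single-valued options from argv replace environment values,
# while action='append' options accumulate across both sources.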
def default_thread_count():
# Check if specified in the environment
num_threads_str = os.environ.get("LLDB_TEST_THREADS")
if num_threads_str:
return int(num_threads_str)
else:
return multiprocessing.cpu_count()
def create_parser():
parser = argparse.ArgumentParser(
        description='LLDB test suite driver',
prefix_chars='+-',
add_help=False)
group = None
# Helper function for boolean options (group will point to the current
# group when executing X)
X = lambda optstr, helpstr, **kwargs: group.add_argument(
optstr, help=helpstr, action='store_true', **kwargs)
group = parser.add_argument_group('Help')
group.add_argument(
'-h',
'--help',
dest='h',
action='store_true',
help="Print this help message and exit. Add '-v' for more detailed help.")
# C and Python toolchain options
group = parser.add_argument_group('Toolchain options')
group.add_argument(
'-A',
'--arch',
metavar='arch',
dest='arch',
help=textwrap.dedent('''Specify the architecture(s) to test. This option can be specified more than once'''))
group.add_argument('-C', '--compiler', metavar='compiler', dest='compiler', help=textwrap.dedent(
'''Specify the compiler(s) used to build the inferior executables. The compiler path can be an executable basename or a full path to a compiler executable. This option can be specified multiple times.'''))
if sys.platform == 'darwin':
group.add_argument('--apple-sdk', metavar='apple_sdk', dest='apple_sdk', default="macosx", help=textwrap.dedent(
'''Specify the name of the Apple SDK (macosx, macosx.internal, iphoneos, iphoneos.internal, or path to SDK) and use the appropriate tools from that SDK's toolchain.'''))
# FIXME? This won't work for different extra flags according to each arch.
group.add_argument(
'-E',
metavar='extra-flags',
help=textwrap.dedent('''Specify the extra flags to be passed to the toolchain when building the inferior programs to be debugged
suggestions: do not lump the "-A arch1 -A arch2" together such that the -E option applies to only one of the architectures'''))
group.add_argument('--dsymutil', metavar='dsymutil', dest='dsymutil', help=textwrap.dedent('Specify which dsymutil to use.'))
# Test filtering options
group = parser.add_argument_group('Test filtering options')
group.add_argument(
'-f',
metavar='filterspec',
action='append',
help='Specify a filter, which consists of the test class name, a dot, followed by the test method, to only admit such test into the test suite') # FIXME: Example?
X('-l', "Don't skip long running tests")
group.add_argument(
'-p',
metavar='pattern',
help='Specify a regexp filename pattern for inclusion in the test suite')
group.add_argument('--excluded', metavar='exclusion-file', action='append', help=textwrap.dedent(
'''Specify a file for tests to exclude. File should contain lists of regular expressions for test files or methods,
with each list under a matching header (xfail files, xfail methods, skip files, skip methods)'''))
group.add_argument(
'-G',
'--category',
metavar='category',
action='append',
dest='categoriesList',
help=textwrap.dedent('''Specify categories of test cases of interest. Can be specified more than once.'''))
group.add_argument(
'--skip-category',
metavar='category',
action='append',
dest='skipCategories',
help=textwrap.dedent('''Specify categories of test cases to skip. Takes precedence over -G. Can be specified more than once.'''))
# Configuration options
group = parser.add_argument_group('Configuration options')
group.add_argument(
'--framework',
metavar='framework-path',
help='The path to LLDB.framework')
group.add_argument(
'--executable',
metavar='executable-path',
help='The path to the lldb executable')
group.add_argument(
'--server',
metavar='server-path',
help='The path to the debug server executable to use')
group.add_argument(
'--out-of-tree-debugserver',
dest='out_of_tree_debugserver',
action='store_true',
help='A flag to indicate an out-of-tree debug server is being used')
group.add_argument(
'-s',
metavar='name',
help='Specify the name of the dir created to store the session files of tests with errored or failed status. If not specified, the test driver uses the timestamp as the session dir name')
group.add_argument(
'-S',
'--session-file-format',
default=configuration.session_file_format,
metavar='format',
help='Specify session file name format. See configuration.py for a description.')
group.add_argument(
'-y',
type=int,
metavar='count',
help="Specify the iteration count used to collect our benchmarks. An example is the number of times to do 'thread step-over' to measure stepping speed.")
group.add_argument(
'-#',
type=int,
metavar='sharp',
dest='sharp',
help='Repeat the test suite for a specified number of times')
group.add_argument('--channel', metavar='channel', dest='channels', action='append', help=textwrap.dedent(
"Specify the log channels (and optional categories) e.g. 'lldb all' or 'gdb-remote packets' if no categories are specified, 'default' is used"))
group.add_argument(
'--log-success',
dest='log_success',
action='store_true',
help="Leave logs/traces even for successful test runs (useful for creating reference log files during debugging.)")
group.add_argument(
'--codesign-identity',
metavar='Codesigning identity',
default='lldb_codesign',
help='The codesigning identity to use')
group.add_argument(
'--build-dir',
dest='test_build_dir',
metavar='Test build directory',
default='lldb-test-build.noindex',
help='The root build directory for the tests. It will be removed before running.')
# Configuration options
group = parser.add_argument_group('Remote platform options')
group.add_argument(
'--platform-name',
dest='lldb_platform_name',
metavar='platform-name',
help='The name of a remote platform to use')
group.add_argument(
'--platform-url',
dest='lldb_platform_url',
metavar='platform-url',
help='A LLDB platform URL to use when connecting to a remote platform to run the test suite')
group.add_argument(
'--platform-working-dir',
dest='lldb_platform_working_dir',
metavar='platform-working-dir',
help='The directory to use on the remote platform.')
# Test-suite behaviour
group = parser.add_argument_group('Runtime behaviour options')
X('-d', 'Suspend the process after launch to wait indefinitely for a debugger to attach')
X('-q', "Don't print extra output from this script.")
X('-t', 'Turn on tracing of lldb command and other detailed test executions')
group.add_argument(
'-u',
dest='unset_env_varnames',
metavar='variable',
action='append',
help='Specify an environment variable to unset before running the test cases. e.g., -u DYLD_INSERT_LIBRARIES -u MallocScribble')
group.add_argument(
'--env',
dest='set_env_vars',
metavar='variable',
action='append',
help='Specify an environment variable to set to the given value before running the test cases e.g.: --env CXXFLAGS=-O3 --env DYLD_INSERT_LIBRARIES')
X('-v', 'Do verbose mode of unittest framework (print out each test case invocation)')
group.add_argument(
'--enable-crash-dialog',
dest='disable_crash_dialog',
action='store_false',
help='(Windows only) When LLDB crashes, display the Windows crash dialog.')
group.set_defaults(disable_crash_dialog=True)
group = parser.add_argument_group('Parallel execution options')
group.add_argument(
'--inferior',
action='store_true',
help=('specify this invocation is a multiprocess inferior, '
'used internally'))
group.add_argument(
'--no-multiprocess',
action='store_true',
help='skip running the multiprocess test runner')
group.add_argument(
'--threads',
type=int,
dest='num_threads',
default=default_thread_count(),
help=('The number of threads/processes to use when running tests '
'separately, defaults to the number of CPU cores available'))
group.add_argument(
'--test-subdir',
action='store',
help='Specify a test subdirectory to use relative to the test root dir'
)
group.add_argument(
'--test-runner-name',
action='store',
help=('Specify a test runner strategy. Valid values: multiprocessing,'
' multiprocessing-pool, serial, threading, threading-pool')
)
# Test results support.
group = parser.add_argument_group('Test results options')
group.add_argument(
'--curses',
action='store_true',
help='Shortcut for specifying test results using the curses formatter')
group.add_argument(
'--results-file',
action='store',
help=('Specifies the file where test results will be written '
'according to the results-formatter class used'))
group.add_argument(
'--results-port',
action='store',
type=int,
help=('Specifies the localhost port to which the results '
'formatted output should be sent'))
group.add_argument(
'--results-formatter',
action='store',
help=('Specifies the full package/module/class name used to translate '
'test events into some kind of meaningful report, written to '
'the designated output results file-like object'))
group.add_argument(
'--results-formatter-option',
'-O',
action='append',
dest='results_formatter_options',
help=('Specify an option to pass to the formatter. '
'Use --results-formatter-option="--option1=val1" '
'syntax. Note the "=" is critical, don\'t include whitespace.'))
group.add_argument(
'--event-add-entries',
action='store',
help=('Specify comma-separated KEY=VAL entries to add key and value '
'pairs to all test events generated by this test run. VAL may '
'be specified as VAL:TYPE, where TYPE may be int to convert '
'the value to an int'))
# Re-run related arguments
group = parser.add_argument_group('Test Re-run Options')
group.add_argument(
'--rerun-all-issues',
action='store_true',
help=('Re-run all issues that occurred during the test run '
'irrespective of the test method\'s marking as flakey. '
'Default behavior is to apply re-runs only to flakey '
'tests that generate issues.'))
group.add_argument(
'--rerun-max-file-threshold',
action='store',
type=int,
default=50,
help=('Maximum number of files requiring a rerun beyond '
'which the rerun will not occur. This is meant to '
'stop a catastrophically failing test suite from forcing '
'all tests to be rerun in the single-worker phase.'))
# Remove the reference to our helper function
del X
group = parser.add_argument_group('Test directories')
group.add_argument(
'args',
metavar='test-dir',
nargs='*',
help='Specify a list of directory names to search for test modules named after Test*.py (test discovery). If empty, search from the current working directory instead.')
return parser
|
{
"content_hash": "aeb7450ea8502c818dc9fc0eaae90f2f",
"timestamp": "",
"source": "github",
"line_count": 316,
"max_line_length": 213,
"avg_line_length": 41.4873417721519,
"alnum_prop": 0.6324942791762014,
"repo_name": "youtube/cobalt_sandbox",
"id": "58190d8c75eebf99f27c9fb0f7afa61da8bb35b4",
"size": "13110",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "third_party/llvm-project/lldb/packages/Python/lldbsuite/test/dotest_args.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid as fluid
def crop(data, offsets, crop_shape):
def indexOf(shape, index):
result = []
for dim in reversed(shape):
result.append(index % dim)
            index = index // dim  # integer division; correct on Python 2 and 3
return result[::-1]
result = []
for i, value in enumerate(data.flatten()):
index = indexOf(data.shape, i)
selected = True
if len(index) == len(offsets):
for j, offset in enumerate(offsets):
selected = selected and index[j] >= offset and index[
j] < crop_shape[j] + offset
if selected:
result.append(value)
return np.array(result).reshape(crop_shape)
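# For example (illustrative): with data = np.arange(16).reshape(4, 4),
# crop(data, offsets=[1, 1], crop_shape=[2, 2]) returns [[5, 6], [9, 10]].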
class TestCropTensorOp(OpTest):
def setUp(self):
self.op_type = "crop_tensor"
self.shape_by_input = False
self.offset_by_input = False
self.unk_dim_idx = -1
self.attrs = {}
self.initTestCase()
if self.shape_by_input:
self.inputs = {
'X': np.random.random(self.x_shape).astype("float32"),
'Shape': np.array(self.crop_shape).astype("int32")
}
else:
self.attrs['shape'] = self.crop_shape
self.inputs = {
'X': np.random.random(self.x_shape).astype("float32"),
}
if self.offset_by_input:
self.inputs['Offsets'] = np.array(self.offsets).astype('int32')
else:
self.attrs['offsets'] = self.offsets
crop_shape = [val for val in self.crop_shape]
for i in range(len(self.crop_shape)):
if self.crop_shape[i] == -1:
crop_shape[i] = self.x_shape[i] - self.offsets[i]
self.outputs = {'Out': crop(self.inputs['X'], self.offsets, crop_shape)}
def initTestCase(self):
self.x_shape = (8, 8)
self.crop_shape = [2, 2]
self.offsets = [1, 2]
def test_check_output(self):
self.check_output()
def test_check_grad_normal(self):
self.check_grad(['X'], 'Out', max_relative_error=0.006)
class TestCase1(TestCropTensorOp):
def initTestCase(self):
        self.x_shape = (100, )
self.crop_shape = [64]
self.offsets = [13]
class TestCase2(TestCropTensorOp):
def initTestCase(self):
self.x_shape = (12, 24)
self.crop_shape = [-1, 8]
self.offsets = [0, 0]
class TestCase3(TestCropTensorOp):
def initTestCase(self):
self.x_shape = (4, 8, 16)
self.crop_shape = [2, 2, 3]
self.offsets = [1, 5, 3]
self.shape_by_input = True
class TestCase4(TestCropTensorOp):
def initTestCase(self):
self.x_shape = (8, 3, 6, 6)
self.crop_shape = [-1, 3, -1, 4]
self.offsets = [0, 0, 1, 0]
self.shape_by_input = True
class TestCase5(TestCropTensorOp):
def initTestCase(self):
self.x_shape = (2, 4, 5, 8, 8)
self.crop_shape = [1, 1, 2, 4, 4]
self.offsets = [1, 0, 0, 2, 2]
self.offset_by_input = True
class TestCase6(TestCropTensorOp):
def initTestCase(self):
self.x_shape = (2, 2, 4, 4, 4, 2)
self.crop_shape = [1, 1, 4, 2, 2, 2]
self.offsets = [0, 0, 0, 0, 0, 0]
self.shape_by_input = True
self.offset_by_input = True
class TestCropTensorOpTensorAttr(OpTest):
def setUp(self):
self.op_type = "crop_tensor"
self.OffsetsTensor = False
self.ShapeTensor = True
self.attrs = {}
self.initTestCase()
if self.ShapeTensor:
shape_tensor = []
for index, ele in enumerate(self.crop_shape):
shape_tensor.append(("x" + str(index), np.ones(
(1)).astype('int32') * ele))
self.inputs = {
'X': np.random.random(self.x_shape).astype("float32"),
'ShapeTensor': shape_tensor
}
self.attrs['shape'] = self.shape_attr
if self.OffsetsTensor:
offsets_tensor = []
for index, ele in enumerate(self.offsets):
offsets_tensor.append(("x" + str(index), np.ones(
(1)).astype('int32') * ele))
self.inputs = {
'X': np.random.random(self.x_shape).astype("float32"),
'OffsetsTensor': offsets_tensor
}
self.attrs['offsets'] = self.offsets_attr
self.attrs['shape'] = self.crop_shape
self.attrs['offsets'] = self.offsets
crop_shape = [val for val in self.crop_shape]
for i in range(len(self.crop_shape)):
if self.crop_shape[i] == -1:
crop_shape[i] = self.x_shape[i] - self.offsets[i]
self.outputs = {'Out': crop(self.inputs['X'], self.offsets, crop_shape)}
def initTestCase(self):
self.x_shape = (8, 8)
self.crop_shape = (2, 2)
self.offsets = [1, 2]
self.shape_attr = [0, 0]
def test_check_output(self):
self.check_output()
def test_check_grad_normal(self):
self.check_grad(["X"], "Out", max_relative_error=0.006)
class TestCropTensorOpTensorAttrCase1(TestCropTensorOpTensorAttr):
def initTestCase(self):
self.x_shape = (16, 8, 32)
self.crop_shape = [-1, -1, 3]
self.offsets = [1, 5, 3]
self.shape_attr = [-1, -1, 3]
class TestCropTensorOpTensorAttrCase2(TestCropTensorOpTensorAttr):
def initTestCase(self):
self.x_shape = (4, 8, 16, 8)
self.crop_shape = [2, 2, 3, 4]
self.offsets = [1, 5, 3, 0]
self.shape_attr = [0, 0, 3, 4]
class TestCropTensorOpTensorAttrCase3(TestCropTensorOpTensorAttr):
def initTestCase(self):
self.x_shape = (16, 8, 32)
self.crop_shape = [2, 2, 3]
self.offsets = [1, 5, 3]
self.offsets_attr = [-1, -1, 3]
self.ShapeTensor = False
self.OffsetsTensor = True
class TestCropTensorOpTensorAttrCase4(TestCropTensorOpTensorAttr):
def initTestCase(self):
self.x_shape = (16, 8, 32)
self.crop_shape = [2, 2, 3]
self.shape_attr = [0, 2, 3]
self.offsets = [1, 5, 3]
self.offsets_attr = [-1, -1, 3]
self.OffsetsTensor = True
class TestCropTensorException(OpTest):
def test_exception(self):
input1 = fluid.data(name="input1", shape=[2, 3, 6, 6], dtype="float32")
input2 = fluid.data(name="input2", shape=[2, 3, 6, 6], dtype="float16")
dim = fluid.data(name='dim', shape=[1], dtype='int32')
offset = fluid.data(name='offset', shape=[1], dtype='int32')
def attr_shape_type():
out = fluid.layers.crop_tensor(input1, shape=3)
def attr_shape_dtype():
out = fluid.layers.crop_tensor(input1, shape=[2, 2.0, 3, 3])
def attr_shape_value1():
out = fluid.layers.crop_tensor(input1, shape=[2, -2, dim, 3])
def attr_shape_value2():
out = fluid.layers.crop_tensor(input1, shape=[2, 0, dim, 3])
def attr_offsets_type():
out = fluid.layers.crop_tensor(
input1, shape=[2, 2, 3, 3], offsets=0)
def attr_offsets_dtype():
out = fluid.layers.crop_tensor(
input1, shape=[2, 2, 3, 3], offsets=[0, 1.0, 0, 0])
def attr_offsets_value():
out = fluid.layers.crop_tensor(
input1, shape=[2, 2, 3, 3], offsets=[0, -1, offset, 0])
def input_dtype():
out = fluid.layers.crop_tensor(input2, shape=[2, 2, 3, 3])
self.assertRaises(TypeError, attr_shape_type)
self.assertRaises(TypeError, attr_shape_dtype)
self.assertRaises(ValueError, attr_shape_value1)
self.assertRaises(ValueError, attr_shape_value2)
self.assertRaises(TypeError, attr_offsets_type)
self.assertRaises(TypeError, attr_offsets_dtype)
self.assertRaises(ValueError, attr_offsets_value)
self.assertRaises(TypeError, input_dtype)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "c2a011568a0cfa70bb1c7b5898d797bf",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 80,
"avg_line_length": 32.330677290836654,
"alnum_prop": 0.5566235366605052,
"repo_name": "chengduoZH/Paddle",
"id": "5864e15df132766992271301bdccb058fdc5d781",
"size": "8728",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/test_crop_tensor_op.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "32490"
},
{
"name": "C++",
"bytes": "10146609"
},
{
"name": "CMake",
"bytes": "291349"
},
{
"name": "Cuda",
"bytes": "1192566"
},
{
"name": "Dockerfile",
"bytes": "10002"
},
{
"name": "Python",
"bytes": "7124331"
},
{
"name": "Ruby",
"bytes": "353"
},
{
"name": "Shell",
"bytes": "200906"
}
],
"symlink_target": ""
}
|
"""
Scan multiple DNSBLs for IP addresses or a domain.
Copyright (c) 2012, 2013 Rob Cakebread
All rights reserved.
If you give a domain, musubi will try to find the IP addresses of each
mail server by querying the domain's MX DNS records and then resolving
the returned hosts. If your mail server uses round-robin DNS, this of
course won't find all the IPs; in that case, find the CIDR range of the
servers and scan that instead, e.g.
musubi scan 192.0.64.0/24
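A single IP address or a domain may also be given directly (placeholder
example values shown):
musubi scan 192.0.2.1
musubi scan example.com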
"""
import sys
import logging
import dns
from cliff.lister import Lister
from IPy import IP
import requests
from .dnsbl import Base
from .netdns import get_mx_hosts, ips_from_domains, get_txt, build_query, \
net_calc, verify_domain
requests_log = logging.getLogger("requests")
requests_log.setLevel(logging.WARNING)
DNSBL_LIST = 'http://musubi.cakebread.info/dnsbl.txt'
# Try to get list of working DNSBLs checked hourly, experimental.
# TODO Add options to use local list, pipe in, etc.
try:
    # Guard the import-time lookup; fall back if the network is unavailable.
    req = requests.get(DNSBL_LIST, timeout=10)
    _have_remote_list = req.status_code == 200
except requests.RequestException:
    _have_remote_list = False
if _have_remote_list:
    BASE_DNSBLS = req.text.split()
else:
    from .dnsbllist import BASE_DNSBLS
class Scan(Lister):
"""Scan multiple DNSBLs by IP or domain"""
log = logging.getLogger(__name__)
def get_parser(self, prog_name):
parser = super(Scan, self).get_parser(prog_name)
parser.add_argument('scan', nargs='?', default=None)
return parser
def dnsbl_check(self, ip):
backend = Base(ip=ip, dnsbls=BASE_DNSBLS)
return backend.check()
def dnsbl_scanner(self, rdata, ip):
for dnsbl, blacklisted in self.dnsbl_check(ip):
# Scan.log.debug('Testing: %s' % dnsbl)
if blacklisted:
Scan.log.debug('blacklisted: %s' % dnsbl)
try:
query = build_query(ip, dnsbl)
txt = get_txt(query)[0]
except dns.resolver.NoAnswer:
                    Scan.log.debug("No TXT record for %s" % query)
                    txt = ''  # avoid an unbound name in the append below
rdata.append(
(ip,
dnsbl,
blacklisted,
txt,)
)
return rdata
def take_action(self, parsed_args):
"""This could be a lot prettier if I used these as arguments
instead of trying to detect input type --IP --domain --range
It's just easier to use without them, hmm.
"""
arg = parsed_args.scan
rdata = []
if "/" in arg:
# CIDR notation
ips = net_calc(arg)
else:
try:
# Throw exception if it's not an IP and then try domain name
ip = IP(arg)
ips = [ip]
except ValueError:
if verify_domain(arg):
hosts = get_mx_hosts(arg)
ips = ips_from_domains(hosts)
else:
raise RuntimeError('Can not lookup domain: %s' % arg)
for ip in ips:
ip = str(ip)
rdata = self.dnsbl_scanner(rdata, ip)
if not len(rdata):
Scan.log.debug("Not found on any DNSBL lists.")
sys.exit(0)
Scan.log.debug(rdata)
return (('IP', 'DNSBL Host', 'Response Code', 'DNS TXT Record'), rdata)
|
{
"content_hash": "38aabcdd51676270b1bdacc046fa0613",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 79,
"avg_line_length": 29.81651376146789,
"alnum_prop": 0.576,
"repo_name": "cakebread/musubi",
"id": "3a6bf0d09b3c529abc9d6d52f3bd8dc5e927cf8a",
"size": "3251",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "musubi/scan.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "33717"
}
],
"symlink_target": ""
}
|
"""Helper utilities and decorators."""
from functools import wraps
from flask import flash, redirect, request, url_for
from flask_login import current_user
def flash_errors(form, category='warning'):
"""Flash all errors for a form."""
for field, errors in form.errors.items():
for error in errors:
flash('{0} - {1}'.format(getattr(form, field).label.text, error), category)
def admin_required(f):
    """Restrict a view to authenticated admin users."""
    @wraps(f)
    def decorated_function(*args, **kwargs):
        # Flask-Login supplies an anonymous user object rather than None.
        if not current_user.is_authenticated:
            return redirect(url_for('login', next=request.url))
        # Check the flag on the logged-in user, not on the User class.
        if not current_user.is_admin:
            flash('Not authorized to edit', 'warning')
            # url_for() expects an endpoint name, not a URL; send the user
            # back to where they came from instead.
            return redirect(request.referrer or '/')
        return f(*args, **kwargs)
    return decorated_function
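# Usage sketch (blueprint, route, and view names below are illustrative):
#     @blueprint.route('/admin/settings')
#     @admin_required
#     def admin_settings():
#         ...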
|
{
"content_hash": "9c387bf719426f5876e2368d233420eb",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 78,
"avg_line_length": 30.76923076923077,
"alnum_prop": 0.72,
"repo_name": "joyider/op_mon",
"id": "2d2e28eeabe5b71d2053cb2f5a5c641c6f48daed",
"size": "824",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "op_mon/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "228476"
},
{
"name": "CoffeeScript",
"bytes": "83631"
},
{
"name": "HTML",
"bytes": "133029"
},
{
"name": "JavaScript",
"bytes": "1142336"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "64114"
},
{
"name": "Shell",
"bytes": "444"
}
],
"symlink_target": ""
}
|
"""Lovelace dashboard support."""
from abc import ABC, abstractmethod
import logging
import os
from pathlib import Path
import time
from typing import Optional, cast
import voluptuous as vol
from homeassistant.components.frontend import DATA_PANELS
from homeassistant.const import CONF_FILENAME
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import collection, storage
from homeassistant.util.yaml import Secrets, load_yaml
from .const import (
CONF_ICON,
CONF_URL_PATH,
DOMAIN,
EVENT_LOVELACE_UPDATED,
LOVELACE_CONFIG_FILE,
MODE_STORAGE,
MODE_YAML,
STORAGE_DASHBOARD_CREATE_FIELDS,
STORAGE_DASHBOARD_UPDATE_FIELDS,
ConfigNotFound,
)
CONFIG_STORAGE_KEY_DEFAULT = DOMAIN
CONFIG_STORAGE_KEY = "lovelace.{}"
CONFIG_STORAGE_VERSION = 1
DASHBOARDS_STORAGE_KEY = f"{DOMAIN}_dashboards"
DASHBOARDS_STORAGE_VERSION = 1
_LOGGER = logging.getLogger(__name__)
class LovelaceConfig(ABC):
"""Base class for Lovelace config."""
def __init__(self, hass, url_path, config):
"""Initialize Lovelace config."""
self.hass = hass
if config:
self.config = {**config, CONF_URL_PATH: url_path}
else:
self.config = None
@property
def url_path(self) -> str:
"""Return url path."""
return self.config[CONF_URL_PATH] if self.config else None
@property
@abstractmethod
def mode(self) -> str:
"""Return mode of the lovelace config."""
@abstractmethod
async def async_get_info(self):
"""Return the config info."""
@abstractmethod
async def async_load(self, force):
"""Load config."""
async def async_save(self, config):
"""Save config."""
raise HomeAssistantError("Not supported")
async def async_delete(self):
"""Delete config."""
raise HomeAssistantError("Not supported")
@callback
def _config_updated(self):
"""Fire config updated event."""
self.hass.bus.async_fire(EVENT_LOVELACE_UPDATED, {"url_path": self.url_path})
class LovelaceStorage(LovelaceConfig):
"""Class to handle Storage based Lovelace config."""
def __init__(self, hass, config):
"""Initialize Lovelace config based on storage helper."""
if config is None:
url_path = None
storage_key = CONFIG_STORAGE_KEY_DEFAULT
else:
url_path = config[CONF_URL_PATH]
storage_key = CONFIG_STORAGE_KEY.format(config["id"])
super().__init__(hass, url_path, config)
self._store = storage.Store(hass, CONFIG_STORAGE_VERSION, storage_key)
self._data = None
@property
def mode(self) -> str:
"""Return mode of the lovelace config."""
return MODE_STORAGE
async def async_get_info(self):
"""Return the Lovelace storage info."""
if self._data is None:
await self._load()
if self._data["config"] is None:
return {"mode": "auto-gen"}
return _config_info(self.mode, self._data["config"])
async def async_load(self, force):
"""Load config."""
if self.hass.config.safe_mode:
raise ConfigNotFound
if self._data is None:
await self._load()
config = self._data["config"]
if config is None:
raise ConfigNotFound
return config
async def async_save(self, config):
"""Save config."""
if self.hass.config.safe_mode:
raise HomeAssistantError("Saving not supported in safe mode")
if self._data is None:
await self._load()
self._data["config"] = config
self._config_updated()
await self._store.async_save(self._data)
async def async_delete(self):
"""Delete config."""
if self.hass.config.safe_mode:
raise HomeAssistantError("Deleting not supported in safe mode")
await self._store.async_remove()
self._data = None
self._config_updated()
async def _load(self):
"""Load the config."""
data = await self._store.async_load()
self._data = data if data else {"config": None}
class LovelaceYAML(LovelaceConfig):
"""Class to handle YAML-based Lovelace config."""
def __init__(self, hass, url_path, config):
"""Initialize the YAML config."""
super().__init__(hass, url_path, config)
self.path = hass.config.path(
config[CONF_FILENAME] if config else LOVELACE_CONFIG_FILE
)
self._cache = None
@property
def mode(self) -> str:
"""Return mode of the lovelace config."""
return MODE_YAML
async def async_get_info(self):
"""Return the YAML storage mode."""
try:
config = await self.async_load(False)
except ConfigNotFound:
return {
"mode": self.mode,
"error": f"{self.path} not found",
}
return _config_info(self.mode, config)
async def async_load(self, force):
"""Load config."""
is_updated, config = await self.hass.async_add_executor_job(
self._load_config, force
)
if is_updated:
self._config_updated()
return config
def _load_config(self, force):
"""Load the actual config."""
# Check for a cached version of the config
if not force and self._cache is not None:
config, last_update = self._cache
modtime = os.path.getmtime(self.path)
if config and last_update > modtime:
return False, config
is_updated = self._cache is not None
try:
config = load_yaml(self.path, Secrets(Path(self.hass.config.config_dir)))
except FileNotFoundError:
raise ConfigNotFound from None
self._cache = (config, time.time())
return is_updated, config
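    # Cache semantics, summarized: a (config, load_time) pair is kept per
    # instance and served only when force is False and the file's mtime is
    # older than the last load, so on-disk edits invalidate it automatically.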
def _config_info(mode, config):
"""Generate info about the config."""
return {
"mode": mode,
"views": len(config.get("views", [])),
}
class DashboardsCollection(collection.StorageCollection):
"""Collection of dashboards."""
CREATE_SCHEMA = vol.Schema(STORAGE_DASHBOARD_CREATE_FIELDS)
UPDATE_SCHEMA = vol.Schema(STORAGE_DASHBOARD_UPDATE_FIELDS)
def __init__(self, hass):
"""Initialize the dashboards collection."""
super().__init__(
storage.Store(hass, DASHBOARDS_STORAGE_VERSION, DASHBOARDS_STORAGE_KEY),
_LOGGER,
)
async def _async_load_data(self) -> Optional[dict]:
"""Load the data."""
data = await self.store.async_load()
if data is None:
return cast(Optional[dict], data)
updated = False
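# Dashboards stored before url_paths were required to contain a hyphen
# are migrated here by prefixing them with "lovelace-".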
for item in data["items"] or []:
if "-" not in item[CONF_URL_PATH]:
updated = True
item[CONF_URL_PATH] = f"lovelace-{item[CONF_URL_PATH]}"
if updated:
await self.store.async_save(data)
return cast(Optional[dict], data)
async def _process_create_data(self, data: dict) -> dict:
"""Validate the config is valid."""
if "-" not in data[CONF_URL_PATH]:
raise vol.Invalid("Url path needs to contain a hyphen (-)")
if data[CONF_URL_PATH] in self.hass.data[DATA_PANELS]:
raise vol.Invalid("Panel url path needs to be unique")
return self.CREATE_SCHEMA(data)
@callback
def _get_suggested_id(self, info: dict) -> str:
"""Suggest an ID based on the config."""
return info[CONF_URL_PATH]
async def _update_data(self, data: dict, update_data: dict) -> dict:
"""Return a new updated data object."""
update_data = self.UPDATE_SCHEMA(update_data)
updated = {**data, **update_data}
if CONF_ICON in updated and updated[CONF_ICON] is None:
updated.pop(CONF_ICON)
return updated
|
{
"content_hash": "572d4fdd528c6c861ac210ba219c263b",
"timestamp": "",
"source": "github",
"line_count": 276,
"max_line_length": 85,
"avg_line_length": 29.152173913043477,
"alnum_prop": 0.6005468555804127,
"repo_name": "partofthething/home-assistant",
"id": "c6f4726724bcb6a1d025329f66898fd4e5d67e01",
"size": "8046",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/lovelace/dashboard.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1720"
},
{
"name": "Python",
"bytes": "31051838"
},
{
"name": "Shell",
"bytes": "4832"
}
],
"symlink_target": ""
}
|
"""Run Performance Test Bisect Tool
This script is used by a trybot to run the src/tools/bisect-perf-regression.py
script with the parameters specified in run-bisect-perf-regression.cfg. It will
check out a copy of the depot in a subdirectory 'bisect' of the working
directory provided, and run the bisect-perf-regression.py script there.
"""
import imp
import optparse
import os
import subprocess
import sys
def LoadConfigFile(path_to_file):
"""Attempts to load the file 'run-bisect-perf-regression.cfg' as a module
and grab the global config dict.
Args:
path_to_file: Path to the run-bisect-perf-regression.cfg file.
Returns:
The config dict which should be formatted as follows:
{'command': string, 'good_revision': string, 'bad_revision': string
'metric': string}.
Returns None on failure.
"""
try:
local_vars = {}
execfile(os.path.join(path_to_file, 'run-bisect-perf-regression.cfg'),
local_vars)
return local_vars['config']
except Exception:
return None
def RunBisectionScript(config, working_directory, path_to_file):
"""Attempts to execute src/tools/bisect-perf-regression.py with the parameters
passed in.
Args:
config: A dict containing the parameters to pass to the script.
working_directory: A working directory to provide to the
bisect-perf-regression.py script, where it will store its own copy of
the depot.
path_to_file: Path to the bisect-perf-regression.py script.
Returns:
0 on success, otherwise 1.
"""
cmd = ['python', os.path.join(path_to_file, 'bisect-perf-regression.py'),
'-c', config['command'],
'-g', config['good_revision'],
'-b', config['bad_revision'],
'-m', config['metric'],
'--working_directory', working_directory,
'--output_buildbot_annotations']
return_code = subprocess.call(cmd)
if return_code:
print 'Error: bisect-perf-regression.py returned with error %d' %\
return_code
print
return return_code
def main():
usage = ('%prog [options] [-- chromium-options]\n'
'Used by a trybot to run the bisection script using the parameters'
' provided in the run-bisect-perf-regression.cfg file.')
parser = optparse.OptionParser(usage=usage)
parser.add_option('-w', '--working_directory',
type='str',
help='A working directory to supply to the bisection '
'script, which will use it as the location to checkout '
'a copy of the chromium depot.')
(opts, args) = parser.parse_args()
if not opts.working_directory:
print 'Error: missing required parameter: --working_directory'
print
parser.print_help()
return 1
path_to_file = os.path.abspath(os.path.dirname(sys.argv[0]))
config = LoadConfigFile(path_to_file)
if not config:
print 'Error: Could not load config file.'
print
return 1
return RunBisectionScript(config, opts.working_directory, path_to_file)
if __name__ == '__main__':
sys.exit(main())
|
{
"content_hash": "996b5355006eafcda7740645e016ac12",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 80,
"avg_line_length": 29.257142857142856,
"alnum_prop": 0.6656901041666666,
"repo_name": "zcbenz/cefode-chromium",
"id": "c43dde970817895c2fcdbfffc1afe913f606536b",
"size": "3261",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/run-bisect-perf-regression.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "1174304"
},
{
"name": "Awk",
"bytes": "9519"
},
{
"name": "C",
"bytes": "76026099"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "157904700"
},
{
"name": "DOT",
"bytes": "1559"
},
{
"name": "F#",
"bytes": "381"
},
{
"name": "Java",
"bytes": "3225038"
},
{
"name": "JavaScript",
"bytes": "18180217"
},
{
"name": "Logos",
"bytes": "4517"
},
{
"name": "Matlab",
"bytes": "5234"
},
{
"name": "Objective-C",
"bytes": "7139426"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "932901"
},
{
"name": "Python",
"bytes": "8654916"
},
{
"name": "R",
"bytes": "262"
},
{
"name": "Ragel in Ruby Host",
"bytes": "3621"
},
{
"name": "Shell",
"bytes": "1533012"
},
{
"name": "Tcl",
"bytes": "277077"
},
{
"name": "XML",
"bytes": "13493"
}
],
"symlink_target": ""
}
|
from django import template
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import tables
LOG = logging.getLogger(__name__)
def render_versions(plugin):
template_name = 'plugins/_versions_list.html'
context = {"plugin": plugin}
return template.loader.render_to_string(template_name, context)
class PluginsTable(tables.DataTable):
title = tables.Column("title",
verbose_name=_("Name"),
link=("horizon:savanna:plugins:details"))
versions = tables.Column(render_versions,
verbose_name=_("Supported Hadoop Versions"))
description = tables.Column("description",
verbose_name=_("Plugin Description"))
class Meta:
name = "plugins"
verbose_name = _("Plugins")
table_actions = ()
row_actions = ()
|
{
"content_hash": "391ae1fdee1bb828f042a7049fac0324",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 73,
"avg_line_length": 27.515151515151516,
"alnum_prop": 0.6057268722466961,
"repo_name": "denismakogon/savanna-dashboard",
"id": "9c0b00c0d5ecf22c657d1566494d078af4ee4ab2",
"size": "1535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "savannadashboard/plugins/tables.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from django import forms
from journeyman.projects.models import Project
class RepositoryForm(forms.Form):
name = forms.CharField(
help_text='What\'s the name of your awesome project?')
repository = forms.CharField(
help_text='Please enter a valid repository url \
(e.g. git+git://github.com/stephrdev/loetwerk.git)')
class BuildProcessForm(forms.Form):
build_steps = forms.CharField(initial='python setup.py install',
widget=forms.Textarea(attrs={'rows':3, 'cols':40}),
help_text='Let\'s start off with the easy stuff, please type in all \
the commands needed to install your package')
test_steps = forms.CharField(initial='python setup.py test',
widget=forms.Textarea(attrs={'rows':3, 'cols':40}),
help_text='Now tell us how to run your tests. If you should have \
many different test suites, just add another line.')
dependencies = forms.CharField(required=False, initial='dependencies.txt',
widget=forms.Textarea(attrs={'rows':3, 'cols':40}),
help_text='Please enter a list of pip requirement files that you \
have used to specify your dependencies')
test_xmls = forms.CharField(required=False,
widget=forms.Textarea(attrs={'rows':3, 'cols':40}),
help_text='Please enter a newline separated list of paths of \
unit test result xmls.')
class JourneyConfigOutputForm(forms.Form):
pass
class JourneyConfigFileForm(forms.Form):
config_file = forms.CharField(initial="journey.conf/config",
help_text="If you leave this field blank, we will store the config \
locally.", required=False)
class ProjectForm(forms.ModelForm):
class Meta:
model = Project
exclude = ['active',]
|
{
"content_hash": "e6be89c05c10eeb1e375351a1c0ddc3c",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 78,
"avg_line_length": 44.075,
"alnum_prop": 0.6812251843448667,
"repo_name": "stephrdev/loetwerk",
"id": "409bd9769b54f11f3f9d7a90b883bcb38d91bacd",
"size": "1763",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "journeyman/projects/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1137"
},
{
"name": "Python",
"bytes": "77495"
},
{
"name": "Shell",
"bytes": "47"
}
],
"symlink_target": ""
}
|
julia_whitespace_factory = util.BuildFactory()
julia_whitespace_factory.useProgress = True
julia_whitespace_factory.addSteps([
# Fetch first (allowing failure if no existing clone is present)
steps.ShellCommand(
name="git fetch",
command=["git", "fetch", "--tags", "--all", "--force"],
flunkOnFailure=False
),
# Clone julia
steps.Git(
name="Julia checkout",
repourl=util.Property('repository', default='git://github.com/JuliaLang/julia.git'),
mode='full',
method='fresh',
submodules=True,
clobberOnFailure=True,
progress=True,
retryFetch=True,
getDescription={'--tags': True},
),
# Check whitespace
steps.ShellCommand(
name="make check-whitespace",
command=["sh", "-c", util.Interpolate("%(prop:make_cmd)s check-whitespace")],
haltOnFailure=True,
# Fail out if 60 minutes have gone by with nothing printed to stdout
timeout=60*60,
# Kill everything if the overall job has taken more than 2 hours
maxTime=60*60*2,
# Give the process 10 seconds to print out the current backtraces when being killed
sigtermTime=10,
),
])
c['schedulers'].append(schedulers.AnyBranchScheduler(
name="Julia check whitespace",
change_filter=util.ChangeFilter(filter_fn=julia_ci_filter),
builderNames=["whitespace_linux32"],
treeStableTimer=1,
))
# Add workers for these jobs
c['builders'].append(util.BuilderConfig(
name="whitespace_linux32",
workernames=builder_mapping["linux32"],
collapseRequests=False,
tags=["Packaging"],
factory=julia_whitespace_factory,
))
# Add a scheduler for building release candidates/triggering builds manually
c['schedulers'].append(schedulers.ForceScheduler(
name="whitespace",
label="Force whitespace",
builderNames=["whitespace_linux32"],
reason=util.FixedParameter(name="reason", default=""),
codebases=[
util.CodebaseParameter(
"",
name="",
branch=util.FixedParameter(name="branch", default=""),
repository=util.FixedParameter(name="repository", default=""),
project=util.FixedParameter(name="project", default="Packaging"),
)
],
properties=[
util.StringParameter(
name="extra_make_flags",
label="Extra Make Flags",
size=30,
default="",
),
],
))
|
{
"content_hash": "1f31e7dcf6dc2e8ff16322bb2a741f6b",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 92,
"avg_line_length": 32.18181818181818,
"alnum_prop": 0.6307506053268765,
"repo_name": "staticfloat/julia-buildbot",
"id": "c69e3a81f18401f7ce7ae8d90e09c7f2a73549f6",
"size": "2478",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "master/whitespace.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Julia",
"bytes": "3406"
},
{
"name": "Makefile",
"bytes": "167"
},
{
"name": "Python",
"bytes": "73098"
},
{
"name": "Shell",
"bytes": "8154"
}
],
"symlink_target": ""
}
|
"""Run all infrastructure-related tests."""
import os
import subprocess
import sys
INFRA_BOTS_DIR = os.path.dirname(os.path.realpath(__file__))
SKIA_DIR = os.path.abspath(os.path.join(INFRA_BOTS_DIR, os.pardir, os.pardir))
def test(cmd, cwd):
try:
subprocess.check_output(cmd, cwd=cwd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
return e.output
def python_unit_tests(train):
if train:
return None
return test(
['python', '-m', 'unittest', 'discover', '-s', '.', '-p', '*_test.py'],
INFRA_BOTS_DIR)
def recipe_test(train):
cmd = [
'python', os.path.join(INFRA_BOTS_DIR, 'recipes.py'), 'test']
if train:
cmd.append('train')
else:
cmd.append('run')
return test(cmd, SKIA_DIR)
def gen_tasks_test(train):
cmd = ['go', 'run', 'gen_tasks.go']
if not train:
cmd.append('--test')
try:
output = test(cmd, INFRA_BOTS_DIR)
except OSError:
return ('Failed to run "%s"; do you have Go installed on your machine?'
% ' '.join(cmd))
if output:
if ('cannot find package "go.skia.org/infra' in output or
'gen_tasks.go:' in output):
return ('Failed to run gen_tasks.go:\n\n%s\nMaybe you need to run:\n\n'
'$ go get -u go.skia.org/infra/...' % output)
return output
def main():
train = False
if '--train' in sys.argv:
train = True
tests = (
python_unit_tests,
recipe_test,
gen_tasks_test,
)
errs = []
for t in tests:
err = t(train)
if err:
errs.append(err)
if len(errs) > 0:
print >> sys.stderr, 'Test failures:\n'
for err in errs:
print >> sys.stderr, '=============================='
print >> sys.stderr, err
print >> sys.stderr, '=============================='
sys.exit(1)
if train:
print 'Trained tests successfully.'
else:
print 'All tests passed!'
if __name__ == '__main__':
main()
|
{
"content_hash": "2af299b342aa12dc2dde34cc38f6b58a",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 78,
"avg_line_length": 22.430232558139537,
"alnum_prop": 0.5775012960082945,
"repo_name": "Hikari-no-Tenshi/android_external_skia",
"id": "10546ed7223813aa65262358417e784cb2cd981c",
"size": "2088",
"binary": false,
"copies": "7",
"ref": "refs/heads/10.0",
"path": "infra/bots/infra_tests.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "12375"
},
{
"name": "Batchfile",
"bytes": "1155"
},
{
"name": "C",
"bytes": "860408"
},
{
"name": "C++",
"bytes": "24242899"
},
{
"name": "CSS",
"bytes": "11147"
},
{
"name": "Go",
"bytes": "29067"
},
{
"name": "HTML",
"bytes": "932599"
},
{
"name": "Java",
"bytes": "24340"
},
{
"name": "JavaScript",
"bytes": "377437"
},
{
"name": "Makefile",
"bytes": "67776"
},
{
"name": "Objective-C",
"bytes": "23795"
},
{
"name": "Objective-C++",
"bytes": "111148"
},
{
"name": "Python",
"bytes": "499622"
},
{
"name": "Shell",
"bytes": "63350"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0009_auto_20170206_0059'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='multiple_org',
field=models.BooleanField(default=False),
),
]
|
{
"content_hash": "09642b8225c4b0b053e9d657657dcf1f",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 53,
"avg_line_length": 21.555555555555557,
"alnum_prop": 0.6005154639175257,
"repo_name": "awemulya/fieldsight-kobocat",
"id": "e19b04897f6e3ab2f576ca086c8a6bededd6c5f1",
"size": "412",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "onadata/apps/users/migrations/0010_userprofile_multiple_org.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "70153"
},
{
"name": "Dockerfile",
"bytes": "2462"
},
{
"name": "HTML",
"bytes": "1488442"
},
{
"name": "JavaScript",
"bytes": "674757"
},
{
"name": "Makefile",
"bytes": "2286"
},
{
"name": "Python",
"bytes": "5340355"
},
{
"name": "Shell",
"bytes": "16493"
}
],
"symlink_target": ""
}
|
import jinja2
import pytest
def test_list_templates(site):
assert site.aips[62].env.list_templates() == ['en', 'generic']
assert site.aips[38].env.list_templates() == ['generic']
def test_get_template(site):
assert isinstance(
site.aips[62].env.get_template('generic'),
jinja2.Template,
)
with pytest.raises(jinja2.TemplateNotFound):
site.aips[62].env.get_template('bogus')
def test_template_auto_blocks(site):
generic = site.aips[62].env.get_template('generic')
assert tuple(generic.blocks.keys()) == (
'guidance',
'bp_myriel',
'interface_definitions',
'reading_a_book',
'further_reading',
)
|
{
"content_hash": "382f39cc7815c198658bf6e1e3fb95ea",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 66,
"avg_line_length": 25.74074074074074,
"alnum_prop": 0.6258992805755396,
"repo_name": "aip-dev/site-generator",
"id": "92be77cc7c77ecf9637ad1fef8a4327db45cda70",
"size": "1270",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_jinja_loaders.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "25089"
},
{
"name": "Jinja",
"bytes": "24694"
},
{
"name": "Python",
"bytes": "76927"
},
{
"name": "SCSS",
"bytes": "22086"
}
],
"symlink_target": ""
}
|
from panda3d.core import *
from panda3d.direct import *
from direct.showbase import DirectObject
from direct.fsm import ClassicFSM, State
from toontown.toonbase import ToontownGlobals
from toontown.coghq import CountryClubRoomSpecs
from direct.directnotify import DirectNotifyGlobal
import random
class CountryClubRoom(DirectObject.DirectObject):
notify = DirectNotifyGlobal.directNotify.newCategory('CountryClubRoom')
FloorCollPrefix = 'mintFloorColl'
CashbotMintDoorFrame = 'phase_10/models/cashbotHQ/DoorFrame'
def __init__(self, path = None):
if path is not None:
if path in CountryClubRoomSpecs.BossbotCountryClubConnectorRooms:
loadFunc = loader.loadModelCopy
else:
loadFunc = loader.loadModel
self.setGeom(loadFunc(path))
self.localToonFSM = ClassicFSM.ClassicFSM('CountryClubRoomLocalToonPresent', [State.State('off', self.enterLtOff, self.exitLtOff, ['notPresent']), State.State('notPresent', self.enterLtNotPresent, self.exitLtNotPresent, ['present']), State.State('present', self.enterLtPresent, self.exitLtPresent, ['notPresent'])], 'notPresent', 'notPresent')
self.localToonFSM.enterInitialState()
return
def delete(self):
del self.localToonFSM
def enter(self):
self.localToonFSM.request('notPresent')
def exit(self):
self.localToonFSM.requestFinalState()
def setRoomNum(self, num):
self.roomNum = num
def getRoomNum(self):
return self.roomNum
def setGeom(self, geom):
if geom is None:
import pdb
pdb.set_trace()
self.__geom = geom
return
def getGeom(self):
return self.__geom
def _getEntrances(self):
return self.__geom.findAllMatches('**/ENTRANCE*')
def _getExits(self):
return self.__geom.findAllMatches('**/EXIT*')
def attachTo(self, other, rng):
otherExits = other._getExits()
entrances = self._getEntrances()
otherDoor = otherExits[0]
thisDoor = rng.choice(entrances)
geom = self.getGeom()
otherGeom = other.getGeom()
self.notify.debug('thisDoor = %s' % thisDoor)
self.notify.debug('otherDoor = %s' % otherDoor)
self.notify.debug('thisGeom = %s' % geom)
self.notify.debug('otherGeom = %s' % otherGeom)
debugAxis1 = None
if debugAxis1:
debugAxis1.reparentTo(thisDoor)
debugAxis2 = None
if debugAxis2:
debugAxis2.reparentTo(otherDoor)
debugAxis2.setColorScale(0.5, 0.5, 0.5, 1)
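# Align this room's entrance with the other room's exit: parent our
# geometry to a temporary node at the exit door, cancel out the
# entrance's position and heading, then reparent into the scene graph.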
tempNode = otherDoor.attachNewNode('tempRotNode')
geom.reparentTo(tempNode)
geom.clearMat()
newGeomPos = Vec3(0) - thisDoor.getPos(geom)
self.notify.debug('newGeomPos = %s' % newGeomPos)
geom.setPos(newGeomPos)
newTempNodeH = -thisDoor.getH(otherDoor)
self.notify.debug('newTempNodeH =%s' % newTempNodeH)
tempNode.setH(newTempNodeH)
geom.wrtReparentTo(otherGeom.getParent())
tempNode.removeNode()
return
def getFloorCollName(self):
return '%s%s' % (CountryClubRoom.FloorCollPrefix, self.roomNum)
def initFloorCollisions(self):
allColls = self.getGeom().findAllMatches('**/+CollisionNode')
floorColls = []
for coll in allColls:
bitmask = coll.node().getIntoCollideMask()
if not (bitmask & ToontownGlobals.FloorBitmask).isZero():
floorColls.append(coll)
if len(floorColls) > 0:
floorCollName = self.getFloorCollName()
others = self.getGeom().findAllMatches('**/%s' % floorCollName)
for other in others:
other.setName('%s_renamed' % floorCollName)
for floorColl in floorColls:
floorColl.setName(floorCollName)
def enterLtOff(self):
pass
def exitLtOff(self):
pass
def enterLtNotPresent(self):
pass
def exitLtNotPresent(self):
pass
def enterLtPresent(self):
pass
def exitLtPresent(self):
pass
|
{
"content_hash": "55ecc7d883773f6e78f92df84307c927",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 351,
"avg_line_length": 33.524193548387096,
"alnum_prop": 0.6382006254510464,
"repo_name": "silly-wacky-3-town-toon/SOURCE-COD",
"id": "973af7d3c06cfcf8dd14bfee50a8357d4ffdfbf9",
"size": "4157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toontown/coghq/CountryClubRoom.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "10249"
},
{
"name": "C",
"bytes": "1752256"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "5485400"
},
{
"name": "Emacs Lisp",
"bytes": "210083"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Makefile",
"bytes": "895"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "NSIS",
"bytes": "1009050"
},
{
"name": "Objective-C",
"bytes": "21821"
},
{
"name": "PLSQL",
"bytes": "10200"
},
{
"name": "Pascal",
"bytes": "4986"
},
{
"name": "Perl6",
"bytes": "30612"
},
{
"name": "Puppet",
"bytes": "259"
},
{
"name": "Python",
"bytes": "33566014"
},
{
"name": "Shell",
"bytes": "14642"
},
{
"name": "Tcl",
"bytes": "2084458"
}
],
"symlink_target": ""
}
|
import os,math
import Tkinter as Tk
from tiasgatfuncs import *
def doit(fn,dp,mt):
""" JHT: this is the module that analyses an ALFOSC pinhole spectrum
with a Vertical Grism, and recommends an alignment offset to the
current GRISM WHEEL stepper motor units."""
## First check if data file exists
if not os.access(dp+fn+".fits",os.F_OK):
messageOut(mt,"File not found: "+dp+fn+".fits\n")
return "File not found: "+dp+fn+".fits"
messageOut(mt,"\nVertical-Grism analysis of file: "+dp+fn+".fits\n")
from pyraf import iraf
from pyraf import gwm
## Read current grism wheel position from image FITS header
iraf.images.imutil.imgets(dp+fn,"GRISM")
grismid=iraf.images.imutil.imgets.value
if grismid.find("Grism")==-1:
iraf.images.imutil.imgets(dp+fn,"FILTER") # maybe in filter wheel?
grismid=iraf.images.imutil.imgets.value
if grismid.find("Grism")==-1:
messageOut(mt,"Vertical-Grism mode: no vertical grism in wheel\n")
return "File %s: Vertical-Grism mode: no vertical grism in wheel" % fn
## Read current aperture wheel position from image FITS header
iraf.images.imutil.imgets(dp+fn,"APERTUR")
slitid=iraf.images.imutil.imgets.value
if slitid.find("inho")==-1:
messageOut(mt,"Vertical-Grism mode: no pinhole in aperture wheel\n")
return "File %s: Vertical-Grism mode: no pinhole in aperture wheel" % fn
iraf.noao(_doprint=0)
iraf.noao.imred(_doprint=0)
iraf.noao.imred.specred(_doprint=0)
if not os.access("/tmp/tiasgat/",os.F_OK):
os.mkdir("/tmp/tiasgat/")
os.chmod("/tmp/tiasgat/",0777)
if os.access("/tmp/tiasgat/plot",os.F_OK): os.remove("/tmp/tiasgat/plot")
if os.access("/tmp/tiasgat/plot2",os.F_OK): os.remove("/tmp/tiasgat/plot2")
if os.access("/tmp/tiasgat/aplast",os.F_OK): os.remove("/tmp/tiasgat/aplast")
if os.access("/tmp/tiasgat/tvmarks",os.F_OK): os.remove("/tmp/tiasgat/tvmarks")
if os.access("/tmp/tiasgat/logfile",os.F_OK): os.remove("/tmp/tiasgat/logfile")
## Note that this will *not* update any uparm files !! (see pyraf documentation)
iraf.noao.imred.specred.dispaxis=2
iraf.noao.imred.specred.database="/tmp/tiasgat/"
iraf.noao.imred.specred.plotfile="/tmp/tiasgat/plot"
iraf.noao.imred.specred.logfile="/tmp/tiasgat/logfile"
iraf.noao.imred.specred.apedit.width=15
#iraf.lpar(iraf.noao.imred.specred.aptrace)
## Display image on ds9
iraf.set(stdimage="imt512")
iraf.display(dp+fn,1,fill="no",Stdout="/dev/null")
# Suppress IRAF query for number of apertures to find
# This is only necessary for the widest slits: then the call to
# apfind results in an empty database file, as it cannot find an aperture.
# But aptrace works fine anyway (except for the annoying query) !?
iraf.noao.imred.specred.apfind.setParam('nfind.p_value', 1)
iraf.noao.imred.specred.apfind.setParam('nfind.p_mode','h')
## 'find' and trace spectrum; this will dump a plot to /tmp/tiasgat/plot
lines = iraf.noao.imred.specred.apfind(dp+fn,nfind=1,interactive="no", Stdout=1)
for i in range (0,len(lines)): messageOut(mt,lines[i]+"\n")
# To properly fit grism #3 i need low-rej=2.1 and niter=8
lines = iraf.noao.imred.specred.aptrace(dp+fn,interactive="no",step=5,low_reject=2.1,
high_reject=2.1,function="leg",order=2,niterate=8,naverage=1, Stdout=1)
for i in range (0,len(lines)): messageOut(mt,lines[i]+"\n")
## Start graphics window; select the correct plot; show plot
gwm.window("Tiasgat! graphics")
iraf.plot.gkiextract("/tmp/tiasgat/plot",2,Stdout="/tmp/tiasgat/plot2")
gwm.getActiveGraphicsWindow().load("/tmp/tiasgat/plot2")
### how to read the aperture file, as output by aptrace ####
###
### center line 6 gives zero point
### max,min lines 24-25 n = (2 * x - (max + min)) / (max - min)
### c1,c2 lines 26-27
###
### The polynomial can be expressed as the sum
###
### poly = sum from i=1 to order {c_i * z_i}
###
### where the c_i are the coefficients and the z_i are defined
### iteratively as:
###
### z_1 = 1
### z_2 = n
### z_i = ((2*i-3) * n * z_{i-1} - (i-2) * z_{i-2}) / (i - 1)
###
### So for order=2 and for vertical slit/grism: X=center+c1+c2*n
### X=center + c1 + c2*(2 * Y - (max + min)) / (max - min)
### translated to X=a + bY
### a=center + c1 - c2*(max+min)/(max-min)
### b=2*C2/(max-min)
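### Illustrative example (made-up numbers): center=1000, c1=2.0, c2=0.5,
### min=100, max=900 gives a = 1000 + 2.0 - 0.5*1000/800 = 1001.375 and
### b = 2*0.5/800 = 0.00125, i.e. the trace drifts by about 1.25 pixels
### per 1000 rows, which is the tilt the wheel offset below corrects.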
## Read the aperture definition file
apfile=open("/tmp/tiasgat/ap"+dp.replace('/','_')+fn,'r')
lines=apfile.readlines()
apfile.close()
#print lines[5], lines[23:]
c0 = float(lines[5].split(None,9)[1].strip())
lower = float(lines[23].strip())
upper = float(lines[24].strip())
c1 = float(lines[25].strip())
c2 = float(lines[26].strip())
a = c0 + c1 - c2*(upper+lower)/(upper-lower)
b = 2*c2/(upper-lower)
#print "zeropoint ", a, " slope ",b
## Remove aperture definition file
if os.access("/tmp/tiasgat/ap"+dp.replace('/','_')+fn,os.F_OK):
os.remove("/tmp/tiasgat/ap"+dp.replace('/','_')+fn)
## Mark the fit on the image display
if os.access("/tmp/tiasgat/tvmarks",os.F_OK): os.remove("/tmp/tiasgat/tvmarks")
tvmarkfile=open("/tmp/tiasgat/tvmarks",'w')
for i in range(int(lower),int(upper)+1,3):
tvmarkfile.write(str(a+b*i)+" "+str(i)+" 100 s \n")
tvmarkfile.close()
iraf.tv.tvmark(1,"",commands="/tmp/tiasgat/tvmarks",interactive="no")
## Read current grism wheel position from image FITS header
iraf.images.imutil.imgets(dp+fn,"ALGRSTP")
oldwheelunits=float(iraf.images.imutil.imgets.value)
#print "GRISMSTE ", oldwheelunits
## Read binning FITS headers
iraf.images.imutil.imgets(dp+fn,"CDELT1")
xbin=float(iraf.images.imutil.imgets.value)
iraf.images.imutil.imgets(dp+fn,"CDELT2")
ybin=float(iraf.images.imutil.imgets.value)
messageOut(mt,"\nBinning factors "+str(int(xbin))+" x "+str(int(ybin))+"\n")
## Correct the angle for the binning factors.
## A full wheel turn corresponds to 320000 units
offsetangle=-320000 * math.atan(b*xbin/ybin) / (2*math.pi)
messageOut(mt,"Offset to motor units "+str(offsetangle)+"\n")
newwheelunits=offsetangle + oldwheelunits
if newwheelunits < 0: newwheelunits+=320000
return "Result for %s : current GRISM wheel units %d, suggested new value %d" % \
(fn, (0.5+oldwheelunits), (0.5+newwheelunits))
|
{
"content_hash": "637d76da9dbddc8ff4bc407334148b2b",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 94,
"avg_line_length": 38.25443786982248,
"alnum_prop": 0.6536736272235112,
"repo_name": "sniemi/SamPy",
"id": "c6f7ef04035cf05bb8e3e49103a54116d00cab72",
"size": "6613",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sandbox/src1/VG.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "296"
},
{
"name": "C",
"bytes": "68436"
},
{
"name": "C++",
"bytes": "45956"
},
{
"name": "CSS",
"bytes": "35570"
},
{
"name": "Fortran",
"bytes": "45191"
},
{
"name": "HTML",
"bytes": "107435"
},
{
"name": "IDL",
"bytes": "13651"
},
{
"name": "JavaScript",
"bytes": "25435"
},
{
"name": "Makefile",
"bytes": "26035"
},
{
"name": "Matlab",
"bytes": "1508"
},
{
"name": "Perl",
"bytes": "59198"
},
{
"name": "PostScript",
"bytes": "1403536"
},
{
"name": "Prolog",
"bytes": "16061"
},
{
"name": "Python",
"bytes": "5763358"
},
{
"name": "R",
"bytes": "208346"
},
{
"name": "Rebol",
"bytes": "161"
},
{
"name": "Roff",
"bytes": "73616"
},
{
"name": "Ruby",
"bytes": "2032"
},
{
"name": "Shell",
"bytes": "41512"
},
{
"name": "Tcl",
"bytes": "44150"
},
{
"name": "TeX",
"bytes": "107783"
}
],
"symlink_target": ""
}
|
"""Logger classes and a few convenience methods."""
import os
import sys
import platform
import logging
import tempfile
import shutil
import subprocess
import warnings
def is_str(string):
"""
Python 2 and 3 compatible string checker.
Args:
string (str | basestring): the string to check
Returns:
bool: True or False
"""
if sys.version_info[:2] >= (3, 0):
return isinstance(string, str)
return isinstance(string, basestring)
def find_xml_generator(name="castxml"):
"""
Try to find a c++ parser (xml generator)
Args:
name (str): name of the c++ parser (e.g. castxml)
Returns:
path (str), name (str): path to the xml generator and it's name
If no c++ parser is found the function raises an exception.
pygccxml currently supports only castxml as c++ parser.
"""
if sys.version_info[:2] >= (3, 3):
path = _find_xml_generator_for_python_greater_equals_33(name)
else:
path = _find_xml_generator_for_legacy_python(name)
if path == "" or path is None:
raise Exception("No c++ parser found. Please install castxml.")
return path.rstrip(), name
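# Illustrative usage:
#   generator_path, generator_name = find_xml_generator()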
def _find_xml_generator_for_python_greater_equals_33(name):
return shutil.which(name)
def _find_xml_generator_for_legacy_python(name):
if platform.system() == "Windows":
command = "where"
else:
command = "which"
p = subprocess.Popen([command, name], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
path = p.stdout.read().decode("utf-8")
p.wait()
p.stdout.close()
p.stderr.close()
return path.rstrip()
def _create_logger_(name):
"""Implementation detail, creates a logger."""
logger = logging.getLogger(name)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(levelname)s %(message)s'))
logger.addHandler(handler)
logger.setLevel(logging.INFO)
return logger
class loggers(object):
"""Class-namespace, defines a few loggers classes, used in the project."""
cxx_parser = _create_logger_('pygccxml.cxx_parser')
"""
Logger for C++ parser functionality
If you set this logger level to DEBUG, you will be able to see the exact
command line used to invoke GCC-XML and any errors that occur during XML
parsing
"""
pdb_reader = _create_logger_('pygccxml.pdb_reader')
"""
Logger for MS .pdb file reader functionality
"""
queries_engine = _create_logger_('pygccxml.queries_engine')
"""
Logger for query engine functionality.
If you set this logger level to DEBUG, you will be able to see what queries
are run against the declarations tree, measure their performance and perhaps
even improve it.
Query engine reports queries and whether they are optimized or not.
"""
declarations_cache = _create_logger_('pygccxml.declarations_cache')
"""
Logger for declarations tree cache functionality
If you set this logger level to DEBUG, you will be able to see exactly what
happens when you read the declarations from a cache file, and to decide
which cache strategy works best for you.
"""
root = logging.getLogger('pygccxml')
"""
Root logger exists for your convenience only.
"""
all_loggers = [
root, cxx_parser, queries_engine, declarations_cache, pdb_reader]
"""
Contains all logger classes, defined by the class.
"""
@staticmethod
def set_level(level):
"""Set the same logging level for all the loggers at once."""
for logger in loggers.all_loggers:
logger.setLevel(level)
def remove_file_no_raise(file_name, config):
"""Removes file from disk if exception is raised."""
# The removal can be disabled by the config for debugging purposes.
if config.keep_xml:
return True
try:
if os.path.exists(file_name):
os.remove(file_name)
except IOError as error:
loggers.root.error(
"Error occurred while removing temporary created file('%s'): %s",
file_name, str(error))
# pylint: disable=W0622
def create_temp_file_name(suffix, prefix=None, dir=None, directory=None):
"""
Small convenience function that creates temporary files.
This function is a wrapper around the Python built-in
function tempfile.mkstemp.
"""
if dir is not None:
warnings.warn(
"The dir argument is deprecated.\n" +
"Please use the directory argument instead.", DeprecationWarning)
# Deprecated since 1.9.0, will be removed in 2.0.0
directory = dir
if not prefix:
prefix = tempfile.gettempprefix()
fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=directory)
file_obj = os.fdopen(fd)
file_obj.close()
return name
def normalize_path(some_path):
"""Return os.path.normpath(os.path.normcase(some_path))."""
return os.path.normpath(os.path.normcase(some_path))
def contains_parent_dir(fpath, dirs):
"""
Returns true if fpath starts with one of the paths in dirs, i.e. if
one of those paths is a parent directory of fpath.
Precondition: dirs and fpath should be normalized before calling
this function.
"""
# Note: this function is used nowhere in pygccxml but is used
# at least by pyplusplus; so it should stay here.
return bool([x for x in dirs if _f(fpath, x)])
def _f(fpath, dir_):
"""Helper function for contains_parent_dir function."""
return fpath.startswith(dir_)
def get_architecture():
"""
Returns computer architecture: 32 or 64.
The guess is based on sys.maxsize.
"""
if sys.maxsize == 2147483647:
return 32
elif sys.maxsize == 9223372036854775807:
return 64
else:
raise RuntimeError("Unknown architecture")
class cached(property):
"""Convert a method into a cached attribute."""
# The following code is cut-and-paste from this post:
# http://groups.google.com/group/comp.lang.python/browse_thread/
# thread/5b71896c06bd0f76/
# Thanks to Michele Simionato
def __init__(self, method):
private = '_' + method.__name__
def fget(s):
try:
return getattr(s, private)
except AttributeError:
value = method(s)
setattr(s, private, value)
return value
def fdel(s):
del s.__dict__[private]
super(cached, self).__init__(fget, fdel=fdel)
def reset(self):
cls = self.__class__
for name in dir(cls):
attr = getattr(cls, name)
if isinstance(attr, cached):
delattr(self, name)
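# Illustrative usage (hypothetical class):
#
#   class declarations_t(object):
#       @cached
#       def size(self):
#           return self._compute_size()  # evaluated once, then memoized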
def get_tr1(name):
"""In libstd++ the tr1 namespace needs special care.
Return either an empty string or tr1::, useful for
appending to search patterns.
Args:
name (str): the name of the declaration
Returns:
str: an empty string or "tr1::"
"""
tr1 = ""
if "tr1" in name:
tr1 = "tr1::"
return tr1
class cxx_standard(object):
"""Helper class for parsing the C++ standard version.
This class holds the C++ standard version the XML generator has been
configured with, and provides helpers functions for querying C++ standard
version related information.
"""
__STD_CXX = {
'-std=c++98': 199711,
'-std=gnu++98': 199711,
'-std=c++03': 199711,
'-std=gnu++03': 199711,
'-std=c++0x': 201103,
'-std=gnu++0x': 201103,
'-std=c++11': 201103,
'-std=gnu++11': 201103,
'-std=c++1y': 201402,
'-std=gnu++1y': 201402,
'-std=c++14': 201402,
'-std=gnu++14': 201402,
'-std=c++1z': float('inf'),
'-std=gnu++1z': float('inf'),
}
def __init__(self, cflags):
"""Class constructor that parses the XML generator's command line
Args:
cflags (str): cflags command line arguments passed to the XML
generator
"""
super(cxx_standard, self).__init__()
self._stdcxx = None
self._is_implicit = False
for key in cxx_standard.__STD_CXX:
if key in cflags:
self._stdcxx = key
self._cplusplus = cxx_standard.__STD_CXX[key]
if not self._stdcxx:
if '-std=' in cflags:
raise RuntimeError('Unknown -std=c++xx flag used')
# Assume c++03 by default
self._stdcxx = '-std=c++03'
self._cplusplus = cxx_standard.__STD_CXX['-std=c++03']
self._is_implicit = True
@property
def stdcxx(self):
"""Returns the -std=c++xx option passed to the constructor"""
return self._stdcxx
@property
def is_implicit(self):
"""Indicates whether a -std=c++xx was specified"""
return self._is_implicit
@property
def is_cxx03(self):
"""Returns true if -std=c++03 is being used"""
return self._cplusplus == cxx_standard.__STD_CXX['-std=c++03']
@property
def is_cxx11(self):
"""Returns true if -std=c++11 is being used"""
return self._cplusplus == cxx_standard.__STD_CXX['-std=c++11']
@property
def is_cxx11_or_greater(self):
"""Returns true if -std=c++11 or a newer standard is being used"""
return self._cplusplus >= cxx_standard.__STD_CXX['-std=c++11']
@property
def is_cxx14(self):
"""Returns true if -std=c++14 is being used"""
return self._cplusplus == cxx_standard.__STD_CXX['-std=c++14']
@property
def is_cxx14_or_greater(self):
"""Returns true if -std=c++14 or a newer standard is being used"""
return self._cplusplus >= cxx_standard.__STD_CXX['-std=c++14']
@property
def is_cxx1z(self):
"""Returns true if -std=c++1z is being used"""
return self._cplusplus == cxx_standard.__STD_CXX['-std=c++1z']
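# Illustrative usage (hypothetical cflags):
#
#   std = cxx_standard('-std=c++11 -fPIC')
#   std.is_cxx11              # -> True
#   std.is_cxx14_or_greater   # -> False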
class DeprecationWrapper(object):
"""
A small wrapper class useful when deprecating classes.
This class is not part of the public API.
"""
def __init__(self, new_target, old_name, new_name, version):
self.new_target = new_target
self.old_name = old_name
self.new_name = new_name
self.version = version
def _warn(self):
warnings.warn(
self.old_name + " is deprecated. Please use " + self.new_name +
" instead. This will be removed in version " + self.version,
DeprecationWarning)
def __call__(self, *args, **kwargs):
self._warn()
return self.new_target(*args, **kwargs)
def __getattr__(self, attr):
self._warn()
return getattr(self.new_target, attr)
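# Illustrative usage (hypothetical names):
#
#   parser_t = DeprecationWrapper(new_parser_t, 'parser_t', 'new_parser_t',
#                                 '2.0.0')
#   parser_t()  # still works, but emits a DeprecationWarning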
|
{
"content_hash": "e9c9b5ba00b4fe171bdb89bc56b4c09d",
"timestamp": "",
"source": "github",
"line_count": 388,
"max_line_length": 79,
"avg_line_length": 27.804123711340207,
"alnum_prop": 0.6081757508342603,
"repo_name": "stnava/ITK",
"id": "cf6279fd350b8dab7a550cb8cc684ef02ef81b06",
"size": "10983",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Modules/ThirdParty/pygccxml/src/pygccxml/utils/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "306"
},
{
"name": "C",
"bytes": "33796739"
},
{
"name": "C++",
"bytes": "47670177"
},
{
"name": "CMake",
"bytes": "2446263"
},
{
"name": "CSS",
"bytes": "24960"
},
{
"name": "DIGITAL Command Language",
"bytes": "709"
},
{
"name": "Fortran",
"bytes": "2260380"
},
{
"name": "HTML",
"bytes": "208515"
},
{
"name": "Io",
"bytes": "1833"
},
{
"name": "Java",
"bytes": "28585"
},
{
"name": "JavaScript",
"bytes": "1522"
},
{
"name": "Lex",
"bytes": "7073"
},
{
"name": "Makefile",
"bytes": "292947"
},
{
"name": "Objective-C",
"bytes": "59064"
},
{
"name": "Objective-C++",
"bytes": "5773"
},
{
"name": "OpenEdge ABL",
"bytes": "85244"
},
{
"name": "Perl",
"bytes": "18085"
},
{
"name": "Python",
"bytes": "886833"
},
{
"name": "Ruby",
"bytes": "296"
},
{
"name": "Shell",
"bytes": "172972"
},
{
"name": "Tcl",
"bytes": "74786"
},
{
"name": "WebAssembly",
"bytes": "4056"
},
{
"name": "XSLT",
"bytes": "206927"
},
{
"name": "Yacc",
"bytes": "21035"
}
],
"symlink_target": ""
}
|
def ensure_dir(dir_path, overwrite=False):
from shutil import rmtree
from os.path import isdir, exists
from os import makedirs
if exists(dir_path):
if not isdir(dir_path):
raise ValueError("%s is a file..." % dir_path)
if overwrite:
rmtree(dir_path)
if not exists(dir_path):
makedirs(dir_path)
def detect_encoding(file_path):
from chardet.universaldetector import UniversalDetector
u = UniversalDetector()
with open(file_path, 'rb') as f:
for line in f:
u.feed(line)
u.close()
result = u.result
if result['encoding']:
return result['encoding']
return None
def numberfy(s):
n = s
try:
n = float(n)
return n
except Exception:
return s
def intify(s):
n = s
try:
n = int(n)
return n
except Exception:
return s
def read_df(file_path, delimiter='\t'):
# maybe use sniffer to unify this and csv reader... would be better
import io
enc = detect_encoding(file_path)
f = io.open(file_path, encoding=enc)
lines = [line.split(delimiter) for line in f]
f.close()
lines = [line for line in lines if len(line) != 0]
length = len(lines[0])
if not all([len(line) == length for line in lines]):
raise ValueError("tdf error, not all rows had the same length in %s" % file_path)
fieldnames = [c.encode('utf-8').strip() for c in lines[0]]
bad_chars = "".join([c for f in fieldnames for c in f if not c.isalnum()])
fieldnames = [f.strip(bad_chars) for f in fieldnames]
dict_lines = []
for line in lines[1:]:
d = {}
for i in xrange(length):
d[fieldnames[i]] = numberfy(line[i].encode('utf-8').strip())
dict_lines.append(d)
return dict_lines
def read_tdf(file_path):
return read_df(file_path, delimiter='\t')
def read_csv(file_path):
return read_df(file_path, delimiter=',')
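# Illustrative usage (file name and columns are made up):
#   rows = read_tdf('scores.tsv')  # -> [{'subject': 'S01', 'rt': 0.42}, ...]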
def round_to(x, base=2.5):
return base * round(float(x)/base)
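# Illustrative: round_to(7.3) -> 7.5, round_to(12, base=5) -> 10.0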
|
{
"content_hash": "5aa6ef24cb2d52cbc24af56359c8ba3e",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 89,
"avg_line_length": 29.085714285714285,
"alnum_prop": 0.5987229862475442,
"repo_name": "beOn/varys",
"id": "ae987f82ebac59c40dc8f85cf5cac35f8b7cdb5e",
"size": "2036",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "varys/util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "44535"
}
],
"symlink_target": ""
}
|
import sys
sys.path.insert(1, "../../")
import h2o, tests
def test_locate():
iris_path = h2o.locate("smalldata/iris/iris.csv")
try:
h2o.locate("smalldata/iris/afilethatdoesnotexist.csv")
assert False, "Expected h2o.locate to raise a ValueError"
except ValueError:
assert True
if __name__ == "__main__":
tests.run_test(sys.argv, test_locate)
|
{
"content_hash": "f2ec2fc94737b5d917b8954795a1117e",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 65,
"avg_line_length": 24.0625,
"alnum_prop": 0.6415584415584416,
"repo_name": "printedheart/h2o-3",
"id": "640849e72db15c21a6fcd5c1a76f48479537f2c0",
"size": "385",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "h2o-py/tests/testdir_misc/pyunit_locate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5090"
},
{
"name": "CSS",
"bytes": "163561"
},
{
"name": "CoffeeScript",
"bytes": "262107"
},
{
"name": "Emacs Lisp",
"bytes": "8927"
},
{
"name": "Groovy",
"bytes": "78"
},
{
"name": "HTML",
"bytes": "147257"
},
{
"name": "Java",
"bytes": "5417378"
},
{
"name": "JavaScript",
"bytes": "38932"
},
{
"name": "Makefile",
"bytes": "34005"
},
{
"name": "Python",
"bytes": "2098211"
},
{
"name": "R",
"bytes": "1831996"
},
{
"name": "Rebol",
"bytes": "7059"
},
{
"name": "Ruby",
"bytes": "3506"
},
{
"name": "Scala",
"bytes": "16336"
},
{
"name": "Shell",
"bytes": "47017"
},
{
"name": "TeX",
"bytes": "588475"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
import pytest
from pants.backend.docker.target_types import DockerImageTarget
from pants.backend.docker.util_rules.docker_build_args import docker_build_args
from pants.backend.docker.util_rules.docker_build_env import (
DockerBuildEnvironment,
DockerBuildEnvironmentRequest,
rules,
)
from pants.engine.addresses import Address
from pants.testutil.rule_runner import QueryRule, RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
return RuleRunner(
rules=[
docker_build_args,
*rules(),
QueryRule(DockerBuildEnvironment, [DockerBuildEnvironmentRequest]),
],
)
@pytest.mark.parametrize(
"env_vars, build_args, expected_env_vars",
[
(
(),
(),
{},
),
(
("ENV1",),
(),
{"ENV1": "val1"},
),
(
("ENV1=over1",),
(),
{"ENV1": "over1"},
),
(
(),
("ENV1=defined",),
{},
),
(
(),
("ENV1",),
{"ENV1": "val1"},
),
(
("ENV1=over1",),
("ENV1",),
{"ENV1": "over1"},
),
],
)
def test_docker_build_environment_vars_rule(
rule_runner: RuleRunner,
env_vars: tuple[str, ...],
build_args: tuple[str, ...],
expected_env_vars: dict[str, str],
) -> None:
tgt = DockerImageTarget({"extra_build_args": build_args}, address=Address("test"))
rule_runner.set_options(
[f"--docker-env-vars={env_var}" for env_var in env_vars],
env={
"ENV1": "val1",
"ENV2": "val2",
},
)
res = rule_runner.request(DockerBuildEnvironment, [DockerBuildEnvironmentRequest(tgt)])
assert res == DockerBuildEnvironment.create(expected_env_vars)
|
{
"content_hash": "bf2bbbb6e90d8fd1a66cf2cd79aa4dab",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 91,
"avg_line_length": 24.90909090909091,
"alnum_prop": 0.5234619395203337,
"repo_name": "benjyw/pants",
"id": "988b57f633930465cd71869a4d36b4d76f128e74",
"size": "2050",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "src/python/pants/backend/docker/util_rules/docker_build_env_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "688"
},
{
"name": "Go",
"bytes": "67315"
},
{
"name": "Java",
"bytes": "10690"
},
{
"name": "Kotlin",
"bytes": "6433"
},
{
"name": "Mustache",
"bytes": "3595"
},
{
"name": "Python",
"bytes": "7135320"
},
{
"name": "Rust",
"bytes": "1601736"
},
{
"name": "Scala",
"bytes": "21950"
},
{
"name": "Shell",
"bytes": "31723"
},
{
"name": "Starlark",
"bytes": "72809"
}
],
"symlink_target": ""
}
|
#!/usr/bin/env python3
#
# Copyright (c) 2017 Intel Corporation.
#
# SPDX-License-Identifier: Apache-2.0
#
"""
This script scans a specified object file and generates a header file
that defines macros for the offsets of various found structure members
(particularly symbols ending with ``_OFFSET`` or ``_SIZEOF``), primarily
intended for use in assembly code.
"""
from elftools.elf.elffile import ELFFile
from elftools.elf.sections import SymbolTableSection
import argparse
import sys
def get_symbol_table(obj):
for section in obj.iter_sections():
if isinstance(section, SymbolTableSection):
return section
raise LookupError("Could not find symbol table")
def gen_offset_header(input_name, input_file, output_file):
include_guard = "__GEN_OFFSETS_H__"
output_file.write("""/* THIS FILE IS AUTO GENERATED. PLEASE DO NOT EDIT.
*
* This header file provides macros for the offsets of various structure
* members. These offset macros are primarily intended to be used in
* assembly code.
*/
#ifndef %s
#define %s\n\n""" % (include_guard, include_guard))
obj = ELFFile(input_file)
for sym in get_symbol_table(obj).iter_symbols():
if isinstance(sym.name, bytes):
sym.name = str(sym.name, 'ascii')
if not sym.name.endswith(('_OFFSET', '_SIZEOF')):
continue
if sym.entry['st_shndx'] != 'SHN_ABS':
continue
if sym.entry['st_info']['bind'] != 'STB_GLOBAL':
continue
output_file.write(
"#define %s 0x%x\n" %
(sym.name, sym.entry['st_value']))
output_file.write("\n#endif /* %s */\n" % include_guard)
return 0
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"-i",
"--input",
required=True,
help="Input object file")
parser.add_argument(
"-o",
"--output",
required=True,
help="Output header file")
args = parser.parse_args()
input_file = open(args.input, 'rb')
output_file = open(args.output, 'w')
ret = gen_offset_header(args.input, input_file, output_file)
sys.exit(ret)
|
{
"content_hash": "a0f4df52913d0cf597f8f8c131add049",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 77,
"avg_line_length": 27.035714285714285,
"alnum_prop": 0.6318802289740203,
"repo_name": "Vudentz/zephyr",
"id": "11cb7227f78d02ec9a7393a34ded4d4fadd87f64",
"size": "2271",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "scripts/gen_offset_header.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "354867"
},
{
"name": "Batchfile",
"bytes": "110"
},
{
"name": "C",
"bytes": "23782049"
},
{
"name": "C++",
"bytes": "365645"
},
{
"name": "CMake",
"bytes": "574287"
},
{
"name": "EmberScript",
"bytes": "808"
},
{
"name": "HTML",
"bytes": "1631"
},
{
"name": "Haskell",
"bytes": "542"
},
{
"name": "Makefile",
"bytes": "3313"
},
{
"name": "Objective-C",
"bytes": "19541"
},
{
"name": "Perl",
"bytes": "198126"
},
{
"name": "Python",
"bytes": "1384284"
},
{
"name": "Shell",
"bytes": "75091"
},
{
"name": "SmPL",
"bytes": "19760"
},
{
"name": "Tcl",
"bytes": "3349"
},
{
"name": "VBA",
"bytes": "607"
},
{
"name": "Verilog",
"bytes": "6394"
}
],
"symlink_target": ""
}
|
import sys
try:
from setuptools import Extension, setup
except ImportError:
from distutils.core import Extension, setup
NAME = 'pysendfile'
VERSION = '2.0.1'
if sys.version_info < (2, 5):
sys.exit('python version not supported (< 2.5)')
if 'sunos' in sys.platform:
libraries = ["sendfile"]
else:
libraries = []
def main():
setup(name=NAME,
url='https://github.com/giampaolo/pysendfile',
version=VERSION,
description='A Python interface to sendfile(2)',
long_description=open('README.rst', 'r').read(),
author='Giampaolo Rodola',
author_email='g.rodola@gmail.com',
platforms='UNIX',
license='MIT',
keywords=['sendfile', 'python', 'performance', 'ftp'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: BSD',
'Operating System :: POSIX :: BSD :: FreeBSD',
'Operating System :: POSIX :: SunOS/Solaris',
'Operating System :: POSIX :: AIX',
'Programming Language :: C',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: System :: Networking',
'Topic :: System :: Operating System',
'Topic :: Internet :: File Transfer Protocol (FTP)',
'Topic :: Internet :: WWW/HTTP',
'License :: OSI Approved :: MIT License',
],
ext_modules=[Extension('sendfile',
sources=['sendfilemodule.c'],
libraries=libraries)])
if __name__ == '__main__':
main()
|
{
"content_hash": "0700eb19b9bb312e6de586a13d437027",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 66,
"avg_line_length": 37.23529411764706,
"alnum_prop": 0.5181674565560821,
"repo_name": "giampaolo/pysendfile",
"id": "54a991a385bd9da3a9f26780efab2ed38b49007b",
"size": "3789",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "10585"
},
{
"name": "Makefile",
"bytes": "1396"
},
{
"name": "Python",
"bytes": "29492"
},
{
"name": "Shell",
"bytes": "1532"
}
],
"symlink_target": ""
}
|