text
stringlengths 4
1.02M
| meta
dict |
|---|---|
'''
MAP Client Plugin Step
'''
import os
from PySide import QtGui
from PySide import QtCore
from mapclient.mountpoints.workflowstep import WorkflowStepMountPoint
from mapclientplugins.loadstlstep.configuredialog import ConfigureDialog
from gias.common import stlreader
import numpy as np
class LoadSTLStep(WorkflowStepMountPoint):
'''
Skeleton step which is intended to be a helpful starting point
for new steps.
'''
def __init__(self, location):
super(LoadSTLStep, self).__init__('Load STL', location)
self._configured = False # A step cannot be executed until it has been configured.
self._category = 'Input'
# Add any other initialisation code here:
# Ports:
self.addPort(('http://physiomeproject.org/workflow/1.0/rdf-schema#port',
'http://physiomeproject.org/workflow/1.0/rdf-schema#uses',
'python#string'))
self.addPort(('http://physiomeproject.org/workflow/1.0/rdf-schema#port',
'http://physiomeproject.org/workflow/1.0/rdf-schema#provides',
'http://physiomeproject.org/workflow/1.0/rdf-schema#pointcloud'))
self.addPort(('http://physiomeproject.org/workflow/1.0/rdf-schema#port',
'http://physiomeproject.org/workflow/1.0/rdf-schema#provides',
'numpy#array3d'))
self._config = {}
self._config['identifier'] = ''
self._config['Filename'] = '.stl'
self._filename = None
self._V = None
self._T = None
def execute(self):
'''
Add your code here that will kick off the execution of the step.
Make sure you call the _doneExecution() method when finished. This method
may be connected up to a button in a widget for example.
'''
# Put your execute step code here before calling the '_doneExecution' method.
if self._filename == None:
filename = self._config['Filename']
else:
filename = self._filename
S = stlreader.STL()
S.setFilename(filename)
S.load()
self._V = S.points
self._T = S.triangles
self._doneExecution()
def setPortData(self, index, dataIn):
if index==0:
self._filename = dataIn
def getPortData(self, index):
'''
Add your code here that will return the appropriate objects for this step.
The index is the index of the port in the port list. If there is only one
provides port for this step then the index can be ignored.
'''
if index == 1:
return self._V
elif index == 2:
return self._T
def configure(self):
'''
This function will be called when the configure icon on the step is
clicked. It is appropriate to display a configuration dialog at this
time. If the conditions for the configuration of this step are complete
then set:
self._configured = True
'''
dlg = ConfigureDialog()
dlg.identifierOccursCount = self._identifierOccursCount
dlg.setConfig(self._config)
dlg.validate()
dlg.setModal(True)
if dlg.exec_():
self._config = dlg.getConfig()
self._configured = dlg.validate()
self._configuredObserver()
def getIdentifier(self):
'''
The identifier is a string that must be unique within a workflow.
'''
return self._config['identifier']
def setIdentifier(self, identifier):
'''
The framework will set the identifier for this step when it is loaded.
'''
self._config['identifier'] = identifier
def serialize(self, location):
'''
Add code to serialize this step to disk. The filename should
use the step identifier (received from getIdentifier()) to keep it
unique within the workflow. The suggested name for the file on
disk is:
filename = getIdentifier() + '.conf'
'''
configuration_file = os.path.join(location, self.getIdentifier() + '.conf')
conf = QtCore.QSettings(configuration_file, QtCore.QSettings.IniFormat)
conf.beginGroup('config')
conf.setValue('identifier', self._config['identifier'])
conf.setValue('identifier', self._config['identifier'])
conf.setValue('Filename', self._config['Filename'])
conf.endGroup()
def deserialize(self, location):
'''
Add code to deserialize this step from disk. As with the serialize
method the filename should use the step identifier. Obviously the
filename used here should be the same as the one used by the
serialize method.
'''
configuration_file = os.path.join(location, self.getIdentifier() + '.conf')
conf = QtCore.QSettings(configuration_file, QtCore.QSettings.IniFormat)
conf.beginGroup('config')
self._config['identifier'] = conf.value('identifier', '')
self._config['identifier'] = conf.value('identifier', '')
self._config['Filename'] = conf.value('Filename', '.stl')
conf.endGroup()
d = ConfigureDialog()
d.identifierOccursCount = self._identifierOccursCount
d.setConfig(self._config)
self._configured = d.validate()
|
{
"content_hash": "ff015f7e4d9247d1f6292e712f740cb9",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 90,
"avg_line_length": 36.641891891891895,
"alnum_prop": 0.6125760649087221,
"repo_name": "MusculoskeletalAtlasProject/mapclient-tests",
"id": "7c4a3b96bd58a73fad8e2c1dfe3adb6ede3ca0d7",
"size": "5424",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "test_resources/updater_test/loadstlstep-master/mapclientplugins/loadstlstep/step.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "924193"
}
],
"symlink_target": ""
}
|
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from frame_sounds_ui import Ui_FrameSoundsForm
class EditorFrameSounds(QDialog, Ui_FrameSoundsForm):
def __init__(self, parent, title=u'创建音乐'):
super(EditorFrameSounds, self).__init__(parent)
self.setupUi(self)
self.setWindowTitle(title)
self.btnCreate.clicked.connect(self.on_framesound_creating)
def show(self):
self.exec_()
def on_framesound_creating(self):
frame_index = self.frame_index_.value()
frame_sound = self.frame_sound_.text()
if frame_sound == '':
QMessageBox.warning(self, u'创建音乐', '请填写音乐')
return
print('------------- EditorFrameSounds.on_framesound_creating', frame_index, frame_sound)
# TODO:
self.close()
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
win = QWidget()
win.show()
e = EditorFrameSounds(win)
e.show()
sys.exit(app.exec_())
|
{
"content_hash": "954bbcb32acc4cf6324038e21648c4f6",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 97,
"avg_line_length": 25.75,
"alnum_prop": 0.6106796116504855,
"repo_name": "ASMlover/study",
"id": "d7da5e471fe8ab87e2724a9a0d722156542b258d",
"size": "1105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "game/editor/frame_sounds.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "3055440"
},
{
"name": "Batchfile",
"bytes": "4662"
},
{
"name": "Brainfuck",
"bytes": "571"
},
{
"name": "C",
"bytes": "13569580"
},
{
"name": "C#",
"bytes": "3959"
},
{
"name": "C++",
"bytes": "14741264"
},
{
"name": "CMake",
"bytes": "543917"
},
{
"name": "CSS",
"bytes": "11505"
},
{
"name": "Common Lisp",
"bytes": "114"
},
{
"name": "Emacs Lisp",
"bytes": "6042"
},
{
"name": "Go",
"bytes": "105203"
},
{
"name": "Groovy",
"bytes": "2907"
},
{
"name": "HTML",
"bytes": "911945"
},
{
"name": "Lex",
"bytes": "9370"
},
{
"name": "Lua",
"bytes": "32829"
},
{
"name": "Makefile",
"bytes": "1000611"
},
{
"name": "NASL",
"bytes": "3609"
},
{
"name": "NewLisp",
"bytes": "5805"
},
{
"name": "Perl",
"bytes": "594"
},
{
"name": "Python",
"bytes": "2752752"
},
{
"name": "SWIG",
"bytes": "91"
},
{
"name": "Shell",
"bytes": "9993"
},
{
"name": "Vim script",
"bytes": "92204"
},
{
"name": "Yacc",
"bytes": "6278"
}
],
"symlink_target": ""
}
|
from newt.views import JSONRestView
from newt.views import AuthJSONRestView
from common.response import json_response
from django.shortcuts import render
from django.conf import settings
import logging
logger = logging.getLogger("newt." + __name__)
from importlib import import_module
acct_adapter = import_module(settings.NEWT_CONFIG['ADAPTERS']['ACCOUNT']['adapter'])
# /api/account/user/<user_name>/
# /api/account/user/id/<uid>/
class UserInfoView(AuthJSONRestView):
def get(self, request, user_name=None, uid=None):
logger.debug("Entering %s:%s" % (self.__class__.__name__, __name__))
return acct_adapter.get_user_info(user_name=user_name, uid=uid)
# /api/account/group/<group_name>/
# /api/account/group/<gid>/
class GroupInfoView(AuthJSONRestView):
def get(self, request, group_name=None, gid=None):
return acct_adapter.get_group_info(group_name=group_name, gid=gid)
# /api/account/<query>/
class ExtraAcctView(JSONRestView):
def get(self, request, query):
return acct_adapter.extras_router(request, query)
|
{
"content_hash": "212bc2260d80afad0399cc98e3ced698",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 84,
"avg_line_length": 36.62068965517241,
"alnum_prop": 0.7250470809792844,
"repo_name": "NERSC/newt-2.0",
"id": "8025b50d3cf99e1361b96f64ac52655e35722ce2",
"size": "1062",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "account/views.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "167357"
},
{
"name": "Shell",
"bytes": "1117"
}
],
"symlink_target": ""
}
|
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class UpdateFirewall(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the UpdateFirewall Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(UpdateFirewall, self).__init__(temboo_session, '/Library/Google/ComputeEngine/Firewalls/UpdateFirewall')
def new_input_set(self):
return UpdateFirewallInputSet()
def _make_result_set(self, result, path):
return UpdateFirewallResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return UpdateFirewallChoreographyExecution(session, exec_id, path)
class UpdateFirewallInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the UpdateFirewall
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_FirewallResource(self, value):
"""
Set the value of the FirewallResource input for this Choreo. ((required, json) A JSON string containing the firewall resource fields to set. Required properties include: allowed, name, network, and either sourceRanges or sourceTags.)
"""
super(UpdateFirewallInputSet, self)._set_input('FirewallResource', value)
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((optional, string) A valid access token retrieved during the OAuth process. This is required unless you provide the ClientID, ClientSecret, and RefreshToken to generate a new access token.)
"""
super(UpdateFirewallInputSet, self)._set_input('AccessToken', value)
def set_ClientID(self, value):
"""
Set the value of the ClientID input for this Choreo. ((conditional, string) The Client ID provided by Google. Required unless providing a valid AccessToken.)
"""
super(UpdateFirewallInputSet, self)._set_input('ClientID', value)
def set_ClientSecret(self, value):
"""
Set the value of the ClientSecret input for this Choreo. ((conditional, string) The Client Secret provided by Google. Required unless providing a valid AccessToken.)
"""
super(UpdateFirewallInputSet, self)._set_input('ClientSecret', value)
def set_Firewall(self, value):
"""
Set the value of the Firewall input for this Choreo. ((required, string) The name of the firewall to update.)
"""
super(UpdateFirewallInputSet, self)._set_input('Firewall', value)
def set_Project(self, value):
"""
Set the value of the Project input for this Choreo. ((required, string) The ID of a Google Compute project.)
"""
super(UpdateFirewallInputSet, self)._set_input('Project', value)
def set_RefreshToken(self, value):
"""
Set the value of the RefreshToken input for this Choreo. ((conditional, string) An OAuth refresh token used to generate a new access token when the original token is expired. Required unless providing a valid AccessToken.)
"""
super(UpdateFirewallInputSet, self)._set_input('RefreshToken', value)
class UpdateFirewallResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the UpdateFirewall Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Google.)
"""
return self._output.get('Response', None)
def get_NewAccessToken(self):
"""
Retrieve the value for the "NewAccessToken" output from this Choreo execution. ((string) Contains a new AccessToken when the RefreshToken is provided.)
"""
return self._output.get('NewAccessToken', None)
class UpdateFirewallChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return UpdateFirewallResultSet(response, path)
|
{
"content_hash": "475bfafdb9bbc7bde1babdb9f2706f23",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 254,
"avg_line_length": 48.02197802197802,
"alnum_prop": 0.7013729977116705,
"repo_name": "jordanemedlock/psychtruths",
"id": "fdf147008bacf351f069c59e5361badcf6d31f27",
"size": "5228",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "temboo/Library/Google/ComputeEngine/Firewalls/UpdateFirewall.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "18544"
},
{
"name": "HTML",
"bytes": "34650"
},
{
"name": "JavaScript",
"bytes": "423"
},
{
"name": "PHP",
"bytes": "1097"
},
{
"name": "Python",
"bytes": "23444578"
}
],
"symlink_target": ""
}
|
import argparse
import boto
import boto.s3
from boto.s3.connection import Location
from boto_cli import configure_logging
from boto_cli.s3 import class_iterator
from boto_cli.s3 import RegionMap
import logging
log = logging.getLogger('boto_cli')
from pprint import pprint
# configure command line argument parsing
parser = argparse.ArgumentParser(description='Create a S3 bucket in all/some available S3 regions')
parser.add_argument("bucket", help="A bucket name (will get region suffix)")
parser.add_argument("-r", "--region", help="A region substring selector (e.g. 'us-west')")
parser.add_argument("--access_key_id", dest='aws_access_key_id', help="Your AWS Access Key ID")
parser.add_argument("--secret_access_key", dest='aws_secret_access_key', help="Your AWS Secret Access Key")
parser.add_argument("-l", "--log", dest='log_level', default='WARNING',
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
help="The logging level to use. [default: WARNING]")
args = parser.parse_args()
configure_logging(log, args.log_level)
def isSelected(region):
return True if RegionMap[region].find(args.region) != -1 else False
# execute business logic
credentials = {'aws_access_key_id': args.aws_access_key_id, 'aws_secret_access_key': args.aws_secret_access_key}
heading = "Creating S3 buckets named '" + args.bucket + "'"
locations = class_iterator(Location)
if args.region:
heading += " (filtered by region '" + args.region + "')"
locations = filter(isSelected, locations)
s3 = boto.connect_s3(**credentials)
print heading + ":"
for location in locations:
region = RegionMap[location]
pprint(region, indent=2)
try:
bucket_name = args.bucket + '-' + region
print 'Creating bucket ' + bucket_name
s3.create_bucket(bucket_name, location=getattr(Location, location))
except boto.exception.BotoServerError, e:
log.error(e.error_message)
|
{
"content_hash": "d623aba018802ceb8e313b1c8079a57d",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 112,
"avg_line_length": 41.340425531914896,
"alnum_prop": 0.7040658775090067,
"repo_name": "cityindex-attic/ec2-clock-accuracy-research",
"id": "1f7f27821ffefa2f32ada1d0546cde559eb37958",
"size": "1961",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "infrastructure/scripts/create-buckets.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "90852"
},
{
"name": "Ruby",
"bytes": "12902"
},
{
"name": "Shell",
"bytes": "154"
}
],
"symlink_target": ""
}
|
'''
Created on Mar 4, 2012
@author: scottporter
'''
from resource.resourcebase import ResourceType, ResourceBase
from resource.resourcehandler import ResourceHandlerBase
from render.texture import Texture
from render.texturemanager import TextureManager
class ImageResourceHandler(ResourceHandlerBase):
def __init__(self):
ResourceHandlerBase.__init__(self)
self._resource_type = ResourceType.IMAGE
self._filetypes = ["jpg","png"]
def process_resource(self, filename, name):
# new_texture = None
# Does the Texture already exist in the Texture Manager
new_texture = TextureManager.get_instance().get_texture_by_filename(filename)
# for texture in TextureManager.get_instance().textures:
# if texture.filename == filename:
# new_texture = texture
# break
# If the texture isn't in the texture manager
if new_texture is None:
# Load the image
new_texture = Texture(filename=filename, name=name)
# Add it to the TextureManager
TextureManager.get_instance().add_texture(new_texture)
# Create a Resource object that also contains the Texture object within the data property
# new_resource = Texture(filename=filename)
# Return the new Resource
return new_texture
def remove_resource(self, resource):
# Remove it from the TextureManager
TextureManager.get_instance().remove_texture(resource)
|
{
"content_hash": "fbb85e6de4edbc89dcf8ffc52aac931a",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 101,
"avg_line_length": 33.10204081632653,
"alnum_prop": 0.6282367447595562,
"repo_name": "freneticmonkey/epsilonc",
"id": "82c66e4c01eee49b9d05a3638d8834d6786390ed",
"size": "1622",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "resources/scripts/resource/imageresourcehandler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "21612"
},
{
"name": "C++",
"bytes": "532243"
},
{
"name": "JavaScript",
"bytes": "28903"
},
{
"name": "Objective-C",
"bytes": "452"
},
{
"name": "Python",
"bytes": "72624"
},
{
"name": "Shell",
"bytes": "94"
}
],
"symlink_target": ""
}
|
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
It returns content of dockerfile and therefore displays it in results.
"""
from dockerfile_parse import DockerfileParser
from atomic_reactor.plugin import PreBuildPlugin
class CpDockerfilePlugin(PreBuildPlugin):
key = "dockerfile_content"
def __init__(self, tasker, workflow):
"""
constructor
:param tasker: DockerTasker instance
:param workflow: DockerBuildWorkflow instance
:return:
"""
# call parent constructor
super(CpDockerfilePlugin, self).__init__(tasker, workflow)
def run(self):
"""
try open dockerfile, output an error if there is one
"""
try:
return DockerfileParser(self.workflow.builder.df_path).content
except (IOError, OSError) as ex:
return "Couldn't retrieve dockerfile: %r" % ex
|
{
"content_hash": "2dcb2816ffe4edcc195e9df6b6f3979d",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 74,
"avg_line_length": 28.305555555555557,
"alnum_prop": 0.6673209028459274,
"repo_name": "jpopelka/atomic-reactor",
"id": "0c38ab5a5798ad68920952bc4b5ca17ae7214ade",
"size": "1019",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "atomic_reactor/plugins/pre_return_dockerfile.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "570871"
},
{
"name": "Shell",
"bytes": "3589"
}
],
"symlink_target": ""
}
|
from keras.utils import np_utils
import numpy as np
import h5py
class HDF5DatasetGenerator:
def __init__(self, dbPath, batchSize, preprocessors=None,
aug=None, binarize=True, classes=2):
# store the batch size, preprocessors, and data augmentor,
# whether or not the labels should be binarized, along with
# the total number of classes
self.batchSize = batchSize
self.preprocessors = preprocessors
self.aug = aug
self.binarize = binarize
self.classes = classes
# open the HDF5 database for reading and determine the total
# number of entries in the database
self.db = h5py.File(dbPath)
self.numImages = self.db["labels"].shape[0]
def generator(self, passes=np.inf):
# initialize the epoch count
epochs = 0
# keep looping infinitely -- the model will stop once we have
# reach the desired number of epochs
while epochs < passes:
# loop over the HDF5 dataset
for i in np.arange(0, self.numImages, self.batchSize):
# extract the images and labels from the HDF dataset
images = self.db["images"][i: i + self.batchSize]
labels = self.db["labels"][i: i + self.batchSize]
# check to see if the labels should be binarized
if self.binarize:
labels = np_utils.to_categorical(labels,
self.classes)
# check to see if our preprocessors are not None
if self.preprocessors is not None:
# initialize the list of processed images
procImages = []
# loop over the images
for image in images:
# loop over the preprocessors and apply each
# to the image
for p in self.preprocessors:
image = p.preprocess(image)
# update the list of processed images
procImages.append(image)
# update the images array to be the processed
# images
images = np.array(procImages)
# if the data augmenator exists, apply it
if self.aug is not None:
(images, labels) = next(self.aug.flow(images,
labels, batch_size=self.batchSize))
# yield a tuple of images and labels
yield (images, labels)
# increment the total number of epochs
epochs += 1
def close(self):
# close the database
self.db.close()
|
{
"content_hash": "0827f87d3240bbf24fe4829348919728",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 63,
"avg_line_length": 29.958333333333332,
"alnum_prop": 0.6903106165971257,
"repo_name": "CyberLabs-BR/face_detect",
"id": "a8cc428f2aa6aa7286a4cbfee9402077ca6b8527",
"size": "2189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyimagesearch/io/hdf5datasetgenerator.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "141928"
},
{
"name": "Shell",
"bytes": "109"
}
],
"symlink_target": ""
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class ColorBar(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scatterternary.marker"
_path_str = "scatterternary.marker.colorbar"
_valid_props = {
"bgcolor",
"bordercolor",
"borderwidth",
"dtick",
"exponentformat",
"len",
"lenmode",
"nticks",
"outlinecolor",
"outlinewidth",
"separatethousands",
"showexponent",
"showticklabels",
"showtickprefix",
"showticksuffix",
"thickness",
"thicknessmode",
"tick0",
"tickangle",
"tickcolor",
"tickfont",
"tickformat",
"tickformatstopdefaults",
"tickformatstops",
"ticklen",
"tickmode",
"tickprefix",
"ticks",
"ticksuffix",
"ticktext",
"ticktextsrc",
"tickvals",
"tickvalssrc",
"tickwidth",
"title",
"titlefont",
"titleside",
"x",
"xanchor",
"xpad",
"y",
"yanchor",
"ypad",
}
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the color of padded area.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the axis line color.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# borderwidth
# -----------
@property
def borderwidth(self):
"""
Sets the width (in px) or the border enclosing this color bar.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
# dtick
# -----
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
# exponentformat
# --------------
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
# len
# ---
@property
def len(self):
"""
Sets the length of the color bar This measure excludes the
padding of both ends. That is, the color bar length is this
length minus the padding on both ends.
The 'len' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["len"]
@len.setter
def len(self, val):
self["len"] = val
# lenmode
# -------
@property
def lenmode(self):
"""
Determines whether this color bar's length (i.e. the measure in
the color variation direction) is set in units of plot
"fraction" or in *pixels. Use `len` to set the value.
The 'lenmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["lenmode"]
@lenmode.setter
def lenmode(self, val):
self["lenmode"] = val
# nticks
# ------
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
# outlinecolor
# ------------
@property
def outlinecolor(self):
"""
Sets the axis line color.
The 'outlinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["outlinecolor"]
@outlinecolor.setter
def outlinecolor(self, val):
self["outlinecolor"] = val
# outlinewidth
# ------------
@property
def outlinewidth(self):
"""
Sets the width (in px) of the axis line.
The 'outlinewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["outlinewidth"]
@outlinewidth.setter
def outlinewidth(self, val):
self["outlinewidth"] = val
# separatethousands
# -----------------
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
# showexponent
# ------------
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
# showticklabels
# --------------
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
# showtickprefix
# --------------
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showtickprefix"]
@showtickprefix.setter
def showtickprefix(self, val):
self["showtickprefix"] = val
# showticksuffix
# --------------
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showticksuffix"]
@showticksuffix.setter
def showticksuffix(self, val):
self["showticksuffix"] = val
# thickness
# ---------
@property
def thickness(self):
"""
Sets the thickness of the color bar This measure excludes the
size of the padding, ticks and labels.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
# thicknessmode
# -------------
@property
def thicknessmode(self):
"""
Determines whether this color bar's thickness (i.e. the measure
in the constant color direction) is set in units of plot
"fraction" or in "pixels". Use `thickness` to set the value.
The 'thicknessmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["thicknessmode"]
@thicknessmode.setter
def thicknessmode(self, val):
self["thicknessmode"] = val
# tick0
# -----
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
# tickangle
# ---------
@property
def tickangle(self):
"""
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the tick
labels vertically.
The 'tickangle' property is a angle (in degrees) that may be
specified as a number between -180 and 180. Numeric values outside this
range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["tickangle"]
@tickangle.setter
def tickangle(self, val):
self["tickangle"] = val
# tickcolor
# ---------
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
# tickfont
# --------
@property
def tickfont(self):
"""
Sets the color bar's tick label font
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatterternary.marker.colorbar.Tickfont`
- A dict of string/value properties that will be passed
to the Tickfont constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.scatterternary.marker.colorbar.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
# tickformat
# ----------
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for dates
see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add one item
to d3's date formatter: "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickformat"]
@tickformat.setter
def tickformat(self, val):
self["tickformat"] = val
# tickformatstops
# ---------------
@property
def tickformatstops(self):
"""
The 'tickformatstops' property is a tuple of instances of
Tickformatstop that may be specified as:
- A list or tuple of instances of plotly.graph_objs.scatterternary.marker.colorbar.Tickformatstop
- A list or tuple of dicts of string/value properties that
will be passed to the Tickformatstop constructor
Supported dict properties:
dtickrange
range [*min*, *max*], where "min", "max" -
dtick values which describe some zoom level, it
is possible to omit "min" or "max" value by
passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level,
the same as "tickformat"
Returns
-------
tuple[plotly.graph_objs.scatterternary.marker.colorbar.Tickformatstop]
"""
return self["tickformatstops"]
@tickformatstops.setter
def tickformatstops(self, val):
self["tickformatstops"] = val
# tickformatstopdefaults
# ----------------------
@property
def tickformatstopdefaults(self):
"""
When used in a template (as layout.template.data.scatterternary
.marker.colorbar.tickformatstopdefaults), sets the default
property values to use for elements of
scatterternary.marker.colorbar.tickformatstops
The 'tickformatstopdefaults' property is an instance of Tickformatstop
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatterternary.marker.colorbar.Tickformatstop`
- A dict of string/value properties that will be passed
to the Tickformatstop constructor
Supported dict properties:
Returns
-------
plotly.graph_objs.scatterternary.marker.colorbar.Tickformatstop
"""
return self["tickformatstopdefaults"]
@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
self["tickformatstopdefaults"] = val
# ticklen
# -------
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
# tickmode
# --------
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
# tickprefix
# ----------
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
# ticks
# -----
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
# ticksuffix
# ----------
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
# ticktext
# --------
@property
def ticktext(self):
"""
Sets the text displayed at the ticks position via `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
# ticktextsrc
# -----------
@property
def ticktextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for ticktext .
The 'ticktextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ticktextsrc"]
@ticktextsrc.setter
def ticktextsrc(self, val):
self["ticktextsrc"] = val
# tickvals
# --------
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
# tickvalssrc
# -----------
@property
def tickvalssrc(self):
"""
Sets the source reference on Chart Studio Cloud for tickvals .
The 'tickvalssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
# tickwidth
# ---------
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
# title
# -----
@property
def title(self):
"""
The 'title' property is an instance of Title
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatterternary.marker.colorbar.Title`
- A dict of string/value properties that will be passed
to the Title constructor
Supported dict properties:
font
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
side
Determines the location of color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
text
Sets the title of the color bar. Note that
before the existence of `title.text`, the
title's contents used to be defined as the
`title` attribute itself. This behavior has
been deprecated.
Returns
-------
plotly.graph_objs.scatterternary.marker.colorbar.Title
"""
return self["title"]
@title.setter
def title(self, val):
self["title"] = val
# titlefont
# ---------
@property
def titlefont(self):
"""
Deprecated: Please use
scatterternary.marker.colorbar.title.font instead. Sets this
color bar's title font. Note that the title's font used to be
set by the now deprecated `titlefont` attribute.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatterternary.marker.colorbar.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
"""
return self["titlefont"]
@titlefont.setter
def titlefont(self, val):
self["titlefont"] = val
# titleside
# ---------
@property
def titleside(self):
"""
Deprecated: Please use
scatterternary.marker.colorbar.title.side instead. Determines
the location of color bar's title with respect to the color
bar. Note that the title's location used to be set by the now
deprecated `titleside` attribute.
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
"""
return self["titleside"]
@titleside.setter
def titleside(self, val):
self["titleside"] = val
# x
# -
@property
def x(self):
"""
Sets the x position of the color bar (in plot fraction).
The 'x' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# xanchor
# -------
@property
def xanchor(self):
"""
Sets this color bar's horizontal position anchor. This anchor
binds the `x` position to the "left", "center" or "right" of
the color bar.
The 'xanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'center', 'right']
Returns
-------
Any
"""
return self["xanchor"]
@xanchor.setter
def xanchor(self, val):
self["xanchor"] = val
# xpad
# ----
@property
def xpad(self):
"""
Sets the amount of padding (in px) along the x direction.
The 'xpad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["xpad"]
@xpad.setter
def xpad(self, val):
self["xpad"] = val
# y
# -
@property
def y(self):
"""
Sets the y position of the color bar (in plot fraction).
The 'y' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# yanchor
# -------
@property
def yanchor(self):
"""
Sets this color bar's vertical position anchor This anchor
binds the `y` position to the "top", "middle" or "bottom" of
the color bar.
The 'yanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top', 'middle', 'bottom']
Returns
-------
Any
"""
return self["yanchor"]
@yanchor.setter
def yanchor(self, val):
self["yanchor"] = val
# ypad
# ----
@property
def ypad(self):
"""
Sets the amount of padding (in px) along the y direction.
The 'ypad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ypad"]
@ypad.setter
def ypad(self, val):
self["ypad"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
len
Sets the length of the color bar This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for
dates see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add
one item to d3's date formatter: "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.scatterternary.
marker.colorbar.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.data.scatte
rternary.marker.colorbar.tickformatstopdefaults), sets
the default property values to use for elements of
scatterternary.marker.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
ticktext .
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.scatterternary.marker.colo
rbar.Title` instance or dict with compatible properties
titlefont
Deprecated: Please use
scatterternary.marker.colorbar.title.font instead. Sets
this color bar's title font. Note that the title's font
used to be set by the now deprecated `titlefont`
attribute.
titleside
Deprecated: Please use
scatterternary.marker.colorbar.title.side instead.
Determines the location of color bar's title with
respect to the color bar. Note that the title's
location used to be set by the now deprecated
`titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
Sets this color bar's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
"""
_mapped_properties = {
"titlefont": ("title", "font"),
"titleside": ("title", "side"),
}
def __init__(
self,
arg=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
dtick=None,
exponentformat=None,
len=None,
lenmode=None,
nticks=None,
outlinecolor=None,
outlinewidth=None,
separatethousands=None,
showexponent=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
thickness=None,
thicknessmode=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
titlefont=None,
titleside=None,
x=None,
xanchor=None,
xpad=None,
y=None,
yanchor=None,
ypad=None,
**kwargs
):
"""
Construct a new ColorBar object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scatterternary
.marker.ColorBar`
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
len
Sets the length of the color bar This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for
dates see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add
one item to d3's date formatter: "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.scatterternary.
marker.colorbar.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.data.scatte
rternary.marker.colorbar.tickformatstopdefaults), sets
the default property values to use for elements of
scatterternary.marker.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
ticktext .
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.scatterternary.marker.colo
rbar.Title` instance or dict with compatible properties
titlefont
Deprecated: Please use
scatterternary.marker.colorbar.title.font instead. Sets
this color bar's title font. Note that the title's font
used to be set by the now deprecated `titlefont`
attribute.
titleside
Deprecated: Please use
scatterternary.marker.colorbar.title.side instead.
Determines the location of color bar's title with
respect to the color bar. Note that the title's
location used to be set by the now deprecated
`titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
Sets this color bar's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
ColorBar
"""
super(ColorBar, self).__init__("colorbar")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scatterternary.marker.ColorBar
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterternary.marker.ColorBar`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("bgcolor", None)
_v = bgcolor if bgcolor is not None else _v
if _v is not None:
self["bgcolor"] = _v
_v = arg.pop("bordercolor", None)
_v = bordercolor if bordercolor is not None else _v
if _v is not None:
self["bordercolor"] = _v
_v = arg.pop("borderwidth", None)
_v = borderwidth if borderwidth is not None else _v
if _v is not None:
self["borderwidth"] = _v
_v = arg.pop("dtick", None)
_v = dtick if dtick is not None else _v
if _v is not None:
self["dtick"] = _v
_v = arg.pop("exponentformat", None)
_v = exponentformat if exponentformat is not None else _v
if _v is not None:
self["exponentformat"] = _v
_v = arg.pop("len", None)
_v = len if len is not None else _v
if _v is not None:
self["len"] = _v
_v = arg.pop("lenmode", None)
_v = lenmode if lenmode is not None else _v
if _v is not None:
self["lenmode"] = _v
_v = arg.pop("nticks", None)
_v = nticks if nticks is not None else _v
if _v is not None:
self["nticks"] = _v
_v = arg.pop("outlinecolor", None)
_v = outlinecolor if outlinecolor is not None else _v
if _v is not None:
self["outlinecolor"] = _v
_v = arg.pop("outlinewidth", None)
_v = outlinewidth if outlinewidth is not None else _v
if _v is not None:
self["outlinewidth"] = _v
_v = arg.pop("separatethousands", None)
_v = separatethousands if separatethousands is not None else _v
if _v is not None:
self["separatethousands"] = _v
_v = arg.pop("showexponent", None)
_v = showexponent if showexponent is not None else _v
if _v is not None:
self["showexponent"] = _v
_v = arg.pop("showticklabels", None)
_v = showticklabels if showticklabels is not None else _v
if _v is not None:
self["showticklabels"] = _v
_v = arg.pop("showtickprefix", None)
_v = showtickprefix if showtickprefix is not None else _v
if _v is not None:
self["showtickprefix"] = _v
_v = arg.pop("showticksuffix", None)
_v = showticksuffix if showticksuffix is not None else _v
if _v is not None:
self["showticksuffix"] = _v
_v = arg.pop("thickness", None)
_v = thickness if thickness is not None else _v
if _v is not None:
self["thickness"] = _v
_v = arg.pop("thicknessmode", None)
_v = thicknessmode if thicknessmode is not None else _v
if _v is not None:
self["thicknessmode"] = _v
_v = arg.pop("tick0", None)
_v = tick0 if tick0 is not None else _v
if _v is not None:
self["tick0"] = _v
_v = arg.pop("tickangle", None)
_v = tickangle if tickangle is not None else _v
if _v is not None:
self["tickangle"] = _v
_v = arg.pop("tickcolor", None)
_v = tickcolor if tickcolor is not None else _v
if _v is not None:
self["tickcolor"] = _v
_v = arg.pop("tickfont", None)
_v = tickfont if tickfont is not None else _v
if _v is not None:
self["tickfont"] = _v
_v = arg.pop("tickformat", None)
_v = tickformat if tickformat is not None else _v
if _v is not None:
self["tickformat"] = _v
_v = arg.pop("tickformatstops", None)
_v = tickformatstops if tickformatstops is not None else _v
if _v is not None:
self["tickformatstops"] = _v
_v = arg.pop("tickformatstopdefaults", None)
_v = tickformatstopdefaults if tickformatstopdefaults is not None else _v
if _v is not None:
self["tickformatstopdefaults"] = _v
_v = arg.pop("ticklen", None)
_v = ticklen if ticklen is not None else _v
if _v is not None:
self["ticklen"] = _v
_v = arg.pop("tickmode", None)
_v = tickmode if tickmode is not None else _v
if _v is not None:
self["tickmode"] = _v
_v = arg.pop("tickprefix", None)
_v = tickprefix if tickprefix is not None else _v
if _v is not None:
self["tickprefix"] = _v
_v = arg.pop("ticks", None)
_v = ticks if ticks is not None else _v
if _v is not None:
self["ticks"] = _v
_v = arg.pop("ticksuffix", None)
_v = ticksuffix if ticksuffix is not None else _v
if _v is not None:
self["ticksuffix"] = _v
_v = arg.pop("ticktext", None)
_v = ticktext if ticktext is not None else _v
if _v is not None:
self["ticktext"] = _v
_v = arg.pop("ticktextsrc", None)
_v = ticktextsrc if ticktextsrc is not None else _v
if _v is not None:
self["ticktextsrc"] = _v
_v = arg.pop("tickvals", None)
_v = tickvals if tickvals is not None else _v
if _v is not None:
self["tickvals"] = _v
_v = arg.pop("tickvalssrc", None)
_v = tickvalssrc if tickvalssrc is not None else _v
if _v is not None:
self["tickvalssrc"] = _v
_v = arg.pop("tickwidth", None)
_v = tickwidth if tickwidth is not None else _v
if _v is not None:
self["tickwidth"] = _v
_v = arg.pop("title", None)
_v = title if title is not None else _v
if _v is not None:
self["title"] = _v
_v = arg.pop("titlefont", None)
_v = titlefont if titlefont is not None else _v
if _v is not None:
self["titlefont"] = _v
_v = arg.pop("titleside", None)
_v = titleside if titleside is not None else _v
if _v is not None:
self["titleside"] = _v
_v = arg.pop("x", None)
_v = x if x is not None else _v
if _v is not None:
self["x"] = _v
_v = arg.pop("xanchor", None)
_v = xanchor if xanchor is not None else _v
if _v is not None:
self["xanchor"] = _v
_v = arg.pop("xpad", None)
_v = xpad if xpad is not None else _v
if _v is not None:
self["xpad"] = _v
_v = arg.pop("y", None)
_v = y if y is not None else _v
if _v is not None:
self["y"] = _v
_v = arg.pop("yanchor", None)
_v = yanchor if yanchor is not None else _v
if _v is not None:
self["yanchor"] = _v
_v = arg.pop("ypad", None)
_v = ypad if ypad is not None else _v
if _v is not None:
self["ypad"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
|
{
"content_hash": "2c7b6f44923111b4a26f654e4a3fb460",
"timestamp": "",
"source": "github",
"line_count": 1947,
"max_line_length": 107,
"avg_line_length": 35.898818695428865,
"alnum_prop": 0.5547464053222692,
"repo_name": "plotly/python-api",
"id": "f9bffa92fac528195ba02f2b1492139519f63540",
"size": "69895",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/graph_objs/scatterternary/marker/_colorbar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
from .client import RaspiWsClient
from .core import RaspiBaseMsg, RaspiAckMsg
__all__ = ['TVService', 'TVPower', 'TVStatus', 'TVGetModes', 'TVSetExplicit']
class TVPower(RaspiBaseMsg):
_handle = 'power_ctrl'
_properties = {'power'}
def __init__(self, **kwargs):
super(TVPower, self).__init__(**kwargs)
class TVStatus(RaspiBaseMsg):
_handle = 'get_status'
def __init__(self, **kwargs):
super(TVStatus, self).__init__(**kwargs)
class TVGetModes(RaspiBaseMsg):
_handle = 'get_modes'
_properties = {'preferred', 'group'}
def __init__(self, **kwargs):
kwargs.setdefault('group', "")
kwargs.setdefault('preferred', len(kwargs.get("group")) == 0)
super(TVGetModes, self).__init__(**kwargs)
class TVSetExplicit(RaspiBaseMsg):
_handle = 'set_explicit'
_properties = {'preferred', 'group', 'mode'}
def __init__(self, **kwargs):
kwargs.setdefault('mode', 0)
kwargs.setdefault('group', '')
kwargs.setdefault('preferred', len(kwargs.get("group")) == 0 and kwargs.get("mode") == 0)
super(TVSetExplicit, self).__init__(**kwargs)
class TVService(RaspiWsClient):
DMT = "DMT"
CEA = "CEA"
PATH = __name__.split(".")[-1]
def __init__(self, host, timeout=3, verbose=1):
"""Init a tv service instance
:param host: raspi-io server address
:param timeout: raspi-io timeout unit second
:param verbose: verbose message output
"""
super(TVService, self).__init__(host, self.PATH, timeout, verbose)
def get_status(self):
"""Get HDMI status
:return: HDMI status
"""
ret = self._transfer(TVStatus())
return ret.data if isinstance(ret, RaspiAckMsg) and ret.ack else None
def get_modes(self, group):
"""Get supported modes for GROUP (CEA, DMT)
:return:
"""
ret = self._transfer(TVGetModes(group=group))
return ret.data if isinstance(ret, RaspiAckMsg) and ret.ack else None
def get_preferred_mode(self):
"""Get HDMI preferred settings
:return: return HDMI preferred mode and group
"""
ret = self._transfer(TVGetModes())
return ret.data if isinstance(ret, RaspiAckMsg) and ret.ack else None
def set_preferred(self):
"""Power on HDMI with preferred settings
:return:
"""
ret = self._transfer(TVSetExplicit())
return ret.data if isinstance(ret, RaspiAckMsg) and ret.ack else False
def set_explicit(self, group, mode):
"""Power on HDMI with explicit group and mode
:param group: group (DMT or CEA)
:param mode: modes get's from get_modes()
:return: result
"""
ret = self._transfer(TVSetExplicit(group=group, mode=mode))
return ret.data if isinstance(ret, RaspiAckMsg) and ret.ack else False
def power_control(self, power):
"""Power on or off HDMI
:param power: power
:return: success return True, failed return False
"""
ret = self._transfer(TVPower(power=power))
return ret.data if isinstance(ret, RaspiAckMsg) and ret.ack else None
def power_off(self):
return self.power_control(False)
|
{
"content_hash": "f3848a56db0cfb664f38054b3d9ad7d6",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 97,
"avg_line_length": 30.185185185185187,
"alnum_prop": 0.6082822085889571,
"repo_name": "amaork/raspi-io",
"id": "7675499a9f37d3c690347bcb0b3b2663c23286aa",
"size": "3284",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "raspi_io/tvservice.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "73368"
}
],
"symlink_target": ""
}
|
'''
Jobber Behaviors
'''
# pylint: disable=W0232
# Import python libs
from __future__ import absolute_import
import os
import sys
import types
import logging
import traceback
import multiprocessing
import subprocess
import json
# Import salt libs
import salt.ext.six as six
import salt.daemons.masterapi
import salt.utils.args
import salt.utils
import salt.transport
from raet import raeting, nacling
from raet.lane.stacking import LaneStack
from raet.lane.yarding import RemoteYard
from salt.executors import FUNCTION_EXECUTORS
from salt.utils import kinds, is_windows
from salt.utils.event import tagify
from salt.exceptions import (
CommandExecutionError, CommandNotFoundError, SaltInvocationError)
# Import ioflo libs
import ioflo.base.deeding
from ioflo.base.consoling import getConsole
console = getConsole()
log = logging.getLogger(__name__)
@ioflo.base.deeding.deedify(
'SaltRaetShellJobberCheck',
ioinits={'opts': '.salt.opts',
'grains': '.salt.grains',
'fun': '.salt.var.fun',
'matcher': '.salt.matcher',
'shells': '.salt.var.shells',
'stack': '.salt.road.manor.stack'})
def jobber_check(self):
'''
Iterate over the shell jobbers and return the ones that have finished
'''
rms = []
for jid in self.shells.value:
if isinstance(self.shells.value[jid]['proc'].poll(), int):
rms.append(jid)
data = self.shells.value[jid]
stdout, stderr = data['proc'].communicate()
ret = json.loads(salt.utils.to_str(stdout), object_hook=salt.utils.decode_dict)['local']
route = {'src': (self.stack.value.local.name, 'manor', 'jid_ret'),
'dst': (data['msg']['route']['src'][0], None, 'remote_cmd')}
ret['cmd'] = '_return'
ret['id'] = self.opts.value['id']
ret['jid'] = jid
msg = {'route': route, 'load': ret}
master = self.stack.value.nameRemotes.get(data['msg']['route']['src'][0])
self.stack.value.message(
msg,
master.uid)
for rm_ in rms:
self.shells.value.pop(rm_)
@ioflo.base.deeding.deedify(
'SaltRaetShellJobber',
ioinits={'opts': '.salt.opts',
'grains': '.salt.grains',
'fun': '.salt.var.fun',
'matcher': '.salt.matcher',
'modules': '.salt.loader.modules',
'shells': {'ipath': '.salt.var.shells', 'ival': {}}})
def shell_jobber(self):
'''
Shell jobber start!
'''
while self.fun.value:
msg = self.fun.value.popleft()
data = msg.get('pub')
match = getattr(
self.matcher.value,
'{0}_match'.format(
data.get('tgt_type', 'glob')
)
)(data['tgt'])
if not match:
continue
fun = data['fun']
if fun in self.modules.value:
func = self.modules.value[fun]
else:
continue
args, kwargs = salt.minion.load_args_and_kwargs(
func,
salt.utils.args.parse_input(data['arg']),
data)
cmd = ['salt-call',
'--out', 'json',
'--metadata',
'-c', salt.syspaths.CONFIG_DIR]
if 'return' in data:
cmd.append('--return')
cmd.append(data['return'])
cmd.append(fun)
for arg in args:
cmd.append(arg)
for key in kwargs:
cmd.append('{0}={1}'.format(key, kwargs[key]))
que = {'pub': data,
'msg': msg}
que['proc'] = subprocess.Popen(
cmd,
shell=False,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
self.shells.value[data['jid']] = que
class SaltRaetNixJobber(ioflo.base.deeding.Deed):
'''
Execute a function call job on a minion on a *nix based system
FloScript:
do salt raet nix jobber
'''
Ioinits = {'opts_store': '.salt.opts',
'grains': '.salt.grains',
'modules': '.salt.loader.modules',
'returners': '.salt.loader.returners',
'module_executors': '.salt.loader.executors',
'fun': '.salt.var.fun',
'matcher': '.salt.matcher',
'executors': '.salt.track.executors',
'road_stack': '.salt.road.manor.stack', }
def _prepare(self):
'''
Map opts for convenience
'''
self.opts = self.opts_store.value
self.proc_dir = salt.minion.get_proc_dir(self.opts['cachedir'])
self.serial = salt.payload.Serial(self.opts)
self.executors.value = {}
def _setup_jobber_stack(self):
'''
Setup and return the LaneStack and Yard used by the jobber yard
to communicate with the minion manor yard
'''
role = self.opts.get('id', '')
if not role:
emsg = ("Missing role required to setup Jobber Lane.")
log.error(emsg + "\n")
raise ValueError(emsg)
kind = self.opts['__role']
if kind not in kinds.APPL_KINDS:
emsg = ("Invalid application kind = '{0}' for Jobber lane.".format(kind))
log.error(emsg + "\n")
raise ValueError(emsg)
if kind == 'minion':
lanename = "{0}_{1}".format(role, kind)
else:
emsg = ("Unsupported application kind = '{0}' for Jobber Lane.".format(kind))
log.error(emsg + '\n')
raise ValueError(emsg)
sockdirpath = self.opts['sock_dir']
name = 'jobber' + nacling.uuid(size=18)
stack = LaneStack(
name=name,
lanename=lanename,
sockdirpath=sockdirpath)
stack.Pk = raeting.PackKind.pack.value
# add remote for the manor yard
stack.addRemote(RemoteYard(stack=stack,
name='manor',
lanename=lanename,
dirpath=sockdirpath))
console.concise("Created Jobber Stack {0}\n".format(stack.name))
return stack
def _return_pub(self, msg, ret, stack):
'''
Send the return data back via the uxd socket
'''
route = {'src': (self.road_stack.value.local.name, stack.local.name, 'jid_ret'),
'dst': (msg['route']['src'][0], None, 'remote_cmd')}
mid = self.opts['id']
ret['cmd'] = '_return'
ret['id'] = mid
try:
oput = self.modules.value[ret['fun']].__outputter__
except (KeyError, AttributeError, TypeError):
pass
else:
if isinstance(oput, str):
ret['out'] = oput
msg = {'route': route, 'load': ret}
stack.transmit(msg, stack.fetchUidByName('manor'))
stack.serviceAll()
def action(self):
'''
Pull the queue for functions to execute
'''
while self.fun.value:
msg = self.fun.value.popleft()
data = msg.get('pub')
match = getattr(
self.matcher.value,
'{0}_match'.format(
data.get('tgt_type', 'glob')
)
)(data['tgt'])
if not match:
continue
if 'user' in data:
log.info(
'User {0[user]} Executing command {0[fun]} with jid '
'{0[jid]}'.format(data))
else:
log.info(
'Executing command {0[fun]} with jid {0[jid]}'.format(data)
)
log.debug('Command details {0}'.format(data))
if is_windows():
# SaltRaetNixJobber is not picklable. Pickling is necessary
# when spawning a process in Windows. Since the process will
# be spawned and joined on non-Windows platforms, instead of
# this, just run the function directly and absorb any thrown
# exceptions.
try:
self.proc_run(msg)
except Exception as exc:
log.error(
'Exception caught by jobber: {0}'.format(exc),
exc_info=True)
else:
process = multiprocessing.Process(
target=self.proc_run,
kwargs={'msg': msg}
)
process.start()
process.join()
def proc_run(self, msg):
'''
Execute the run in a dedicated process
'''
data = msg['pub']
fn_ = os.path.join(self.proc_dir, data['jid'])
self.opts['__ex_id'] = data['jid']
salt.utils.daemonize_if(self.opts)
salt.transport.jobber_stack = stack = self._setup_jobber_stack()
# set up return destination from source
src_estate, src_yard, src_share = msg['route']['src']
salt.transport.jobber_estate_name = src_estate
salt.transport.jobber_yard_name = src_yard
sdata = {'pid': os.getpid()}
sdata.update(data)
with salt.utils.fopen(fn_, 'w+b') as fp_:
fp_.write(self.serial.dumps(sdata))
ret = {'success': False}
function_name = data['fun']
if function_name in self.modules.value:
try:
func = self.modules.value[data['fun']]
args, kwargs = salt.minion.load_args_and_kwargs(
func,
salt.utils.args.parse_input(data['arg']),
data)
sys.modules[func.__module__].__context__['retcode'] = 0
executors = data.get('module_executors') or self.opts.get('module_executors', ['direct_call.get'])
if isinstance(executors, six.string_types):
executors = [executors]
elif not isinstance(executors, list) or not executors:
raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected".
format(executors))
if self.opts.get('sudo_user', '') and executors[-1] != 'sudo.get':
if executors[-1] in FUNCTION_EXECUTORS:
executors[-1] = 'sudo.get' # replace
else:
executors.append('sudo.get') # append
log.trace("Executors list {0}".format(executors))
# Get executors
def get_executor(name):
executor_class = self.module_executors.value.get(name)
if executor_class is None:
raise SaltInvocationError("Executor '{0}' is not available".format(name))
return executor_class
# Get the last one that is function executor
executor = get_executor(executors.pop())(self.opts, data, func, args, kwargs)
# Instantiate others from bottom to the top
for executor_name in reversed(executors):
executor = get_executor(executor_name)(self.opts, data, executor)
return_data = executor.execute()
if isinstance(return_data, types.GeneratorType):
ind = 0
iret = {}
for single in return_data:
if isinstance(single, dict) and isinstance(iret, list):
iret.update(single)
else:
if not iret:
iret = []
iret.append(single)
tag = tagify(
[data['jid'], 'prog', self.opts['id'], str(ind)],
'job')
event_data = {'return': single}
self._fire_master(event_data, tag) # Need to look into this
ind += 1
ret['return'] = iret
else:
ret['return'] = return_data
ret['retcode'] = sys.modules[func.__module__].__context__.get(
'retcode',
0
)
ret['success'] = True
except CommandNotFoundError as exc:
msg = 'Command required for \'{0}\' not found'.format(
function_name
)
log.debug(msg, exc_info=True)
ret['return'] = '{0}: {1}'.format(msg, exc)
except CommandExecutionError as exc:
log.error(
'A command in \'{0}\' had a problem: {1}'.format(
function_name,
exc
),
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR: {0}'.format(exc)
except SaltInvocationError as exc:
log.error(
'Problem executing \'{0}\': {1}'.format(
function_name,
exc
),
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR executing \'{0}\': {1}'.format(
function_name, exc
)
except TypeError as exc:
msg = ('TypeError encountered executing {0}: {1}. See '
'debug log for more info.').format(function_name, exc)
log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
ret['return'] = msg
except Exception:
msg = 'The minion function caused an exception'
log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
else:
ret['return'] = '\'{0}\' is not available.'.format(function_name)
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
self._return_pub(msg, ret, stack)
if data['ret']:
ret['id'] = self.opts['id']
for returner in set(data['ret'].split(',')):
try:
self.returners.value['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job {0} {1}'.format(
data['jid'],
exc
)
)
console.concise("Closing Jobber Stack {0}\n".format(stack.name))
stack.server.close()
salt.transport.jobber_stack = None
|
{
"content_hash": "3bca3644085176880b994be9f27d69de",
"timestamp": "",
"source": "github",
"line_count": 400,
"max_line_length": 118,
"avg_line_length": 37.785,
"alnum_prop": 0.4872965462485113,
"repo_name": "stephane-martin/salt-debian-packaging",
"id": "242f75e4df26b28a9ccba452797b7ed5f12c3d38",
"size": "15138",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "salt-2016.3.2/salt/daemons/flo/jobber.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13798"
},
{
"name": "C",
"bytes": "986"
},
{
"name": "Groff",
"bytes": "13634346"
},
{
"name": "HTML",
"bytes": "39558"
},
{
"name": "Makefile",
"bytes": "20902"
},
{
"name": "NSIS",
"bytes": "22316"
},
{
"name": "PowerShell",
"bytes": "38719"
},
{
"name": "Python",
"bytes": "40857506"
},
{
"name": "SaltStack",
"bytes": "58278"
},
{
"name": "Scheme",
"bytes": "1790"
},
{
"name": "Shell",
"bytes": "829927"
},
{
"name": "Tcl",
"bytes": "6532"
},
{
"name": "TeX",
"bytes": "11632"
}
],
"symlink_target": ""
}
|
import random
import molecules
import numpy.random as npr
from Input.KnowledgeBase import KnowledgeBase as know
class Process(object):
"""
Parent for all cellular processes.
"""
def __init__(self, id, name):
self.id = id
self.name = name
self.enzyme_ids = []
self.substrate_ids = []
def set_states(self, substrate_ids, enzyme_ids):
self.enzyme_ids = enzyme_ids
self.substrate_ids = substrate_ids
def update(self, model):
"""
Has to be implemented by child class.
"""
pass
class Translation(Process):
"""
Translation is instantiated in the Cell to produce proteins.
Defines Translation process. It iterates over all ribosomes and decides what
they should do. They either bind to mRNA or elongate/terminate a protein if
they are already bound.
"""
code = dict([('UCA', 'S'), ('UCG', 'S'), ('UCC', 'S'), ('UCU', 'S'),
('UUU', 'F'), ('UUC', 'F'), ('UUA', 'L'), ('UUG', 'L'),
('UAU', 'Y'), ('UAC', 'Y'), ('UAA', '*'), ('UAG', '*'),
('UGU', 'C'), ('UGC', 'C'), ('UGA', '*'), ('UGG', 'W'),
('CUA', 'L'), ('CUG', 'L'), ('CUC', 'L'), ('CUU', 'L'),
('CCA', 'P'), ('CCG', 'P'), ('CCC', 'P'), ('CCU', 'P'),
('CAU', 'H'), ('CAC', 'H'), ('CAA', 'Q'), ('CAG', 'Q'),
('CGA', 'R'), ('CGG', 'R'), ('CGC', 'R'), ('CGU', 'R'),
('AUU', 'I'), ('AUC', 'I'), ('AUA', 'I'), ('AUG', 'M'),
('ACA', 'T'), ('ACG', 'T'), ('ACC', 'T'), ('ACU', 'T'),
('AAU', 'N'), ('AAC', 'N'), ('AAA', 'K'), ('AAG', 'K'),
('AGU', 'S'), ('AGC', 'S'), ('AGA', 'R'), ('AGG', 'R'),
('GUA', 'V'), ('GUG', 'V'), ('GUC', 'V'), ('GUU', 'V'),
('GCA', 'A'), ('GCG', 'A'), ('GCC', 'A'), ('GCU', 'A'),
('GAU', 'D'), ('GAC', 'D'), ('GAA', 'E'), ('GAG', 'E'),
('GGA', 'G'), ('GGG', 'G'), ('GGC', 'G'), ('GGU', 'G')])
def __init__(self, id, name):
super(Translation, self).__init__(id, name)
self.kb = know()
# declare attributes
self.__ribosomes = []
self.__atp = []
self.__aa = []
def update(self, model):
"""
Update all mrnas and translate proteins and current metabolites.
"""
self.__ribosomes = model.states[self.enzyme_ids[0]]
self.__atp = model.states['ATP']
self.__mrna = [x for x in self.substrate_ids if "MRNA" in x]
self.__aa = model.states['AA']
for mrna_id in self.__mrna:
prot = None
mrna = model.states[mrna_id]
if not mrna.binding[0]:
self.initiate(mrna)
else:
prot = self.elongate(mrna)
if isinstance(prot, molecules.Protein):
if prot.id in model.states:
model.states[prot.id].append(prot)
else:
model.states[prot.id] = [prot]
def initiate(self, mrna):
"""
Try to bind to a given MRNA. Binding probability corresponds to the ribosome count.
@type mrna: MRNA
"""
if not mrna.binding[0]: # no mrna bound yet and target mrna still free at pos 0
# bind a nascent protein to the 0 codon
if npr.poisson(self.__ribosomes.count) > 1 and self.__atp.count > 0: # at least one binding event happens in time step
# if a ribosome binds the position a new Protein is created and stored on the
# position as if it were bound to the ribosome
# ATP has to be available
halflife = self.kb.get_protein_hl(mrna.id)
halflife = halflife.replace(',', '.')
halflife = float(halflife)
self.__atp.metacount(1)
mrna.binding[0] = molecules.Protein("Protein_{0}".format(mrna.id),
"Protein_{0}".format(mrna.id),
self.code[mrna[0:3]],
halflife = halflife)
self.__ribosomes.count -= 1
def elongate(self, mrna):
"""
Elongate the new protein by the correct amino acid. Check if an
MRNA is bound and if ribosome can move to next codon.
Terminate if the ribosome reaches a STOP codon.
@type return: Protein or False
"""
# TODO: this needs to update in a random order
for i, ribosome in enumerate(mrna.binding):
if self.__atp.count <= 0 or self.__aa.count <= 0: #if there is no more ATP, elongation can't proceed
break
if isinstance(ribosome, molecules.Protein):
codon = mrna[i*3:i*3+3]
aa = self.code[codon]
self.__atp.metacount(1)
self.__aa.metacount(1)
if aa == "*": # terminate at stop codon
return self.terminate(mrna, i)
if not mrna.binding[i + 1]: # if the next rna position is free
mrna.binding[i] + aa
mrna.binding[i + 1] = mrna.binding[i]
mrna.binding[i] = 0
return 0
def terminate(self, mrna, i):
"""
Splits the ribosome/MRNA complex and returns a protein.
@type mrna: MRNA
"""
protein = mrna.binding[i] # bound mRNA
mrna.binding[i] = 0
self.__ribosomes.count += 1
self.__atp.metacount(1)
return protein
class Transcription(Process):
"""
Implements mRNA transcription from genes on the chromosome.
"""
def __init__(self, id, name):
super(Transcription, self).__init__(id, name)
# TODO: implement transcription
class Degradation(Process):
"""
Implements Protein Degradation by chance
"""
count_s = 0
global count_s
def __init__(self, id, name):
super(Degradation, self).__init__(id, name)
self.__proteasomes = []
def set_states(self, substrate_ids, enzyme_ids):
self.protein_ids = substrate_ids
self.enzyme_ids = enzyme_ids
def update(self, model):
#Regenerates amount of Proteasomes for every step
global count_s
self.__proteasomes = model.states[self.enzyme_ids[0]]
self.__proteasomes.count = 10
count_s += 1
#Checks if there are any proteins available, computes a value sigma (depending on halflife) which is compared
# to random number between 0 and 1. If random number is < than sigma, protein is degradated and one proteasom is busy.
# If no proteasom is left, sigma is half as big as befor (protein is more unlikely to be degradated)
for p in self.protein_ids:
if len(model.states[p]) != 0:
hwz = model.states[p][0].halflife
sig = float(1.0/(2.0*hwz*60))
for pos in model.states[p]:
if self.__proteasomes.count != 0:
z = random.uniform(0,1)
if z < sig:
self.__proteasomes.count -= 1
del model.states[p][0]
else:
z = random.uniform(0,1)
sig = float(1.0/(4.0*hwz*60))
if z < sig:
del model.states[p][0]
|
{
"content_hash": "cc00cb6516ac1cb186caac662f47f43b",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 130,
"avg_line_length": 36.21904761904762,
"alnum_prop": 0.483565606100447,
"repo_name": "fugufisch/hu_bp_python_course",
"id": "d019c2fa152c75fe39767c8da4e8a5abcabe14f7",
"size": "7606",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Project/processes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "540496"
},
{
"name": "OpenEdge ABL",
"bytes": "61864"
},
{
"name": "PLpgSQL",
"bytes": "7470978"
},
{
"name": "Python",
"bytes": "75180"
}
],
"symlink_target": ""
}
|
from utils.header import MagicField, Field
from load_command import LoadCommandCommand, LoadCommandHeader
class SymtabCommand(LoadCommandHeader):
ENDIAN = None
FIELDS = (
MagicField('cmd', 'I', {LoadCommandCommand.COMMANDS['LC_SYMTAB']: 'LC_SYMTAB'}),
Field('cmdsize', 'I'),
Field('symoff', 'I'),
Field('nsyms', 'I'),
Field('stroff', 'I'),
Field('strsize', 'I'),
)
def __init__(self, bytes_, **kwargs):
self.symoff = None
self.nsyms = None
self.stroff = None
self.strsize = None
super(SymtabCommand, self).__init__('symtab_command', bytes_, **kwargs)
|
{
"content_hash": "5a73fc17b467490e368d3de7f56c1753",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 88,
"avg_line_length": 31.333333333333332,
"alnum_prop": 0.5896656534954408,
"repo_name": "hkkwok/MachOTool",
"id": "1e24ad7594bc5df6ac1ae22a9de73c2a52b95238",
"size": "658",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mach_o/headers/symtab_command.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "126"
},
{
"name": "Makefile",
"bytes": "840"
},
{
"name": "Objective-C",
"bytes": "3546"
},
{
"name": "Python",
"bytes": "231983"
}
],
"symlink_target": ""
}
|
"""Functional tests using WebTest."""
import datetime as dt
import httplib as http
import logging
import unittest
import httpretty
import markupsafe
import mock
from nose.tools import * # flake8: noqa (PEP8 asserts)
import re
from addons.wiki.utils import to_mongo_key
from framework.auth import exceptions as auth_exc
from framework.auth.core import Auth
from tests.base import OsfTestCase
from tests.base import fake
from osf_tests.factories import (UserFactory, AuthUserFactory, ProjectFactory, NodeFactory,
RegistrationFactory, UnregUserFactory, UnconfirmedUserFactory,
PrivateLinkFactory, PreprintFactory, PreprintProviderFactory, SubjectFactory)
from addons.wiki.tests.factories import NodeWikiFactory
from website import settings, language
from addons.osfstorage.models import OsfStorageFile
from website.util import web_url_for, api_url_for, permissions
from api_tests import utils as test_utils
logging.getLogger('website.project.model').setLevel(logging.ERROR)
def assert_in_html(member, container, **kwargs):
"""Looks for the specified member in markupsafe-escaped HTML output"""
member = markupsafe.escape(member)
return assert_in(member, container, **kwargs)
def assert_not_in_html(member, container, **kwargs):
"""Looks for the specified member in markupsafe-escaped HTML output"""
member = markupsafe.escape(member)
return assert_not_in(member, container, **kwargs)
class TestDisabledUser(OsfTestCase):
def setUp(self):
super(TestDisabledUser, self).setUp()
self.user = UserFactory()
self.user.set_password('Korben Dallas')
self.user.is_disabled = True
self.user.save()
def test_profile_disabled_returns_401(self):
res = self.app.get(self.user.url, expect_errors=True)
assert_equal(res.status_code, 410)
class TestAnUnregisteredUser(OsfTestCase):
def test_cant_see_profile_if_not_logged_in(self):
url = web_url_for('profile_view')
res = self.app.get(url)
res = res.follow()
assert_equal(res.status_code, 301)
assert_in('/login/', res.headers['Location'])
class TestAUser(OsfTestCase):
def setUp(self):
super(TestAUser, self).setUp()
self.user = AuthUserFactory()
self.auth = self.user.auth
def test_can_see_profile_url(self):
res = self.app.get(self.user.url).maybe_follow()
assert_in(self.user.url, res)
def test_can_see_homepage(self):
# Goes to homepage
res = self.app.get('/').maybe_follow() # Redirects
assert_equal(res.status_code, 200)
# `GET /login/` without parameters is redirected to `/dashboard/` page which has `@must_be_logged_in` decorator
# if user is not logged in, she/he is further redirected to CAS login page
def test_is_redirected_to_cas_if_not_logged_in_at_login_page(self):
res = self.app.get('/login/').follow()
assert_equal(res.status_code, 302)
location = res.headers.get('Location')
assert_in('login?service=', location)
def test_is_redirected_to_dashboard_if_already_logged_in_at_login_page(self):
res = self.app.get('/login/', auth=self.user.auth)
assert_equal(res.status_code, 302)
res = res.follow(auth=self.user.auth)
assert_equal(res.request.path, '/dashboard/')
def test_register_page(self):
res = self.app.get('/register/')
assert_equal(res.status_code, 200)
def test_is_redirected_to_dashboard_if_already_logged_in_at_register_page(self):
res = self.app.get('/register/', auth=self.user.auth)
assert_equal(res.status_code, 302)
res = res.follow(auth=self.user.auth)
assert_equal(res.request.path, '/dashboard/')
def test_sees_projects_in_her_dashboard(self):
# the user already has a project
project = ProjectFactory(creator=self.user)
project.add_contributor(self.user)
project.save()
res = self.app.get('/myprojects/', auth=self.user.auth)
assert_in('Projects', res) # Projects heading
def test_logged_in_index_route_renders_home_template(self):
res = self.app.get('/', auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_in('My Projects', res) # Will change once home page populated
def test_logged_out_index_route_renders_landing_page(self):
res = self.app.get('/')
assert_in('Simplified Scholarly Collaboration', res)
def test_does_not_see_osffiles_in_user_addon_settings(self):
res = self.app.get('/settings/addons/', auth=self.auth, auto_follow=True)
assert_not_in('OSF Storage', res)
def test_sees_osffiles_in_project_addon_settings(self):
project = ProjectFactory(creator=self.user)
project.add_contributor(
self.user,
permissions=['read', 'write', 'admin'],
save=True)
res = self.app.get('/{0}/settings/'.format(project._primary_key), auth=self.auth, auto_follow=True)
assert_in('OSF Storage', res)
def test_sees_correct_title_home_page(self):
# User goes to homepage
res = self.app.get('/', auto_follow=True)
title = res.html.title.string
# page title is correct
assert_equal('OSF | Home', title)
def test_sees_correct_title_on_dashboard(self):
# User goes to dashboard
res = self.app.get('/myprojects/', auth=self.auth, auto_follow=True)
title = res.html.title.string
assert_equal('OSF | My Projects', title)
def test_can_see_make_public_button_if_admin(self):
# User is a contributor on a project
project = ProjectFactory()
project.add_contributor(
self.user,
permissions=['read', 'write', 'admin'],
save=True)
# User goes to the project page
res = self.app.get(project.url, auth=self.auth).maybe_follow()
assert_in('Make Public', res)
def test_cant_see_make_public_button_if_not_admin(self):
# User is a contributor on a project
project = ProjectFactory()
project.add_contributor(
self.user,
permissions=['read', 'write'],
save=True)
# User goes to the project page
res = self.app.get(project.url, auth=self.auth).maybe_follow()
assert_not_in('Make Public', res)
def test_can_see_make_private_button_if_admin(self):
# User is a contributor on a project
project = ProjectFactory(is_public=True)
project.add_contributor(
self.user,
permissions=['read', 'write', 'admin'],
save=True)
# User goes to the project page
res = self.app.get(project.url, auth=self.auth).maybe_follow()
assert_in('Make Private', res)
def test_cant_see_make_private_button_if_not_admin(self):
# User is a contributor on a project
project = ProjectFactory(is_public=True)
project.add_contributor(
self.user,
permissions=['read', 'write'],
save=True)
# User goes to the project page
res = self.app.get(project.url, auth=self.auth).maybe_follow()
assert_not_in('Make Private', res)
def test_sees_logs_on_a_project(self):
project = ProjectFactory(is_public=True)
# User goes to the project's page
res = self.app.get(project.url, auth=self.auth).maybe_follow()
# Can see log event
assert_in('created', res)
def test_no_wiki_content_message(self):
project = ProjectFactory(creator=self.user)
# Goes to project's wiki, where there is no content
res = self.app.get('/{0}/wiki/home/'.format(project._primary_key), auth=self.auth)
# Sees a message indicating no content
assert_in('Add important information, links, or images here to describe your project.', res)
# Sees that edit panel is open by default when home wiki has no content
assert_in('panelsUsed: ["view", "menu", "edit"]', res)
def test_wiki_content(self):
project = ProjectFactory(creator=self.user)
wiki_page = 'home'
wiki_content = 'Kittens'
NodeWikiFactory(user=self.user, node=project, content=wiki_content, page_name=wiki_page)
res = self.app.get('/{0}/wiki/{1}/'.format(
project._primary_key,
wiki_page,
), auth=self.auth)
assert_not_in('Add important information, links, or images here to describe your project.', res)
assert_in(wiki_content, res)
assert_in('panelsUsed: ["view", "menu"]', res)
def test_wiki_page_name_non_ascii(self):
project = ProjectFactory(creator=self.user)
non_ascii = to_mongo_key('WöRlÐé')
self.app.get('/{0}/wiki/{1}/'.format(
project._primary_key,
non_ascii
), auth=self.auth, expect_errors=True)
project.update_node_wiki(non_ascii, 'new content', Auth(self.user))
assert_in(non_ascii, project.wiki_pages_current)
def test_noncontributor_cannot_see_wiki_if_no_content(self):
user2 = UserFactory()
# user2 creates a public project and adds no wiki content
project = ProjectFactory(creator=user2, is_public=True)
# self navigates to project
res = self.app.get(project.url).maybe_follow()
# Should not see wiki widget (since non-contributor and no content)
assert_not_in('Add important information, links, or images here to describe your project.', res)
def test_wiki_does_not_exist(self):
project = ProjectFactory(creator=self.user)
res = self.app.get('/{0}/wiki/{1}/'.format(
project._primary_key,
'not a real page yet',
), auth=self.auth, expect_errors=True)
assert_in('Add important information, links, or images here to describe your project.', res)
def test_sees_own_profile(self):
res = self.app.get('/profile/', auth=self.auth)
td1 = res.html.find('td', text=re.compile(r'Public(.*?)Profile'))
td2 = td1.find_next_sibling('td')
assert_equal(td2.text, self.user.display_absolute_url)
def test_sees_another_profile(self):
user2 = UserFactory()
res = self.app.get(user2.url, auth=self.auth)
td1 = res.html.find('td', text=re.compile(r'Public(.*?)Profile'))
td2 = td1.find_next_sibling('td')
assert_equal(td2.text, user2.display_absolute_url)
class TestComponents(OsfTestCase):
def setUp(self):
super(TestComponents, self).setUp()
self.user = AuthUserFactory()
self.consolidate_auth = Auth(user=self.user)
self.project = ProjectFactory(creator=self.user)
self.project.add_contributor(contributor=self.user, auth=self.consolidate_auth)
# A non-project componenet
self.component = NodeFactory(
category='hypothesis',
creator=self.user,
parent=self.project,
)
self.component.save()
self.component.set_privacy('public', self.consolidate_auth)
self.component.set_privacy('private', self.consolidate_auth)
self.project.save()
self.project_url = self.project.web_url_for('view_project')
def test_sees_parent(self):
res = self.app.get(self.component.url, auth=self.user.auth).maybe_follow()
parent_title = res.html.find_all('h2', class_='node-parent-title')
assert_equal(len(parent_title), 1)
assert_in(self.project.title, parent_title[0].text) # Bs4 will handle unescaping HTML here
def test_delete_project(self):
res = self.app.get(
self.component.url + 'settings/',
auth=self.user.auth
).maybe_follow()
assert_in(
'Delete {0}'.format(self.component.project_or_component),
res
)
def test_cant_delete_project_if_not_admin(self):
non_admin = AuthUserFactory()
self.component.add_contributor(
non_admin,
permissions=['read', 'write'],
auth=self.consolidate_auth,
save=True,
)
res = self.app.get(
self.component.url + 'settings/',
auth=non_admin.auth
).maybe_follow()
assert_not_in(
'Delete {0}'.format(self.component.project_or_component),
res
)
def test_can_configure_comments_if_admin(self):
res = self.app.get(
self.component.url + 'settings/',
auth=self.user.auth,
).maybe_follow()
assert_in('Commenting', res)
def test_cant_configure_comments_if_not_admin(self):
non_admin = AuthUserFactory()
self.component.add_contributor(
non_admin,
permissions=['read', 'write'],
auth=self.consolidate_auth,
save=True,
)
res = self.app.get(
self.component.url + 'settings/',
auth=non_admin.auth
).maybe_follow()
assert_not_in('Commenting', res)
def test_components_should_have_component_list(self):
res = self.app.get(self.component.url, auth=self.user.auth)
assert_in('Components', res)
class TestPrivateLinkView(OsfTestCase):
def setUp(self):
super(TestPrivateLinkView, self).setUp()
self.user = AuthUserFactory() # Is NOT a contributor
self.project = ProjectFactory(is_public=False)
self.link = PrivateLinkFactory(anonymous=True)
self.link.nodes.add(self.project)
self.link.save()
self.project_url = self.project.web_url_for('view_project')
def test_anonymous_link_hide_contributor(self):
res = self.app.get(self.project_url, {'view_only': self.link.key})
assert_in("Anonymous Contributors", res.body)
assert_not_in(self.user.fullname, res)
def test_anonymous_link_hides_citations(self):
res = self.app.get(self.project_url, {'view_only': self.link.key})
assert_not_in('Citation:', res)
def test_no_warning_for_read_only_user_with_valid_link(self):
link2 = PrivateLinkFactory(anonymous=False)
link2.nodes.add(self.project)
link2.save()
self.project.add_contributor(
self.user,
permissions=['read'],
save=True,
)
res = self.app.get(self.project_url, {'view_only': link2.key},
auth=self.user.auth)
assert_not_in(
"is being viewed through a private, view-only link. "
"Anyone with the link can view this project. Keep "
"the link safe.",
res.body
)
def test_no_warning_for_read_only_user_with_invalid_link(self):
self.project.add_contributor(
self.user,
permissions=['read'],
save=True,
)
res = self.app.get(self.project_url, {'view_only': "not_valid"},
auth=self.user.auth)
assert_not_in(
"is being viewed through a private, view-only link. "
"Anyone with the link can view this project. Keep "
"the link safe.",
res.body
)
class TestMergingAccounts(OsfTestCase):
def setUp(self):
super(TestMergingAccounts, self).setUp()
self.user = UserFactory.build()
self.user.fullname = "tess' test string"
self.user.set_password('science')
self.user.save()
self.dupe = UserFactory.build()
self.dupe.set_password('example')
self.dupe.save()
def test_merged_user_is_not_shown_as_a_contributor(self):
project = ProjectFactory(is_public=True)
# Both the master and dupe are contributors
project.add_contributor(self.dupe, log=False)
project.add_contributor(self.user, log=False)
project.save()
# At the project page, both are listed as contributors
res = self.app.get(project.url).maybe_follow()
assert_in_html(self.user.fullname, res)
assert_in_html(self.dupe.fullname, res)
# The accounts are merged
self.user.merge_user(self.dupe)
self.user.save()
# Now only the master user is shown at the project page
res = self.app.get(project.url).maybe_follow()
assert_in_html(self.user.fullname, res)
assert_true(self.dupe.is_merged)
assert_not_in(self.dupe.fullname, res)
def test_merged_user_has_alert_message_on_profile(self):
# Master merges dupe
self.user.merge_user(self.dupe)
self.user.save()
# At the dupe user's profile there is an alert message at the top
# indicating that the user is merged
res = self.app.get('/profile/{0}/'.format(self.dupe._primary_key)).maybe_follow()
assert_in('This account has been merged', res)
# FIXME: These affect search in development environment. So need to migrate solr after running.
# # Remove this side effect.
@unittest.skipIf(not settings.SEARCH_ENGINE, 'Skipping because search is disabled')
class TestSearching(OsfTestCase):
'''Test searching using the search bar. NOTE: These may affect the
Solr database. May need to migrate after running these.
'''
def setUp(self):
super(TestSearching, self).setUp()
import website.search.search as search
search.delete_all()
self.user = AuthUserFactory()
self.auth = self.user.auth
@unittest.skip(reason='¯\_(ツ)_/¯ knockout.')
def test_a_user_from_home_page(self):
user = UserFactory()
# Goes to home page
res = self.app.get('/').maybe_follow()
# Fills search form
form = res.forms['searchBar']
form['q'] = user.fullname
res = form.submit().maybe_follow()
# The username shows as a search result
assert_in(user.fullname, res)
@unittest.skip(reason='¯\_(ツ)_/¯ knockout.')
def test_a_public_project_from_home_page(self):
project = ProjectFactory(title='Foobar Project', is_public=True)
# Searches a part of the name
res = self.app.get('/').maybe_follow()
project.reload()
form = res.forms['searchBar']
form['q'] = 'Foobar'
res = form.submit().maybe_follow()
# A link to the project is shown as a result
assert_in('Foobar Project', res)
@unittest.skip(reason='¯\_(ツ)_/¯ knockout.')
def test_a_public_component_from_home_page(self):
component = NodeFactory(title='Foobar Component', is_public=True)
# Searches a part of the name
res = self.app.get('/').maybe_follow()
component.reload()
form = res.forms['searchBar']
form['q'] = 'Foobar'
res = form.submit().maybe_follow()
# A link to the component is shown as a result
assert_in('Foobar Component', res)
class TestShortUrls(OsfTestCase):
def setUp(self):
super(TestShortUrls, self).setUp()
self.user = AuthUserFactory()
self.auth = self.user.auth
self.consolidate_auth = Auth(user=self.user)
self.project = ProjectFactory(creator=self.user)
# A non-project componenet
self.component = NodeFactory(parent=self.project, category='hypothesis', creator=self.user)
# Hack: Add some logs to component; should be unnecessary pending
# improvements to factories from @rliebz
self.component.set_privacy('public', auth=self.consolidate_auth)
self.component.set_privacy('private', auth=self.consolidate_auth)
self.wiki = NodeWikiFactory(user=self.user, node=self.component)
def _url_to_body(self, url):
return self.app.get(
url,
auth=self.auth
).maybe_follow(
auth=self.auth,
).normal_body
def test_project_url(self):
assert_equal(
self._url_to_body(self.project.deep_url),
self._url_to_body(self.project.url),
)
def test_component_url(self):
assert_equal(
self._url_to_body(self.component.deep_url),
self._url_to_body(self.component.url),
)
def test_wiki_url(self):
assert_equal(
self._url_to_body(self.wiki.deep_url),
self._url_to_body(self.wiki.url),
)
class TestClaiming(OsfTestCase):
def setUp(self):
super(TestClaiming, self).setUp()
self.referrer = AuthUserFactory()
self.project = ProjectFactory(creator=self.referrer, is_public=True)
def test_correct_name_shows_in_contributor_list(self):
name1, email = fake.name(), fake.email()
UnregUserFactory(fullname=name1, email=email)
name2, email = fake.name(), fake.email()
# Added with different name
self.project.add_unregistered_contributor(fullname=name2,
email=email, auth=Auth(self.referrer))
self.project.save()
res = self.app.get(self.project.url, auth=self.referrer.auth)
# Correct name is shown
assert_in_html(name2, res)
assert_not_in(name1, res)
def test_user_can_set_password_on_claim_page(self):
name, email = fake.name(), fake.email()
new_user = self.project.add_unregistered_contributor(
email=email,
fullname=name,
auth=Auth(self.referrer)
)
self.project.save()
claim_url = new_user.get_claim_url(self.project._primary_key)
res = self.app.get(claim_url)
self.project.reload()
assert_in('Set Password', res)
form = res.forms['setPasswordForm']
#form['username'] = new_user.username #Removed as long as E-mail can't be updated.
form['password'] = 'killerqueen'
form['password2'] = 'killerqueen'
res = form.submit().follow()
new_user.reload()
assert_true(new_user.check_password('killerqueen'))
def test_sees_is_redirected_if_user_already_logged_in(self):
name, email = fake.name(), fake.email()
new_user = self.project.add_unregistered_contributor(
email=email,
fullname=name,
auth=Auth(self.referrer)
)
self.project.save()
existing = AuthUserFactory()
claim_url = new_user.get_claim_url(self.project._primary_key)
# a user is already logged in
res = self.app.get(claim_url, auth=existing.auth, expect_errors=True)
assert_equal(res.status_code, 302)
def test_unregistered_users_names_are_project_specific(self):
name1, name2, email = fake.name(), fake.name(), fake.email()
project2 = ProjectFactory(creator=self.referrer)
# different projects use different names for the same unreg contributor
self.project.add_unregistered_contributor(
email=email,
fullname=name1,
auth=Auth(self.referrer)
)
self.project.save()
project2.add_unregistered_contributor(
email=email,
fullname=name2,
auth=Auth(self.referrer)
)
project2.save()
self.app.authenticate(*self.referrer.auth)
# Each project displays a different name in the contributor list
res = self.app.get(self.project.url)
assert_in_html(name1, res)
res2 = self.app.get(project2.url)
assert_in_html(name2, res2)
@unittest.skip("as long as E-mails cannot be changed")
def test_cannot_set_email_to_a_user_that_already_exists(self):
reg_user = UserFactory()
name, email = fake.name(), fake.email()
new_user = self.project.add_unregistered_contributor(
email=email,
fullname=name,
auth=Auth(self.referrer)
)
self.project.save()
# Goes to claim url and successfully claims account
claim_url = new_user.get_claim_url(self.project._primary_key)
res = self.app.get(claim_url)
self.project.reload()
assert_in('Set Password', res)
form = res.forms['setPasswordForm']
# Fills out an email that is the username of another user
form['username'] = reg_user.username
form['password'] = 'killerqueen'
form['password2'] = 'killerqueen'
res = form.submit().maybe_follow(expect_errors=True)
assert_in(
language.ALREADY_REGISTERED.format(email=reg_user.username),
res
)
def test_correct_display_name_is_shown_at_claim_page(self):
original_name = fake.name()
unreg = UnregUserFactory(fullname=original_name)
different_name = fake.name()
new_user = self.project.add_unregistered_contributor(
email=unreg.username,
fullname=different_name,
auth=Auth(self.referrer),
)
self.project.save()
claim_url = new_user.get_claim_url(self.project._primary_key)
res = self.app.get(claim_url)
# Correct name (different_name) should be on page
assert_in_html(different_name, res)
class TestConfirmingEmail(OsfTestCase):
def setUp(self):
super(TestConfirmingEmail, self).setUp()
self.user = UnconfirmedUserFactory()
self.confirmation_url = self.user.get_confirmation_url(
self.user.username,
external=False,
)
self.confirmation_token = self.user.get_confirmation_token(
self.user.username
)
def test_cannot_remove_another_user_email(self):
user1 = AuthUserFactory()
user2 = AuthUserFactory()
url = api_url_for('update_user')
header = {'id': user1.username, 'emails': [{'address': user1.username}]}
res = self.app.put_json(url, header, auth=user2.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_cannnot_make_primary_email_for_another_user(self):
user1 = AuthUserFactory()
user2 = AuthUserFactory()
email = 'test@cos.io'
user1.emails.create(address=email)
user1.save()
url = api_url_for('update_user')
header = {'id': user1.username,
'emails': [{'address': user1.username, 'primary': False, 'confirmed': True},
{'address': email, 'primary': True, 'confirmed': True}
]}
res = self.app.put_json(url, header, auth=user2.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_cannnot_add_email_for_another_user(self):
user1 = AuthUserFactory()
user2 = AuthUserFactory()
email = 'test@cos.io'
url = api_url_for('update_user')
header = {'id': user1.username,
'emails': [{'address': user1.username, 'primary': True, 'confirmed': True},
{'address': email, 'primary': False, 'confirmed': False}
]}
res = self.app.put_json(url, header, auth=user2.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_error_page_if_confirm_link_is_used(self):
self.user.confirm_email(self.confirmation_token)
self.user.save()
res = self.app.get(self.confirmation_url, expect_errors=True)
assert_in(auth_exc.InvalidTokenError.message_short, res)
assert_equal(res.status_code, http.BAD_REQUEST)
class TestClaimingAsARegisteredUser(OsfTestCase):
def setUp(self):
super(TestClaimingAsARegisteredUser, self).setUp()
self.referrer = AuthUserFactory()
self.project = ProjectFactory(creator=self.referrer, is_public=True)
name, email = fake.name(), fake.email()
self.user = self.project.add_unregistered_contributor(
fullname=name,
email=email,
auth=Auth(user=self.referrer)
)
self.project.save()
def test_claim_user_registered_with_correct_password(self):
reg_user = AuthUserFactory() # NOTE: AuthUserFactory sets password as 'queenfan86'
url = self.user.get_claim_url(self.project._primary_key)
# Follow to password re-enter page
res = self.app.get(url, auth=reg_user.auth).follow(auth=reg_user.auth)
# verify that the "Claim Account" form is returned
assert_in('Claim Contributor', res.body)
form = res.forms['claimContributorForm']
form['password'] = 'queenfan86'
res = form.submit(auth=reg_user.auth)
res = res.follow(auth=reg_user.auth)
self.project.reload()
self.user.reload()
# user is now a contributor to the project
assert_in(reg_user, self.project.contributors)
# the unregistered user (self.user) is removed as a contributor, and their
assert_not_in(self.user, self.project.contributors)
# unclaimed record for the project has been deleted
assert_not_in(self.project, self.user.unclaimed_records)
class TestExplorePublicActivity(OsfTestCase):
def setUp(self):
super(TestExplorePublicActivity, self).setUp()
self.project = ProjectFactory(is_public=True)
self.registration = RegistrationFactory(project=self.project)
self.private_project = ProjectFactory(title="Test private project")
self.popular_project = ProjectFactory(is_public=True)
self.popular_registration = RegistrationFactory(project=self.project, is_public=True)
# Add project to new and noteworthy projects
self.new_and_noteworthy_links_node = ProjectFactory(is_public=True)
self.new_and_noteworthy_links_node._id = settings.NEW_AND_NOTEWORTHY_LINKS_NODE
self.new_and_noteworthy_links_node.add_pointer(self.project, auth=Auth(self.new_and_noteworthy_links_node.creator), save=True)
# Set up popular projects and registrations
self.popular_links_node = ProjectFactory(is_public=True)
settings.POPULAR_LINKS_NODE = self.popular_links_node._id
self.popular_links_node.add_pointer(self.popular_project, auth=Auth(self.popular_links_node.creator), save=True)
self.popular_links_registrations = ProjectFactory(is_public=True)
settings.POPULAR_LINKS_REGISTRATIONS = self.popular_links_registrations._id
self.popular_links_registrations.add_pointer(self.popular_registration, auth=Auth(self.popular_links_registrations.creator), save=True)
def test_explore_page_loads_when_settings_not_configured(self):
old_settings_values = settings.POPULAR_LINKS_NODE, settings.NEW_AND_NOTEWORTHY_LINKS_NODE, settings.POPULAR_LINKS_REGISTRATIONS
settings.POPULAR_LINKS_NODE = 'notanode'
settings.NEW_AND_NOTEWORTHY_LINKS_NODE = 'alsototallywrong'
settings.POPULAR_LINKS_REGISTRATIONS = 'nopenope'
url = self.project.web_url_for('activity')
res = self.app.get(url)
assert_equal(res.status_code, 200)
settings.POPULAR_LINKS_NODE, settings.NEW_AND_NOTEWORTHY_LINKS_NODE, settings.POPULAR_LINKS_REGISTRATIONS = old_settings_values
def test_new_and_noteworthy_and_popular_nodes_show_in_explore_activity(self):
url = self.project.web_url_for('activity')
res = self.app.get(url)
assert_equal(res.status_code, 200)
# New and Noteworthy
assert_in(str(self.project.title), res)
assert_in(str(self.project.date_created.date()), res)
assert_in(str(self.registration.title), res)
assert_in(str(self.registration.registered_date.date()), res)
assert_not_in(str(self.private_project.title), res)
# Popular Projects and Registrations
assert_in(str(self.popular_project.title), res)
assert_in(str(self.popular_project.date_created.date()), res)
assert_in(str(self.popular_registration.title), res)
assert_in(str(self.popular_registration.registered_date.date()), res)
class TestResendConfirmation(OsfTestCase):
def setUp(self):
super(TestResendConfirmation, self).setUp()
self.unconfirmed_user = UnconfirmedUserFactory()
self.confirmed_user = UserFactory()
self.get_url = web_url_for('resend_confirmation_get')
self.post_url = web_url_for('resend_confirmation_post')
# test that resend confirmation page is load correctly
def test_resend_confirmation_get(self):
res = self.app.get(self.get_url)
assert_equal(res.status_code, 200)
assert_in('Resend Confirmation', res.body)
assert_in('resendForm', res.forms)
# test that unconfirmed user can receive resend confirmation email
@mock.patch('framework.auth.views.mails.send_mail')
def test_can_receive_resend_confirmation_email(self, mock_send_mail):
# load resend confirmation page and submit email
res = self.app.get(self.get_url)
form = res.forms['resendForm']
form['email'] = self.unconfirmed_user.unconfirmed_emails[0]
res = form.submit()
# check email, request and response
assert_true(mock_send_mail.called)
assert_equal(res.status_code, 200)
assert_equal(res.request.path, self.post_url)
assert_in_html('If there is an OSF account', res)
# test that confirmed user cannot receive resend confirmation email
@mock.patch('framework.auth.views.mails.send_mail')
def test_cannot_receive_resend_confirmation_email_1(self, mock_send_mail):
# load resend confirmation page and submit email
res = self.app.get(self.get_url)
form = res.forms['resendForm']
form['email'] = self.confirmed_user.emails.first().address
res = form.submit()
# check email, request and response
assert_false(mock_send_mail.called)
assert_equal(res.status_code, 200)
assert_equal(res.request.path, self.post_url)
assert_in_html('has already been confirmed', res)
# test that non-existing user cannot receive resend confirmation email
@mock.patch('framework.auth.views.mails.send_mail')
def test_cannot_receive_resend_confirmation_email_2(self, mock_send_mail):
# load resend confirmation page and submit email
res = self.app.get(self.get_url)
form = res.forms['resendForm']
form['email'] = 'random@random.com'
res = form.submit()
# check email, request and response
assert_false(mock_send_mail.called)
assert_equal(res.status_code, 200)
assert_equal(res.request.path, self.post_url)
assert_in_html('If there is an OSF account', res)
# test that user cannot submit resend confirmation request too quickly
@mock.patch('framework.auth.views.mails.send_mail')
def test_cannot_resend_confirmation_twice_quickly(self, mock_send_mail):
# load resend confirmation page and submit email
res = self.app.get(self.get_url)
form = res.forms['resendForm']
form['email'] = self.unconfirmed_user.email
res = form.submit()
res = form.submit()
# check request and response
assert_equal(res.status_code, 200)
assert_in_html('Please wait', res)
class TestForgotPassword(OsfTestCase):
def setUp(self):
super(TestForgotPassword, self).setUp()
self.user = UserFactory()
self.auth_user = AuthUserFactory()
self.get_url = web_url_for('forgot_password_get')
self.post_url = web_url_for('forgot_password_post')
self.user.verification_key_v2 = {}
self.user.save()
# log users out before they land on forgot password page
def test_forgot_password_logs_out_user(self):
# visit forgot password link while another user is logged in
res = self.app.get(self.get_url, auth=self.auth_user.auth)
# check redirection to CAS logout
assert_equal(res.status_code, 302)
location = res.headers.get('Location')
assert_not_in('reauth', location)
assert_in('logout?service=', location)
assert_in('forgotpassword', location)
# test that forgot password page is loaded correctly
def test_get_forgot_password(self):
res = self.app.get(self.get_url)
assert_equal(res.status_code, 200)
assert_in('Forgot Password', res.body)
assert_in('forgotPasswordForm', res.forms)
# test that existing user can receive reset password email
@mock.patch('framework.auth.views.mails.send_mail')
def test_can_receive_reset_password_email(self, mock_send_mail):
# load forgot password page and submit email
res = self.app.get(self.get_url)
form = res.forms['forgotPasswordForm']
form['forgot_password-email'] = self.user.username
res = form.submit()
# check mail was sent
assert_true(mock_send_mail.called)
# check http 200 response
assert_equal(res.status_code, 200)
# check request URL is /forgotpassword
assert_equal(res.request.path, self.post_url)
# check push notification
assert_in_html('If there is an OSF account', res)
assert_not_in_html('Please wait', res)
# check verification_key_v2 is set
self.user.reload()
assert_not_equal(self.user.verification_key_v2, {})
# test that non-existing user cannot receive reset password email
@mock.patch('framework.auth.views.mails.send_mail')
def test_cannot_receive_reset_password_email(self, mock_send_mail):
# load forgot password page and submit email
res = self.app.get(self.get_url)
form = res.forms['forgotPasswordForm']
form['forgot_password-email'] = 'fake' + self.user.username
res = form.submit()
# check mail was not sent
assert_false(mock_send_mail.called)
# check http 200 response
assert_equal(res.status_code, 200)
# check request URL is /forgotpassword
assert_equal(res.request.path, self.post_url)
# check push notification
assert_in_html('If there is an OSF account', res)
assert_not_in_html('Please wait', res)
# check verification_key_v2 is not set
self.user.reload()
assert_equal(self.user.verification_key_v2, {})
# test that user cannot submit forgot password request too quickly
@mock.patch('framework.auth.views.mails.send_mail')
def test_cannot_reset_password_twice_quickly(self, mock_send_mail):
# load forgot password page and submit email
res = self.app.get(self.get_url)
form = res.forms['forgotPasswordForm']
form['forgot_password-email'] = self.user.username
res = form.submit()
res = form.submit()
# check http 200 response
assert_equal(res.status_code, 200)
# check push notification
assert_in_html('Please wait', res)
assert_not_in_html('If there is an OSF account', res)
@unittest.skip('Public projects/components are dynamically loaded now.')
class TestAUserProfile(OsfTestCase):
def setUp(self):
OsfTestCase.setUp(self)
self.user = AuthUserFactory()
self.me = AuthUserFactory()
self.project = ProjectFactory(creator=self.me, is_public=True, title=fake.bs())
self.component = NodeFactory(creator=self.me, parent=self.project, is_public=True, title=fake.bs())
# regression test for https://github.com/CenterForOpenScience/osf.io/issues/2623
def test_has_public_projects_and_components(self):
# I go to my own profile
url = web_url_for('profile_view_id', uid=self.me._primary_key)
# I see the title of both my project and component
res = self.app.get(url, auth=self.me.auth)
assert_in_html(self.component.title, res)
assert_in_html(self.project.title, res)
# Another user can also see my public project and component
url = web_url_for('profile_view_id', uid=self.me._primary_key)
# I see the title of both my project and component
res = self.app.get(url, auth=self.user.auth)
assert_in_html(self.component.title, res)
assert_in_html(self.project.title, res)
def test_shows_projects_with_many_contributors(self):
# My project has many contributors
for _ in range(5):
user = UserFactory()
self.project.add_contributor(user, auth=Auth(self.project.creator), save=True)
# I go to my own profile
url = web_url_for('profile_view_id', uid=self.me._primary_key)
res = self.app.get(url, auth=self.me.auth)
# I see '3 more' as a link
assert_in('3 more', res)
res = res.click('3 more')
assert_equal(res.request.path, self.project.url)
def test_has_no_public_projects_or_components_on_own_profile(self):
# User goes to their profile
url = web_url_for('profile_view_id', uid=self.user._id)
res = self.app.get(url, auth=self.user.auth)
# user has no public components/projects
assert_in('You have no public projects', res)
assert_in('You have no public components', res)
def test_user_no_public_projects_or_components(self):
# I go to other user's profile
url = web_url_for('profile_view_id', uid=self.user._id)
# User has no public components/projects
res = self.app.get(url, auth=self.me.auth)
assert_in('This user has no public projects', res)
assert_in('This user has no public components', res)
# regression test
def test_does_not_show_registrations(self):
project = ProjectFactory(creator=self.user)
component = NodeFactory(parent=project, creator=self.user, is_public=False)
# User has a registration with public components
reg = RegistrationFactory(project=component.parent_node, creator=self.user, is_public=True)
for each in reg.nodes:
each.is_public = True
each.save()
# I go to other user's profile
url = web_url_for('profile_view_id', uid=self.user._id)
# Registration does not appear on profile
res = self.app.get(url, auth=self.me.auth)
assert_in('This user has no public components', res)
assert_not_in(reg.title, res)
assert_not_in(reg.nodes[0].title, res)
class TestPreprintBannerView(OsfTestCase):
def setUp(self):
super(TestPreprintBannerView, self).setUp()
self.admin = AuthUserFactory()
self.provider_one = PreprintProviderFactory()
self.provider_two = PreprintProviderFactory()
self.project_one = ProjectFactory(creator=self.admin, is_public=True)
self.project_two = ProjectFactory(creator=self.admin, is_public=True)
self.project_three = ProjectFactory(creator=self.admin, is_public=True)
self.subject_one = SubjectFactory()
self.subject_two = SubjectFactory()
self.file_one = test_utils.create_test_file(self.project_one, self.admin, 'mgla.pdf')
self.file_two = test_utils.create_test_file(self.project_two, self.admin, 'saor.pdf')
self.published_preprint = PreprintFactory(creator=self.admin, filename='mgla.pdf', provider=self.provider_one, subjects=[[self.subject_one._id]], project=self.project_one, is_published=True)
self.unpublished_preprint = PreprintFactory(creator=self.admin, filename='saor.pdf', provider=self.provider_two, subjects=[[self.subject_two._id]], project=self.project_two, is_published=False)
def test_public_project_published_preprint(self):
url = self.project_one.web_url_for('view_project')
res = self.app.get(url, auth=self.admin.auth)
assert_not_in('has a preprint, but has been made Private. Make your preprint discoverable by making this', res.body)
def test_private_project_published_preprint(self):
self.project_one.is_public = False
self.project_one.save()
url = self.project_one.web_url_for('view_project')
res = self.app.get(url, auth=self.admin.auth)
assert_in('has a preprint, but has been made Private. Make your preprint discoverable by making this', res.body)
def test_public_project_unpublished_preprint(self):
url = self.project_two.web_url_for('view_project')
res = self.app.get(url, auth=self.admin.auth)
assert_not_in('has a preprint, but has been made Private. Make your preprint discoverable by making this', res.body)
def test_private_project_unpublished_preprint(self):
# Do not show banner on unpublished preprints
self.project_two.is_public = False
self.project_two.save()
url = self.project_two.web_url_for('view_project')
res = self.app.get(url, auth=self.admin.auth)
assert_not_in('has a preprint, but has been made Private. Make your preprint discoverable by making this', res.body)
def test_public_project_no_preprint(self):
url = self.project_three.web_url_for('view_project')
res = self.app.get(url, auth=self.admin.auth)
assert_not_in('has a preprint, but has been made Private. Make your preprint discoverable by making this', res.body)
def test_private_project_no_preprint(self):
self.project_three.is_public = False
self.project_three.save()
url = self.project_three.web_url_for('view_project')
res = self.app.get(url, auth=self.admin.auth)
assert_not_in('has a preprint, but has been made Private. Make your preprint discoverable by making this', res.body)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "979c231cf3d29bd730bed3bf0f0eb4ea",
"timestamp": "",
"source": "github",
"line_count": 1114,
"max_line_length": 201,
"avg_line_length": 40.87342908438061,
"alnum_prop": 0.6384600180089166,
"repo_name": "chrisseto/osf.io",
"id": "2482feb4196daa83c334b8faaa95f49f6bf4412b",
"size": "45594",
"binary": false,
"copies": "2",
"ref": "refs/heads/feature/reviews",
"path": "tests/test_webtests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "144093"
},
{
"name": "HTML",
"bytes": "211713"
},
{
"name": "JavaScript",
"bytes": "1740074"
},
{
"name": "Mako",
"bytes": "592713"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "7895181"
}
],
"symlink_target": ""
}
|
import calendar
import datetime
import math
from django.conf import settings
from django.contrib.auth.management import create_permissions
from django.contrib.auth.models import Group, Permission
from django.core.exceptions import ValidationError
from django.core.mail.backends.smtp import EmailBackend as SMTPBackend
from django.db.models import get_app, get_models
from django.utils import timezone
from database_email_backend.backend import DatabaseEmailBackend
class DevEmailBackend(DatabaseEmailBackend):
def send_messages(self, email_messages):
"""
Intercept emails originating from SERVER_EMAIL and send them
through SMTPBackend to notify ADMINS.
"""
server_email = getattr(settings, 'SERVER_EMAIL', None)
admin_messages = filter(lambda x: x.from_email == server_email,
email_messages)
if admin_messages:
smtp_backend = SMTPBackend()
smtp_backend.send_messages(admin_messages)
return super(DevEmailBackend, self).send_messages(email_messages)
def get_object_or_none(model_class, **kwargs):
"""Identical to get_object_or_404, except instead of returning Http404,
this returns None.
"""
try:
return model_class.objects.get(**kwargs)
except (model_class.DoesNotExist, model_class.MultipleObjectsReturned):
return None
def get_or_create_instance(model_class, **kwargs):
"""Identical to get_or_create, expect instead of saving the new
object in the database, this just creates an instance.
"""
try:
return model_class.objects.get(**kwargs), False
except model_class.DoesNotExist:
return model_class(**kwargs), True
def go_back_n_months(date, n=1, first_day=False):
"""Return date minus n months."""
if first_day:
day = 1
else:
day = date.day
tmp_date = datetime.datetime(year=date.year, month=date.month, day=15)
tmp_date -= datetime.timedelta(days=31 * n)
last_day_of_month = calendar.monthrange(tmp_date.year, tmp_date.month)[1]
return datetime.datetime(year=tmp_date.year, month=tmp_date.month,
day=min(day, last_day_of_month))
def go_fwd_n_months(date, n=1, first_day=False):
"""Return date plus n months."""
if first_day:
day = 1
else:
day = date.day
tmp_date = datetime.datetime(year=date.year, month=date.month, day=15)
tmp_date += datetime.timedelta(days=31 * n)
last_day_of_month = calendar.monthrange(tmp_date.year, tmp_date.month)[1]
return datetime.datetime(year=tmp_date.year, month=tmp_date.month,
day=min(day, last_day_of_month))
def latest_object_or_none(model_class, field_name=None):
"""Identical to Model.latest, except instead of throwing exceptions,
this returns None.
"""
try:
return model_class.objects.latest(field_name)
except (model_class.DoesNotExist, model_class.MultipleObjectsReturned):
return None
def month2number(month):
"""Convert to month name to number."""
return datetime.datetime.strptime(month, '%B').month
def number2month(month, full_name=True):
"""Convert to month name to number."""
if full_name:
format = '%B'
else:
format = '%b'
return datetime.datetime(year=2000, day=1, month=month).strftime(format)
def add_permissions_to_groups(app, permissions):
"""Assign permissions to groups."""
# Make sure that all app permissions are created.
# Related to South bug http://south.aeracode.org/ticket/211
app_obj = get_app(app)
create_permissions(app_obj, get_models(app_mod=app_obj), verbosity=2)
for perm_name, groups in permissions.iteritems():
for group_name in groups:
group, created = Group.objects.get_or_create(name=group_name)
permission = Permission.objects.get(codename=perm_name,
content_type__app_label=app)
group.permissions.add(permission)
def validate_datetime(data, **kwargs):
"""Validate that /data/ is of type datetime.
Used to validate DateTime form fields, to ensure that user select
a valid date, thus a date that can be converted to a datetime
obj. Example of invalid date is 'Sept 31 2012'.
"""
if not isinstance(data, datetime.datetime):
raise ValidationError('Date chosen is invalid.')
return data
def get_date(days=0, weeks=0):
"""Return a date in UTC timezone, given an offset in days and or weeks.
The calculation is based on the current date and the
offset can be either positive or negative.
"""
return (timezone.now().date() +
datetime.timedelta(days=days, weeks=weeks))
def daterange(start_date, end_date):
"""Generator with a range of dates given a starting and ending point."""
for i in range((end_date - start_date).days + 1):
yield start_date + datetime.timedelta(i)
def get_quarter(date=None):
"""Return the quarter for this date and datetime of Q's start."""
if not date:
date = timezone.now()
quarter = int(math.ceil(date.month/3.0))
first_month_of_quarter = 1 + 3*(quarter-1)
quarter_start = datetime.datetime(date.year, first_month_of_quarter, 1)
return (quarter, quarter_start)
|
{
"content_hash": "19165e8366263ef73769a3ed23108c0d",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 77,
"avg_line_length": 32.45454545454545,
"alnum_prop": 0.6666666666666666,
"repo_name": "chirilo/remo",
"id": "7a39d3c6686a02150d400f02215cc568e9c2eb2f",
"size": "5355",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "remo/base/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "993"
},
{
"name": "Batchfile",
"bytes": "4531"
},
{
"name": "CSS",
"bytes": "372453"
},
{
"name": "HTML",
"bytes": "373393"
},
{
"name": "JavaScript",
"bytes": "606447"
},
{
"name": "Makefile",
"bytes": "4630"
},
{
"name": "Puppet",
"bytes": "7140"
},
{
"name": "Python",
"bytes": "7483058"
},
{
"name": "Shell",
"bytes": "3221"
},
{
"name": "Smarty",
"bytes": "215"
},
{
"name": "TeX",
"bytes": "1525"
}
],
"symlink_target": ""
}
|
from fabric.api import sudo, env
from fabric.contrib.files import append
from fab_deploy2.base import nginx as base_nginx
from fab_deploy2.tasks import task_method
from fab_deploy2 import functions
class Nginx(base_nginx.Nginx):
"""
Installs nginx
"""
user = 'nginx'
group = 'nginx'
remote_config_path = '/etc/nginx/nginx.conf'
def _install_package(self):
installed = functions.execute_on_host('utils.install_package',
package_name='nginx',
remote="http://nginx.org/packages/rhel/6/noarch/RPMS/nginx-release-rhel-6-0.el6.ngx.noarch.rpm")
if installed:
sudo('chkconfig nginx on')
def _setup_logging(self):
# Done by package
pass
@task_method
def start(self):
functions.execute_on_host('utils.start_or_restart_service', name='nginx',
host=[env.host_string])
@task_method
def stop(self):
sudo('service nginx stop')
Nginx().as_tasks()
|
{
"content_hash": "b21a598e71d148c468b1b4aafbc1f1e0",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 116,
"avg_line_length": 27.833333333333332,
"alnum_prop": 0.6287425149700598,
"repo_name": "ff0000/red-fab-deploy2",
"id": "9782fe17cd3a9ded8397bb5173f5d617c4eec171",
"size": "1002",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fab_deploy2/operating_systems/redhat/nginx.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "7546"
},
{
"name": "Python",
"bytes": "197452"
},
{
"name": "Shell",
"bytes": "62903"
}
],
"symlink_target": ""
}
|
VERSION = (1, 0, 0)
from .decorators import job
from .queues import enqueue, get_connection, get_queue, get_scheduler, get_failed_queue
from .workers import get_worker
|
{
"content_hash": "95f604a77ccc3eda703cbb27b68f4f66",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 87,
"avg_line_length": 33.8,
"alnum_prop": 0.7633136094674556,
"repo_name": "1024inc/django-rq",
"id": "ea2045740997fc9f00007bf888e6cee420086b3d",
"size": "169",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_rq/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "28218"
},
{
"name": "Makefile",
"bytes": "173"
},
{
"name": "Python",
"bytes": "96432"
}
],
"symlink_target": ""
}
|
import tre
fz = tre.Fuzzyness(maxerr = 3)
print fz
pt = tre.compile("Don(ald( Ervin)?)? Knuth", tre.EXTENDED)
data = """
In addition to fundamental contributions in several branches of
theoretical computer science, Donnald Erwin Kuth is the creator of the
TeX computer typesetting system, the related METAFONT font definition
language and rendering system, and the Computer Modern family of
typefaces.
"""
m = pt.search(data, fz)
if m:
print m.groups()
print m[0]
|
{
"content_hash": "6ed272e98e3df47b9cef8fea5edd3fd6",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 70,
"avg_line_length": 23.85,
"alnum_prop": 0.7421383647798742,
"repo_name": "amlweems/tre",
"id": "38f23308cc774e630eaa1f687e7de51e4c7b2a2e",
"size": "477",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "python/example.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "353517"
},
{
"name": "Python",
"bytes": "1702"
},
{
"name": "Shell",
"bytes": "7260"
}
],
"symlink_target": ""
}
|
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Navigator(object):
def setupUi(self, Navigator):
Navigator.setObjectName(_fromUtf8("Navigator"))
Navigator.resize(536, 389)
self.gridLayout = QtGui.QGridLayout(Navigator)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.imageTree = QtGui.QTreeWidget(Navigator)
self.imageTree.setHeaderHidden(False)
self.imageTree.setColumnCount(2)
self.imageTree.setObjectName(_fromUtf8("imageTree"))
self.imageTree.header().setDefaultSectionSize(294)
self.gridLayout.addWidget(self.imageTree, 3, 0, 1, 1)
self.frame = QtGui.QFrame(Navigator)
self.frame.setEnabled(True)
self.frame.setFrameShape(QtGui.QFrame.NoFrame)
self.frame.setFrameShadow(QtGui.QFrame.Plain)
self.frame.setLineWidth(0)
self.frame.setObjectName(_fromUtf8("frame"))
self.horizontalLayout = QtGui.QHBoxLayout(self.frame)
self.horizontalLayout.setMargin(1)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.firstButton = QtGui.QToolButton(self.frame)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/icon/Resources/first.ico")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.firstButton.setIcon(icon)
self.firstButton.setObjectName(_fromUtf8("firstButton"))
self.horizontalLayout.addWidget(self.firstButton)
self.prevButton = QtGui.QToolButton(self.frame)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(_fromUtf8(":/icon/Resources/prev.ico")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.prevButton.setIcon(icon1)
self.prevButton.setObjectName(_fromUtf8("prevButton"))
self.horizontalLayout.addWidget(self.prevButton)
self.imageNumEdit = QtGui.QLineEdit(self.frame)
self.imageNumEdit.setEnabled(True)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.imageNumEdit.sizePolicy().hasHeightForWidth())
self.imageNumEdit.setSizePolicy(sizePolicy)
self.imageNumEdit.setBaseSize(QtCore.QSize(0, 0))
self.imageNumEdit.setObjectName(_fromUtf8("imageNumEdit"))
self.horizontalLayout.addWidget(self.imageNumEdit)
self.nextButton = QtGui.QToolButton(self.frame)
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(_fromUtf8(":/icon/Resources/next.ico")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.nextButton.setIcon(icon2)
self.nextButton.setPopupMode(QtGui.QToolButton.DelayedPopup)
self.nextButton.setObjectName(_fromUtf8("nextButton"))
self.horizontalLayout.addWidget(self.nextButton)
self.lastButton = QtGui.QToolButton(self.frame)
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap(_fromUtf8(":/icon/Resources/last.ico")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.lastButton.setIcon(icon3)
self.lastButton.setObjectName(_fromUtf8("lastButton"))
self.horizontalLayout.addWidget(self.lastButton)
self.imageNumSlider = QtGui.QSlider(self.frame)
self.imageNumSlider.setMinimum(1)
self.imageNumSlider.setMaximum(1)
self.imageNumSlider.setTracking(True)
self.imageNumSlider.setOrientation(QtCore.Qt.Horizontal)
self.imageNumSlider.setObjectName(_fromUtf8("imageNumSlider"))
self.horizontalLayout.addWidget(self.imageNumSlider)
self.imageTotallabel = QtGui.QLabel(self.frame)
self.imageTotallabel.setObjectName(_fromUtf8("imageTotallabel"))
self.horizontalLayout.addWidget(self.imageTotallabel)
self.gridLayout.addWidget(self.frame, 1, 0, 1, 1)
self.actionNextImage = QtGui.QAction(Navigator)
self.actionNextImage.setObjectName(_fromUtf8("actionNextImage"))
self.retranslateUi(Navigator)
QtCore.QMetaObject.connectSlotsByName(Navigator)
def retranslateUi(self, Navigator):
Navigator.setWindowTitle(_translate("Navigator", "Navigator", None))
self.imageTree.headerItem().setText(0, _translate("Navigator", "Name", None))
self.imageTree.headerItem().setText(1, _translate("Navigator", "Properties", None))
self.firstButton.setText(_translate("Navigator", "...", None))
self.prevButton.setText(_translate("Navigator", "...", None))
self.nextButton.setText(_translate("Navigator", "...", None))
self.lastButton.setText(_translate("Navigator", "...", None))
self.imageTotallabel.setText(_translate("Navigator", "0", None))
self.actionNextImage.setText(_translate("Navigator", "nextImage", None))
|
{
"content_hash": "cb6c4846c1148cb5a7bac04a6c74b2c5",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 115,
"avg_line_length": 52.1,
"alnum_prop": 0.7040307101727448,
"repo_name": "rainer85ah/VisionViewer",
"id": "8351804bb5bd804bb5b4b91aa1614e40ff203c8a",
"size": "5441",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Viewer/Navigator_ui.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "114406"
}
],
"symlink_target": ""
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
r""" A JSON data encoder and decoder.
This Python module implements the JSON (http://json.org/) data
encoding format; a subset of ECMAScript (aka JavaScript) for encoding
primitive data types (numbers, strings, booleans, lists, and
associative arrays) in a language-neutral simple text-based syntax.
It can encode or decode between JSON formatted strings and native
Python data types. Normally you would use the encode() and decode()
functions defined by this module, but if you want more control over
the processing you can use the JSON class.
This implementation tries to be as completely cormforming to all
intricacies of the standards as possible. It can operate in strict
mode (which only allows JSON-compliant syntax) or a non-strict mode
(which allows much more of the whole ECMAScript permitted syntax).
This includes complete support for Unicode strings (including
surrogate-pairs for non-BMP characters), and all number formats
including negative zero and IEEE 754 non-numbers such a NaN or
Infinity.
The JSON/ECMAScript to Python type mappings are:
---JSON--- ---Python---
null None
undefined undefined (note 1)
Boolean (true,false) bool (True or False)
Integer int or long (note 2)
Float float
String str or unicode ( "..." or u"..." )
Array [a, ...] list ( [...] )
Object {a:b, ...} dict ( {...} )
-- Note 1. an 'undefined' object is declared in this module which
represents the native Python value for this type when in
non-strict mode.
-- Note 2. some ECMAScript integers may be up-converted to Python
floats, such as 1e+40. Also integer -0 is converted to
float -0, so as to preserve the sign (which ECMAScript requires).
In addition, when operating in non-strict mode, several IEEE 754
non-numbers are also handled, and are mapped to specific Python
objects declared in this module:
NaN (not a number) nan (float('nan'))
Infinity, +Infinity inf (float('inf'))
-Infinity neginf (float('-inf'))
When encoding Python objects into JSON, you may use types other than
native lists or dictionaries, as long as they support the minimal
interfaces required of all sequences or mappings. This means you can
use generators and iterators, tuples, UserDict subclasses, etc.
To make it easier to produce JSON encoded representations of user
defined classes, if the object has a method named json_equivalent(),
then it will call that method and attempt to encode the object
returned from it instead. It will do this recursively as needed and
before any attempt to encode the object using it's default
strategies. Note that any json_equivalent() method should return
"equivalent" Python objects to be encoded, not an already-encoded
JSON-formatted string. There is no such aid provided to decode
JSON back into user-defined classes as that would dramatically
complicate the interface.
When decoding strings with this module it may operate in either
strict or non-strict mode. The strict mode only allows syntax which
is conforming to RFC 4627 (JSON), while the non-strict allows much
more of the permissible ECMAScript syntax.
The following are permitted when processing in NON-STRICT mode:
* Unicode format control characters are allowed anywhere in the input.
* All Unicode line terminator characters are recognized.
* All Unicode white space characters are recognized.
* The 'undefined' keyword is recognized.
* Hexadecimal number literals are recognized (e.g., 0xA6, 0177).
* String literals may use either single or double quote marks.
* Strings may contain \x (hexadecimal) escape sequences, as well as the
\v and \0 escape sequences.
* Lists may have omitted (elided) elements, e.g., [,,,,,], with
missing elements interpreted as 'undefined' values.
* Object properties (dictionary keys) can be of any of the
types: string literals, numbers, or identifiers (the later of
which are treated as if they are string literals)---as permitted
by ECMAScript. JSON only permits strings literals as keys.
Concerning non-strict and non-ECMAScript allowances:
* Octal numbers: If you allow the 'octal_numbers' behavior (which
is never enabled by default), then you can use octal integers
and octal character escape sequences (per the ECMAScript
standard Annex B.1.2). This behavior is allowed, if enabled,
because it was valid JavaScript at one time.
* Multi-line string literals: Strings which are more than one
line long (contain embedded raw newline characters) are never
permitted. This is neither valid JSON nor ECMAScript. Some other
JSON implementations may allow this, but this module considers
that behavior to be a mistake.
References:
* JSON (JavaScript Object Notation)
<http://json.org/>
* RFC 4627. The application/json Media Type for JavaScript Object Notation (JSON)
<http://www.ietf.org/rfc/rfc4627.txt>
* ECMA-262 3rd edition (1999)
<http://www.ecma-international.org/publications/files/ecma-st/ECMA-262.pdf>
* IEEE 754-1985: Standard for Binary Floating-Point Arithmetic.
<http://www.cs.berkeley.edu/~ejr/Projects/ieee754/>
"""
__author__ = "Deron Meranda <http://deron.meranda.us/>"
__date__ = "2008-03-19"
__version__ = "1.3"
__credits__ = """Copyright (c) 2006-2008 Deron E. Meranda <http://deron.meranda.us/>
Licensed under GNU GPL 3.0 or later. See LICENSE.txt included with this software.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# ------------------------------
# useful global constants
content_type = 'application/json'
file_ext = 'json'
hexdigits = '0123456789ABCDEFabcdef'
octaldigits = '01234567'
# ----------------------------------------------------------------------
# Decimal and float types.
#
# If a JSON number can not be stored in a Python float without loosing
# precision and the Python has the decimal type, then we will try to
# use decimal instead of float. To make this determination we need to
# know the limits of the float type, but Python doesn't have an easy
# way to tell what the largest floating-point number it supports. So,
# we detemine the precision and scale of the float type by testing it.
try:
# decimal module was introduced in Python 2.4
import decimal
except ImportError:
decimal = None
def determine_float_precision():
"""Returns a tuple (significant_digits, max_exponent) for the float type.
"""
import math
# Just count the digits in pi. The last two decimal digits
# may only be partial digits, so discount for them.
whole, frac = repr(math.pi).split('.')
sigdigits = len(whole) + len(frac) - 2
# This is a simple binary search. We find the largest exponent
# that the float() type can handle without going infinite or
# raising errors.
maxexp = None
minv = 0; maxv = 1000
while True:
if minv+1 == maxv:
maxexp = minv - 1
break
elif maxv < minv:
maxexp = None
break
m = (minv + maxv) // 2
try:
f = repr(float( '1e+%d' % m ))
except ValueError:
f = None
else:
if not f or f[0] < '0' or f[0] > '9':
f = None
if not f:
# infinite
maxv = m
else:
minv = m
return sigdigits, maxexp
float_sigdigits, float_maxexp = determine_float_precision()
# ----------------------------------------------------------------------
# The undefined value.
#
# ECMAScript has an undefined value (similar to yet distinct from null).
# Neither Python or strict JSON have support undefined, but to allow
# JavaScript behavior we must simulate it.
class _undefined_class(object):
"""Represents the ECMAScript 'undefined' value."""
__slots__ = []
def __repr__(self):
return self.__module__ + '.undefined'
def __str__(self):
return 'undefined'
def __nonzero__(self):
return False
undefined = _undefined_class()
del _undefined_class
# ----------------------------------------------------------------------
# Non-Numbers: NaN, Infinity, -Infinity
#
# ECMAScript has official support for non-number floats, although
# strict JSON does not. Python doesn't either. So to support the
# full JavaScript behavior we must try to add them into Python, which
# is unfortunately a bit of black magic. If our python implementation
# happens to be built on top of IEEE 754 we can probably trick python
# into using real floats. Otherwise we must simulate it with classes.
def _nonnumber_float_constants():
"""Try to return the Nan, Infinity, and -Infinity float values.
This is unnecessarily complex because there is no standard
platform- independent way to do this in Python as the language
(opposed to some implementation of it) doesn't discuss
non-numbers. We try various strategies from the best to the
worst.
If this Python interpreter uses the IEEE 754 floating point
standard then the returned values will probably be real instances
of the 'float' type. Otherwise a custom class object is returned
which will attempt to simulate the correct behavior as much as
possible.
"""
try:
# First, try (mostly portable) float constructor. Works under
# Linux x86 (gcc) and some Unices.
nan = float('nan')
inf = float('inf')
neginf = float('-inf')
except ValueError:
try:
# Try the AIX (PowerPC) float constructors
nan = float('NaNQ')
inf = float('INF')
neginf = float('-INF')
except ValueError:
try:
# Next, try binary unpacking. Should work under
# platforms using IEEE 754 floating point.
import struct, sys
xnan = '7ff8000000000000'.decode('hex') # Quiet NaN
xinf = '7ff0000000000000'.decode('hex')
xcheck = 'bdc145651592979d'.decode('hex') # -3.14159e-11
# Could use float.__getformat__, but it is a new python feature,
# so we use sys.byteorder.
if sys.byteorder == 'big':
nan = struct.unpack('d', xnan)[0]
inf = struct.unpack('d', xinf)[0]
check = struct.unpack('d', xcheck)[0]
else:
nan = struct.unpack('d', xnan[::-1])[0]
inf = struct.unpack('d', xinf[::-1])[0]
check = struct.unpack('d', xcheck[::-1])[0]
neginf = - inf
if check != -3.14159e-11:
raise ValueError('Unpacking raw IEEE 754 floats does not work')
except (ValueError, TypeError):
# Punt, make some fake classes to simulate. These are
# not perfect though. For instance nan * 1.0 == nan,
# as expected, but 1.0 * nan == 0.0, which is wrong.
class nan(float):
"""An approximation of the NaN (not a number) floating point number."""
def __repr__(self): return 'nan'
def __str__(self): return 'nan'
def __add__(self,x): return self
def __radd__(self,x): return self
def __sub__(self,x): return self
def __rsub__(self,x): return self
def __mul__(self,x): return self
def __rmul__(self,x): return self
def __div__(self,x): return self
def __rdiv__(self,x): return self
def __divmod__(self,x): return (self,self)
def __rdivmod__(self,x): return (self,self)
def __mod__(self,x): return self
def __rmod__(self,x): return self
def __pow__(self,exp): return self
def __rpow__(self,exp): return self
def __neg__(self): return self
def __pos__(self): return self
def __abs__(self): return self
def __lt__(self,x): return False
def __le__(self,x): return False
def __eq__(self,x): return False
def __neq__(self,x): return True
def __ge__(self,x): return False
def __gt__(self,x): return False
def __complex__(self,*a): raise NotImplementedError('NaN can not be converted to a complex')
if decimal:
nan = decimal.Decimal('NaN')
else:
nan = nan()
class inf(float):
"""An approximation of the +Infinity floating point number."""
def __repr__(self): return 'inf'
def __str__(self): return 'inf'
def __add__(self,x): return self
def __radd__(self,x): return self
def __sub__(self,x): return self
def __rsub__(self,x): return self
def __mul__(self,x):
if x is neginf or x < 0:
return neginf
elif x == 0:
return nan
else:
return self
def __rmul__(self,x): return self.__mul__(x)
def __div__(self,x):
if x == 0:
raise ZeroDivisionError('float division')
elif x < 0:
return neginf
else:
return self
def __rdiv__(self,x):
if x is inf or x is neginf or x is nan:
return nan
return 0.0
def __divmod__(self,x):
if x == 0:
raise ZeroDivisionError('float divmod()')
elif x < 0:
return (nan,nan)
else:
return (self,self)
def __rdivmod__(self,x):
if x is inf or x is neginf or x is nan:
return (nan, nan)
return (0.0, x)
def __mod__(self,x):
if x == 0:
raise ZeroDivisionError('float modulo')
else:
return nan
def __rmod__(self,x):
if x is inf or x is neginf or x is nan:
return nan
return x
def __pow__(self, exp):
if exp == 0:
return 1.0
else:
return self
def __rpow__(self, x):
if -1 < x < 1: return 0.0
elif x == 1.0: return 1.0
elif x is nan or x is neginf or x < 0:
return nan
else:
return self
def __neg__(self): return neginf
def __pos__(self): return self
def __abs__(self): return self
def __lt__(self,x): return False
def __le__(self,x):
if x is self:
return True
else:
return False
def __eq__(self,x):
if x is self:
return True
else:
return False
def __neq__(self,x):
if x is self:
return False
else:
return True
def __ge__(self,x): return True
def __gt__(self,x): return True
def __complex__(self,*a): raise NotImplementedError('Infinity can not be converted to a complex')
if decimal:
inf = decimal.Decimal('Infinity')
else:
inf = inf()
class neginf(float):
"""An approximation of the -Infinity floating point number."""
def __repr__(self): return '-inf'
def __str__(self): return '-inf'
def __add__(self,x): return self
def __radd__(self,x): return self
def __sub__(self,x): return self
def __rsub__(self,x): return self
def __mul__(self,x):
if x is self or x < 0:
return inf
elif x == 0:
return nan
else:
return self
def __rmul__(self,x): return self.__mul__(self)
def __div__(self,x):
if x == 0:
raise ZeroDivisionError('float division')
elif x < 0:
return inf
else:
return self
def __rdiv__(self,x):
if x is inf or x is neginf or x is nan:
return nan
return -0.0
def __divmod__(self,x):
if x == 0:
raise ZeroDivisionError('float divmod()')
elif x < 0:
return (nan,nan)
else:
return (self,self)
def __rdivmod__(self,x):
if x is inf or x is neginf or x is nan:
return (nan, nan)
return (-0.0, x)
def __mod__(self,x):
if x == 0:
raise ZeroDivisionError('float modulo')
else:
return nan
def __rmod__(self,x):
if x is inf or x is neginf or x is nan:
return nan
return x
def __pow__(self,exp):
if exp == 0:
return 1.0
else:
return self
def __rpow__(self, x):
if x is nan or x is inf or x is inf:
return nan
return 0.0
def __neg__(self): return inf
def __pos__(self): return self
def __abs__(self): return inf
def __lt__(self,x): return True
def __le__(self,x): return True
def __eq__(self,x):
if x is self:
return True
else:
return False
def __neq__(self,x):
if x is self:
return False
else:
return True
def __ge__(self,x):
if x is self:
return True
else:
return False
def __gt__(self,x): return False
def __complex__(self,*a): raise NotImplementedError('-Infinity can not be converted to a complex')
if decimal:
neginf = decimal.Decimal('-Infinity')
else:
neginf = neginf(0)
return nan, inf, neginf
nan, inf, neginf = _nonnumber_float_constants()
del _nonnumber_float_constants
# ----------------------------------------------------------------------
# String processing helpers
unsafe_string_chars = '"\\' + ''.join([chr(i) for i in range(0x20)])
def skipstringsafe( s, start=0, end=None ):
i = start
#if end is None:
# end = len(s)
while i < end and s[i] not in unsafe_string_chars:
#c = s[i]
#if c in unsafe_string_chars:
# break
i += 1
return i
def skipstringsafe_slow( s, start=0, end=None ):
i = start
if end is None:
end = len(s)
while i < end:
c = s[i]
if c == '"' or c == '\\' or ord(c) <= 0x1f:
break
i += 1
return i
def extend_list_with_sep( orig_seq, extension_seq, sepchar='' ):
if not sepchar:
orig_seq.extend( extension_seq )
else:
for i, x in enumerate(extension_seq):
if i > 0:
orig_seq.append( sepchar )
orig_seq.append( x )
def extend_and_flatten_list_with_sep( orig_seq, extension_seq, separator='' ):
for i, part in enumerate(extension_seq):
if i > 0 and separator:
orig_seq.append( separator )
orig_seq.extend( part )
# ----------------------------------------------------------------------
# Unicode helpers
#
# JSON requires that all JSON implementations must support the UTF-32
# encoding (as well as UTF-8 and UTF-16). But earlier versions of
# Python did not provide a UTF-32 codec. So we must implement UTF-32
# ourselves in case we need it.
def utf32le_encode( obj, errors='strict' ):
"""Encodes a Unicode string into a UTF-32LE encoded byte string."""
import struct
try:
import cStringIO as sio
except ImportError:
import StringIO as sio
f = sio.StringIO()
write = f.write
pack = struct.pack
for c in obj:
n = ord(c)
if 0xD800 <= n <= 0xDFFF: # surrogate codepoints are prohibited by UTF-32
if errors == 'ignore':
continue
elif errors == 'replace':
n = ord('?')
else:
cname = 'U+%04X'%n
raise UnicodeError('UTF-32 can not encode surrogate characters',cname)
write( pack('<L', n) )
return f.getvalue()
def utf32be_encode( obj, errors='strict' ):
"""Encodes a Unicode string into a UTF-32BE encoded byte string."""
import struct
try:
import cStringIO as sio
except ImportError:
import StringIO as sio
f = sio.StringIO()
write = f.write
pack = struct.pack
for c in obj:
n = ord(c)
if 0xD800 <= n <= 0xDFFF: # surrogate codepoints are prohibited by UTF-32
if errors == 'ignore':
continue
elif errors == 'replace':
n = ord('?')
else:
cname = 'U+%04X'%n
raise UnicodeError('UTF-32 can not encode surrogate characters',cname)
write( pack('>L', n) )
return f.getvalue()
def utf32le_decode( obj, errors='strict' ):
"""Decodes a UTF-32LE byte string into a Unicode string."""
if len(obj) % 4 != 0:
raise UnicodeError('UTF-32 decode error, data length not a multiple of 4 bytes')
import struct
unpack = struct.unpack
chars = []
i = 0
for i in range(0, len(obj), 4):
seq = obj[i:i+4]
n = unpack('<L',seq)[0]
chars.append( unichr(n) )
return u''.join( chars )
def utf32be_decode( obj, errors='strict' ):
"""Decodes a UTF-32BE byte string into a Unicode string."""
if len(obj) % 4 != 0:
raise UnicodeError('UTF-32 decode error, data length not a multiple of 4 bytes')
import struct
unpack = struct.unpack
chars = []
i = 0
for i in range(0, len(obj), 4):
seq = obj[i:i+4]
n = unpack('>L',seq)[0]
chars.append( unichr(n) )
return u''.join( chars )
def auto_unicode_decode( s ):
"""Takes a string and tries to convert it to a Unicode string.
This will return a Python unicode string type corresponding to the
input string (either str or unicode). The character encoding is
guessed by looking for either a Unicode BOM prefix, or by the
rules specified by RFC 4627. When in doubt it is assumed the
input is encoded in UTF-8 (the default for JSON).
"""
if isinstance(s, unicode):
return s
if len(s) < 4:
return s.decode('utf8') # not enough bytes, assume default of utf-8
# Look for BOM marker
import codecs
bom2 = s[:2]
bom4 = s[:4]
a, b, c, d = map(ord, s[:4]) # values of first four bytes
if bom4 == codecs.BOM_UTF32_LE:
encoding = 'utf-32le'
s = s[4:]
elif bom4 == codecs.BOM_UTF32_BE:
encoding = 'utf-32be'
s = s[4:]
elif bom2 == codecs.BOM_UTF16_LE:
encoding = 'utf-16le'
s = s[2:]
elif bom2 == codecs.BOM_UTF16_BE:
encoding = 'utf-16be'
s = s[2:]
# No BOM, so autodetect encoding used by looking at first four bytes
# according to RFC 4627 section 3.
elif a==0 and b==0 and c==0 and d!=0: # UTF-32BE
encoding = 'utf-32be'
elif a==0 and b!=0 and c==0 and d!=0: # UTF-16BE
encoding = 'utf-16be'
elif a!=0 and b==0 and c==0 and d==0: # UTF-32LE
encoding = 'utf-32le'
elif a!=0 and b==0 and c!=0 and d==0: # UTF-16LE
encoding = 'utf-16le'
else: #if a!=0 and b!=0 and c!=0 and d!=0: # UTF-8
# JSON spec says default is UTF-8, so always guess it
# if we can't guess otherwise
encoding = 'utf8'
# Make sure the encoding is supported by Python
try:
cdk = codecs.lookup(encoding)
except LookupError:
if encoding.startswith('utf-32') \
or encoding.startswith('ucs4') \
or encoding.startswith('ucs-4'):
# Python doesn't natively have a UTF-32 codec, but JSON
# requires that it be supported. So we must decode these
# manually.
if encoding.endswith('le'):
unis = utf32le_decode(s)
else:
unis = utf32be_decode(s)
else:
raise JSONDecodeError('this python has no codec for this character encoding',encoding)
else:
# Convert to unicode using a standard codec
unis = s.decode(encoding)
return unis
def surrogate_pair_as_unicode( c1, c2 ):
"""Takes a pair of unicode surrogates and returns the equivalent unicode character.
The input pair must be a surrogate pair, with c1 in the range
U+D800 to U+DBFF and c2 in the range U+DC00 to U+DFFF.
"""
n1, n2 = ord(c1), ord(c2)
if n1 < 0xD800 or n1 > 0xDBFF or n2 < 0xDC00 or n2 > 0xDFFF:
raise JSONDecodeError('illegal Unicode surrogate pair',(c1,c2))
a = n1 - 0xD800
b = n2 - 0xDC00
v = (a << 10) | b
v += 0x10000
return unichr(v)
def unicode_as_surrogate_pair( c ):
"""Takes a single unicode character and returns a sequence of surrogate pairs.
The output of this function is a tuple consisting of one or two unicode
characters, such that if the input character is outside the BMP range
then the output is a two-character surrogate pair representing that character.
If the input character is inside the BMP then the output tuple will have
just a single character...the same one.
"""
n = ord(c)
if n < 0x10000:
return (unichr(n),) # in BMP, surrogate pair not required
v = n - 0x10000
vh = (v >> 10) & 0x3ff # highest 10 bits
vl = v & 0x3ff # lowest 10 bits
w1 = 0xD800 | vh
w2 = 0xDC00 | vl
return (unichr(w1), unichr(w2))
# ----------------------------------------------------------------------
# Type identification
def isnumbertype( obj ):
"""Is the object of a Python number type (excluding complex)?"""
return isinstance(obj, (int,long,float)) \
and not isinstance(obj, bool) \
or obj is nan or obj is inf or obj is neginf
def isstringtype( obj ):
"""Is the object of a Python string type?"""
if isinstance(obj, basestring):
return True
# Must also check for some other pseudo-string types
import types, UserString
return isinstance(obj, types.StringTypes) \
or isinstance(obj, UserString.UserString) \
or isinstance(obj, UserString.MutableString)
# ----------------------------------------------------------------------
# Numeric helpers
def decode_hex( hexstring ):
"""Decodes a hexadecimal string into it's integer value."""
# We don't use the builtin 'hex' codec in python since it can
# not handle odd numbers of digits, nor raise the same type
# of exceptions we want to.
n = 0
for c in hexstring:
if '0' <= c <= '9':
d = ord(c) - ord('0')
elif 'a' <= c <= 'f':
d = ord(c) - ord('a') + 10
elif 'A' <= c <= 'F':
d = ord(c) - ord('A') + 10
else:
raise JSONDecodeError('not a hexadecimal number',hexstring)
# Could use ((n << 4 ) | d), but python 2.3 issues a FutureWarning.
n = (n * 16) + d
return n
def decode_octal( octalstring ):
"""Decodes an octal string into it's integer value."""
n = 0
for c in octalstring:
if '0' <= c <= '7':
d = ord(c) - ord('0')
else:
raise JSONDecodeError('not an octal number',octalstring)
# Could use ((n << 3 ) | d), but python 2.3 issues a FutureWarning.
n = (n * 8) + d
return n
# ----------------------------------------------------------------------
# Exception classes.
class JSONError(ValueError):
"""Our base class for all JSON-related errors.
"""
def pretty_description(self):
err = self.args[0]
if len(self.args) > 1:
err += ': '
for anum, a in enumerate(self.args[1:]):
if anum > 1:
err += ', '
astr = repr(a)
if len(astr) > 20:
astr = astr[:20] + '...'
err += astr
return err
class JSONDecodeError(JSONError):
"""An exception class raised when a JSON decoding error (syntax error) occurs."""
class JSONEncodeError(JSONError):
"""An exception class raised when a python object can not be encoded as a JSON string."""
#----------------------------------------------------------------------
# The main JSON encoder/decoder class.
class JSON(object):
"""An encoder/decoder for JSON data streams.
Usually you will call the encode() or decode() methods. The other
methods are for lower-level processing.
Whether the JSON parser runs in strict mode (which enforces exact
compliance with the JSON spec) or the more forgiving non-string mode
can be affected by setting the 'strict' argument in the object's
initialization; or by assigning True or False to the 'strict'
property of the object.
You can also adjust a finer-grained control over strictness by
allowing or preventing specific behaviors. You can get a list of
all the available behaviors by accessing the 'behaviors' property.
Likewise the allowed_behaviors and prevented_behaviors list which
behaviors will be allowed and which will not. Call the allow()
or prevent() methods to adjust these.
"""
_escapes_json = { # character escapes in JSON
'"': '"',
'/': '/',
'\\': '\\',
'b': '\b',
'f': '\f',
'n': '\n',
'r': '\r',
't': '\t',
}
_escapes_js = { # character escapes in Javascript
'"': '"',
'\'': '\'',
'\\': '\\',
'b': '\b',
'f': '\f',
'n': '\n',
'r': '\r',
't': '\t',
'v': '\v',
'0': '\x00'
}
# Following is a reverse mapping of escape characters, used when we
# output JSON. Only those escapes which are always safe (e.g., in JSON)
# are here. It won't hurt if we leave questionable ones out.
_rev_escapes = {'\n': '\\n',
'\t': '\\t',
'\b': '\\b',
'\r': '\\r',
'\f': '\\f',
'"': '\\"',
'\\': '\\\\'}
def __init__(self, strict=False, compactly=True, escape_unicode=False):
"""Creates a JSON encoder/decoder object.
If 'strict' is set to True, then only strictly-conforming JSON
output will be produced. Note that this means that some types
of values may not be convertable and will result in a
JSONEncodeError exception.
If 'compactly' is set to True, then the resulting string will
have all extraneous white space removed; if False then the
string will be "pretty printed" with whitespace and indentation
added to make it more readable.
If 'escape_unicode' is set to True, then all non-ASCII characters
will be represented as a unicode escape sequence; if False then
the actual real unicode character will be inserted if possible.
The 'escape_unicode' can also be a function, which when called
with a single argument of a unicode character will return True
if the character should be escaped or False if it should not.
If you wish to extend the encoding to ba able to handle
additional types, you should subclass this class and override
the encode_default() method.
"""
import sys
self._set_strictness(strict)
self._encode_compactly = compactly
try:
# see if we were passed a predicate function
b = escape_unicode(u'A')
self._encode_unicode_as_escapes = escape_unicode
except (ValueError, NameError, TypeError):
# Just set to True or False. We could use lambda x:True
# to make it more consistent (always a function), but it
# will be too slow, so we'll make explicit tests later.
self._encode_unicode_as_escapes = bool(escape_unicode)
self._sort_dictionary_keys = True
# The following is a boolean map of the first 256 characters
# which will quickly tell us which of those characters never
# need to be escaped.
self._asciiencodable = [32 <= c < 128 and not self._rev_escapes.has_key(chr(c))
for c in range(0,255)]
def _set_strictness(self, strict):
"""Changes the strictness behavior.
Pass True to be very strict about JSON syntax, or False to be looser.
"""
self._allow_any_type_at_start = not strict
self._allow_all_numeric_signs = not strict
self._allow_comments = not strict
self._allow_control_char_in_string = not strict
self._allow_hex_numbers = not strict
self._allow_initial_decimal_point = not strict
self._allow_js_string_escapes = not strict
self._allow_non_numbers = not strict
self._allow_nonescape_characters = not strict # "\z" -> "z"
self._allow_nonstring_keys = not strict
self._allow_omitted_array_elements = not strict
self._allow_single_quoted_strings = not strict
self._allow_trailing_comma_in_literal = not strict
self._allow_undefined_values = not strict
self._allow_unicode_format_control_chars = not strict
self._allow_unicode_whitespace = not strict
# Always disable this by default
self._allow_octal_numbers = False
def allow(self, behavior):
"""Allow the specified behavior (turn off a strictness check).
The list of all possible behaviors is available in the behaviors property.
You can see which behaviors are currently allowed by accessing the
allowed_behaviors property.
"""
p = '_allow_' + behavior
if hasattr(self, p):
setattr(self, p, True)
else:
raise AttributeError('Behavior is not known',behavior)
def prevent(self, behavior):
"""Prevent the specified behavior (turn on a strictness check).
The list of all possible behaviors is available in the behaviors property.
You can see which behaviors are currently prevented by accessing the
prevented_behaviors property.
"""
p = '_allow_' + behavior
if hasattr(self, p):
setattr(self, p, False)
else:
raise AttributeError('Behavior is not known',behavior)
def _get_behaviors(self):
return sorted([ n[len('_allow_'):] for n in self.__dict__ \
if n.startswith('_allow_')])
behaviors = property(_get_behaviors,
doc='List of known behaviors that can be passed to allow() or prevent() methods')
def _get_allowed_behaviors(self):
return sorted([ n[len('_allow_'):] for n in self.__dict__ \
if n.startswith('_allow_') and getattr(self,n)])
allowed_behaviors = property(_get_allowed_behaviors,
doc='List of known behaviors that are currently allowed')
def _get_prevented_behaviors(self):
return sorted([ n[len('_allow_'):] for n in self.__dict__ \
if n.startswith('_allow_') and not getattr(self,n)])
prevented_behaviors = property(_get_prevented_behaviors,
doc='List of known behaviors that are currently prevented')
def _is_strict(self):
return not self.allowed_behaviors
strict = property(_is_strict, _set_strictness,
doc='True if adherence to RFC 4627 syntax is strict, or False is more generous ECMAScript syntax is permitted')
def isws(self, c):
"""Determines if the given character is considered as white space.
Note that Javscript is much more permissive on what it considers
to be whitespace than does JSON.
Ref. ECMAScript section 7.2
"""
if not self._allow_unicode_whitespace:
return c in ' \t\n\r'
else:
if not isinstance(c,unicode):
c = unicode(c)
if c in u' \t\n\r\f\v':
return True
import unicodedata
return unicodedata.category(c) == 'Zs'
def islineterm(self, c):
"""Determines if the given character is considered a line terminator.
Ref. ECMAScript section 7.3
"""
if c == '\r' or c == '\n':
return True
if c == u'\u2028' or c == u'\u2029': # unicodedata.category(c) in ['Zl', 'Zp']
return True
return False
def strip_format_control_chars(self, txt):
"""Filters out all Unicode format control characters from the string.
ECMAScript permits any Unicode "format control characters" to
appear at any place in the source code. They are to be
ignored as if they are not there before any other lexical
tokenization occurs. Note that JSON does not allow them.
Ref. ECMAScript section 7.1.
"""
import unicodedata
txt2 = filter( lambda c: unicodedata.category(unicode(c)) != 'Cf',
txt )
return txt2
def decode_null(self, s, i=0):
"""Intermediate-level decoder for ECMAScript 'null' keyword.
Takes a string and a starting index, and returns a Python
None object and the index of the next unparsed character.
"""
if i < len(s) and s[i:i+4] == 'null':
return None, i+4
raise JSONDecodeError('literal is not the JSON "null" keyword', s)
def encode_undefined(self):
"""Produces the ECMAScript 'undefined' keyword."""
return 'undefined'
def encode_null(self):
"""Produces the JSON 'null' keyword."""
return 'null'
def decode_boolean(self, s, i=0):
"""Intermediate-level decode for JSON boolean literals.
Takes a string and a starting index, and returns a Python bool
(True or False) and the index of the next unparsed character.
"""
if s[i:i+4] == 'true':
return True, i+4
elif s[i:i+5] == 'false':
return False, i+5
raise JSONDecodeError('literal value is not a JSON boolean keyword',s)
def encode_boolean(self, b):
"""Encodes the Python boolean into a JSON Boolean literal."""
if bool(b):
return 'true'
return 'false'
def decode_number(self, s, i=0, imax=None):
"""Intermediate-level decoder for JSON numeric literals.
Takes a string and a starting index, and returns a Python
suitable numeric type and the index of the next unparsed character.
The returned numeric type can be either of a Python int,
long, or float. In addition some special non-numbers may
also be returned such as nan, inf, and neginf (technically
which are Python floats, but have no numeric value.)
Ref. ECMAScript section 8.5.
"""
if imax is None:
imax = len(s)
# Detect initial sign character(s)
if not self._allow_all_numeric_signs:
if s[i] == '+' or (s[i] == '-' and i+1 < imax and \
s[i+1] in '+-'):
raise JSONDecodeError('numbers in strict JSON may only have a single "-" as a sign prefix',s[i:])
sign = +1
j = i # j will point after the sign prefix
while j < imax and s[j] in '+-':
if s[j] == '-': sign = sign * -1
j += 1
# Check for ECMAScript symbolic non-numbers
if s[j:j+3] == 'NaN':
if self._allow_non_numbers:
return nan, j+3
else:
raise JSONDecodeError('NaN literals are not allowed in strict JSON')
elif s[j:j+8] == 'Infinity':
if self._allow_non_numbers:
if sign < 0:
return neginf, j+8
else:
return inf, j+8
else:
raise JSONDecodeError('Infinity literals are not allowed in strict JSON')
elif s[j:j+2] in ('0x','0X'):
if self._allow_hex_numbers:
k = j+2
while k < imax and s[k] in hexdigits:
k += 1
n = sign * decode_hex( s[j+2:k] )
return n, k
else:
raise JSONDecodeError('hexadecimal literals are not allowed in strict JSON',s[i:])
else:
# Decimal (or octal) number, find end of number.
# General syntax is: \d+[\.\d+][e[+-]?\d+]
k = j # will point to end of digit sequence
could_be_octal = ( k+1 < imax and s[k] == '0' ) # first digit is 0
decpt = None # index into number of the decimal point, if any
ept = None # index into number of the e|E exponent start, if any
esign = '+' # sign of exponent
sigdigits = 0 # number of significant digits (approx, counts end zeros)
while k < imax and (s[k].isdigit() or s[k] in '.+-eE'):
c = s[k]
if c not in octaldigits:
could_be_octal = False
if c == '.':
if decpt is not None or ept is not None:
break
else:
decpt = k-j
elif c in 'eE':
if ept is not None:
break
else:
ept = k-j
elif c in '+-':
if not ept:
break
esign = c
else: #digit
if not ept:
sigdigits += 1
k += 1
number = s[j:k] # The entire number as a string
#print 'NUMBER IS: ', repr(number), ', sign', sign, ', esign', esign, \
# ', sigdigits', sigdigits, \
# ', decpt', decpt, ', ept', ept
# Handle octal integers first as an exception. If octal
# is not enabled (the ECMAScipt standard) then just do
# nothing and treat the string as a decimal number.
if could_be_octal and self._allow_octal_numbers:
n = sign * decode_octal( number )
return n, k
# A decimal number. Do a quick check on JSON syntax restrictions.
if number[0] == '.' and not self._allow_initial_decimal_point:
raise JSONDecodeError('numbers in strict JSON must have at least one digit before the decimal point',s[i:])
elif number[0] == '0' and \
len(number) > 1 and number[1].isdigit():
if self._allow_octal_numbers:
raise JSONDecodeError('initial zero digit is only allowed for octal integers',s[i:])
else:
raise JSONDecodeError('initial zero digit must not be followed by other digits (octal numbers are not permitted)',s[i:])
# Make sure decimal point is followed by a digit
if decpt is not None:
if decpt+1 >= len(number) or not number[decpt+1].isdigit():
raise JSONDecodeError('decimal point must be followed by at least one digit',s[i:])
# Determine the exponential part
if ept is not None:
if ept+1 >= len(number):
raise JSONDecodeError('exponent in number is truncated',s[i:])
try:
exponent = int(number[ept+1:])
except ValueError:
raise JSONDecodeError('not a valid exponent in number',s[i:])
##print 'EXPONENT', exponent
else:
exponent = 0
# Try to make an int/long first.
if decpt is None and exponent >= 0:
# An integer
if ept:
n = int(number[:ept])
else:
n = int(number)
n *= sign
if exponent:
n *= 10**exponent
if n == 0 and sign < 0:
# minus zero, must preserve negative sign so make a float
n = -0.0
else:
try:
if decimal and (abs(exponent) > float_maxexp or sigdigits > float_sigdigits):
try:
n = decimal.Decimal(number)
n = n.normalize()
except decimal.Overflow:
if sign<0:
n = neginf
else:
n = inf
else:
n *= sign
else:
n = float(number) * sign
except ValueError:
raise JSONDecodeError('not a valid JSON numeric literal', s[i:j])
return n, k
def encode_number(self, n):
"""Encodes a Python numeric type into a JSON numeric literal.
The special non-numeric values of float('nan'), float('inf')
and float('-inf') are translated into appropriate JSON
literals.
Note that Python complex types are not handled, as there is no
ECMAScript equivalent type.
"""
if isinstance(n, complex):
if n.imag:
raise JSONEncodeError('Can not encode a complex number that has a non-zero imaginary part',n)
n = n.real
if isinstance(n, (int,long)):
return str(n)
if decimal and isinstance(n, decimal.Decimal):
return str(n)
global nan, inf, neginf
if n is nan:
return 'NaN'
elif n is inf:
return 'Infinity'
elif n is neginf:
return '-Infinity'
elif isinstance(n, float):
# Check for non-numbers.
# In python nan == inf == -inf, so must use repr() to distinguish
reprn = repr(n).lower()
if ('inf' in reprn and '-' in reprn) or n == neginf:
return '-Infinity'
elif 'inf' in reprn or n is inf:
return 'Infinity'
elif 'nan' in reprn or n is nan:
return 'NaN'
return repr(n)
else:
raise TypeError('encode_number expected an integral, float, or decimal number type',type(n))
def decode_string(self, s, i=0, imax=None):
"""Intermediate-level decoder for JSON string literals.
Takes a string and a starting index, and returns a Python
string (or unicode string) and the index of the next unparsed
character.
"""
if imax is None:
imax = len(s)
if imax < i+2 or s[i] not in '"\'':
raise JSONDecodeError('string literal must be properly quoted',s[i:])
closer = s[i]
if closer == '\'' and not self._allow_single_quoted_strings:
raise JSONDecodeError('string literals must use double quotation marks in strict JSON',s[i:])
i += 1 # skip quote
if self._allow_js_string_escapes:
escapes = self._escapes_js
else:
escapes = self._escapes_json
ccallowed = self._allow_control_char_in_string
chunks = []
_append = chunks.append
done = False
high_surrogate = None
while i < imax:
c = s[i]
# Make sure a high surrogate is immediately followed by a low surrogate
if high_surrogate and (i+1 >= imax or s[i:i+2] != '\\u'):
raise JSONDecodeError('High unicode surrogate must be followed by a low surrogate',s[i:])
if c == closer:
i += 1 # skip end quote
done = True
break
elif c == '\\':
# Escaped character
i += 1
if i >= imax:
raise JSONDecodeError('escape in string literal is incomplete',s[i-1:])
c = s[i]
if '0' <= c <= '7' and self._allow_octal_numbers:
# Handle octal escape codes first so special \0 doesn't kick in yet.
# Follow Annex B.1.2 of ECMAScript standard.
if '0' <= c <= '3':
maxdigits = 3
else:
maxdigits = 2
for k in range(i, i+maxdigits+1):
if k >= imax or s[k] not in octaldigits:
break
n = decode_octal(s[i:k])
if n < 128:
_append( chr(n) )
else:
_append( unichr(n) )
i = k
continue
if escapes.has_key(c):
_append(escapes[c])
i += 1
elif c == 'u' or c == 'x':
i += 1
if c == 'u':
digits = 4
else: # c== 'x'
if not self._allow_js_string_escapes:
raise JSONDecodeError(r'string literals may not use the \x hex-escape in strict JSON',s[i-1:])
digits = 2
if i+digits >= imax:
raise JSONDecodeError('numeric character escape sequence is truncated',s[i-1:])
n = decode_hex( s[i:i+digits] )
if high_surrogate:
# Decode surrogate pair and clear high surrogate
_append( surrogate_pair_as_unicode( high_surrogate, unichr(n) ) )
high_surrogate = None
elif n < 128:
# ASCII chars always go in as a str
_append( chr(n) )
elif 0xd800 <= n <= 0xdbff: # high surrogate
if imax < i + digits + 2 or s[i+digits] != '\\' or s[i+digits+1] != 'u':
raise JSONDecodeError('High unicode surrogate must be followed by a low surrogate',s[i-2:])
high_surrogate = unichr(n) # remember until we get to the low surrogate
elif 0xdc00 <= n <= 0xdfff: # low surrogate
raise JSONDecodeError('Low unicode surrogate must be proceeded by a high surrogate',s[i-2:])
else:
# Other chars go in as a unicode char
_append( unichr(n) )
i += digits
else:
# Unknown escape sequence
if self._allow_nonescape_characters:
_append( c )
i += 1
else:
raise JSONDecodeError('unsupported escape code in JSON string literal',s[i-1:])
elif ord(c) <= 0x1f: # A control character
if self.islineterm(c):
raise JSONDecodeError('line terminator characters must be escaped inside string literals',s[i:])
elif ccallowed:
_append( c )
i += 1
else:
raise JSONDecodeError('control characters must be escaped inside JSON string literals',s[i:])
else: # A normal character; not an escape sequence or end-quote.
# Find a whole sequence of "safe" characters so we can append them
# all at once rather than one a time, for speed.
j = i
i += 1
while i < imax and s[i] not in unsafe_string_chars and s[i] != closer:
i += 1
_append(s[j:i])
if not done:
raise JSONDecodeError('string literal is not terminated with a quotation mark',s)
s = ''.join( chunks )
return s, i
def encode_string(self, s):
"""Encodes a Python string into a JSON string literal.
"""
# Must handle instances of UserString specially in order to be
# able to use ord() on it's simulated "characters".
import UserString
if isinstance(s, (UserString.UserString, UserString.MutableString)):
def tochar(c):
return c.data
else:
# Could use "lambda c:c", but that is too slow. So we set to None
# and use an explicit if test inside the loop.
tochar = None
chunks = []
chunks.append('"')
revesc = self._rev_escapes
asciiencodable = self._asciiencodable
encunicode = self._encode_unicode_as_escapes
i = 0
imax = len(s)
while i < imax:
if tochar:
c = tochar(s[i])
else:
c = s[i]
cord = ord(c)
if cord < 256 and asciiencodable[cord] and isinstance(encunicode, bool):
# Contiguous runs of plain old printable ASCII can be copied
# directly to the JSON output without worry (unless the user
# has supplied a custom is-encodable function).
j = i
i += 1
while i < imax:
if tochar:
c = tochar(s[i])
else:
c = s[i]
cord = ord(c)
if cord < 256 and asciiencodable[cord]:
i += 1
else:
break
chunks.append( unicode(s[j:i]) )
elif revesc.has_key(c):
# Has a shortcut escape sequence, like "\n"
chunks.append(revesc[c])
i += 1
elif cord <= 0x1F:
# Always unicode escape ASCII-control characters
chunks.append(r'\u%04x' % cord)
i += 1
elif 0xD800 <= cord <= 0xDFFF:
# A raw surrogate character! This should never happen
# and there's no way to include it in the JSON output.
# So all we can do is complain.
cname = 'U+%04X' % cord
raise JSONEncodeError('can not include or escape a Unicode surrogate character',cname)
elif cord <= 0xFFFF:
# Other BMP Unicode character
if isinstance(encunicode, bool):
doesc = encunicode
else:
doesc = encunicode( c )
if doesc:
chunks.append(r'\u%04x' % cord)
else:
chunks.append( c )
i += 1
else: # ord(c) >= 0x10000
# Non-BMP Unicode
if isinstance(encunicode, bool):
doesc = encunicode
else:
doesc = encunicode( c )
if doesc:
for surrogate in unicode_as_surrogate_pair(c):
chunks.append(r'\u%04x' % ord(surrogate))
else:
chunks.append( c )
i += 1
chunks.append('"')
return ''.join( chunks )
def skip_comment(self, txt, i=0):
"""Skips an ECMAScript comment, either // or /* style.
The contents of the comment are returned as a string, as well
as the index of the character immediately after the comment.
"""
if i+1 >= len(txt) or txt[i] != '/' or txt[i+1] not in '/*':
return None, i
if not self._allow_comments:
raise JSONDecodeError('comments are not allowed in strict JSON',txt[i:])
multiline = (txt[i+1] == '*')
istart = i
i += 2
while i < len(txt):
if multiline:
if txt[i] == '*' and i+1 < len(txt) and txt[i+1] == '/':
j = i+2
break
elif txt[i] == '/' and i+1 < len(txt) and txt[i+1] == '*':
raise JSONDecodeError('multiline /* */ comments may not nest',txt[istart:i+1])
else:
if self.islineterm(txt[i]):
j = i # line terminator is not part of comment
break
i += 1
if i >= len(txt):
if not multiline:
j = len(txt) # // comment terminated by end of file is okay
else:
raise JSONDecodeError('comment was never terminated',txt[istart:])
return txt[istart:j], j
def skipws(self, txt, i=0, imax=None, skip_comments=True):
"""Skips whitespace.
"""
if not self._allow_comments and not self._allow_unicode_whitespace:
if imax is None:
imax = len(txt)
while i < imax and txt[i] in ' \r\n\t':
i += 1
return i
else:
return self.skipws_any(txt, i, imax, skip_comments)
def skipws_any(self, txt, i=0, imax=None, skip_comments=True):
"""Skips all whitespace, including comments and unicode whitespace
Takes a string and a starting index, and returns the index of the
next non-whitespace character.
If skip_comments is True and not running in strict JSON mode, then
comments will be skipped over just like whitespace.
"""
if imax is None:
imax = len(txt)
while i < imax:
if txt[i] == '/':
cmt, i = self.skip_comment(txt, i)
if i < imax and self.isws(txt[i]):
i += 1
else:
break
return i
def decode_composite(self, txt, i=0, imax=None):
"""Intermediate-level JSON decoder for composite literal types (array and object).
Takes text and a starting index, and returns either a Python list or
dictionary and the index of the next unparsed character.
"""
if imax is None:
imax = len(txt)
i = self.skipws(txt, i, imax)
starti = i
if i >= imax or txt[i] not in '{[':
raise JSONDecodeError('composite object must start with "[" or "{"',txt[i:])
if txt[i] == '[':
isdict = False
closer = ']'
obj = []
else:
isdict = True
closer = '}'
obj = {}
i += 1 # skip opener
i = self.skipws(txt, i, imax)
if i < imax and txt[i] == closer:
# empty composite
i += 1
done = True
else:
saw_value = False # set to false at beginning and after commas
done = False
while i < imax:
i = self.skipws(txt, i, imax)
if i < imax and (txt[i] == ',' or txt[i] == closer):
c = txt[i]
i += 1
if c == ',':
if not saw_value:
# no preceeding value, an elided (omitted) element
if isdict:
raise JSONDecodeError('can not omit elements of an object (dictionary)')
if self._allow_omitted_array_elements:
if self._allow_undefined_values:
obj.append( undefined )
else:
obj.append( None )
else:
raise JSONDecodeError('strict JSON does not permit omitted array (list) elements',txt[i:])
saw_value = False
continue
else: # c == closer
if not saw_value and not self._allow_trailing_comma_in_literal:
if isdict:
raise JSONDecodeError('strict JSON does not allow a final comma in an object (dictionary) literal',txt[i-2:])
else:
raise JSONDecodeError('strict JSON does not allow a final comma in an array (list) literal',txt[i-2:])
done = True
break
# Decode the item
if isdict and self._allow_nonstring_keys:
r = self.decodeobj(txt, i, identifier_as_string=True)
else:
r = self.decodeobj(txt, i, identifier_as_string=False)
if r:
if saw_value:
# two values without a separating comma
raise JSONDecodeError('values must be separated by a comma', txt[i:r[1]])
saw_value = True
i = self.skipws(txt, r[1], imax)
if isdict:
key = r[0] # Ref 11.1.5
if not isstringtype(key):
if isnumbertype(key):
if not self._allow_nonstring_keys:
raise JSONDecodeError('strict JSON only permits string literals as object properties (dictionary keys)',txt[starti:])
else:
raise JSONDecodeError('object properties (dictionary keys) must be either string literals or numbers',txt[starti:])
if i >= imax or txt[i] != ':':
raise JSONDecodeError('object property (dictionary key) has no value, expected ":"',txt[starti:])
i += 1
i = self.skipws(txt, i, imax)
rval = self.decodeobj(txt, i)
if rval:
i = self.skipws(txt, rval[1], imax)
obj[key] = rval[0]
else:
raise JSONDecodeError('object property (dictionary key) has no value',txt[starti:])
else: # list
obj.append( r[0] )
else: # not r
if isdict:
raise JSONDecodeError('expected a value, or "}"',txt[i:])
elif not self._allow_omitted_array_elements:
raise JSONDecodeError('expected a value or "]"',txt[i:])
else:
raise JSONDecodeError('expected a value, "," or "]"',txt[i:])
# end while
if not done:
if isdict:
raise JSONDecodeError('object literal (dictionary) is not terminated',txt[starti:])
else:
raise JSONDecodeError('array literal (list) is not terminated',txt[starti:])
return obj, i
def decode_javascript_identifier(self, name):
"""Convert a JavaScript identifier into a Python string object.
This method can be overriden by a subclass to redefine how JavaScript
identifiers are turned into Python objects. By default this just
converts them into strings.
"""
return name
def decodeobj(self, txt, i=0, imax=None, identifier_as_string=False, only_object_or_array=False):
"""Intermediate-level JSON decoder.
Takes a string and a starting index, and returns a two-tuple consting
of a Python object and the index of the next unparsed character.
If there is no value at all (empty string, etc), the None is
returned instead of a tuple.
"""
if imax is None:
imax = len(txt)
obj = None
i = self.skipws(txt, i, imax)
if i >= imax:
raise JSONDecodeError('Unexpected end of input')
c = txt[i]
if c == '[' or c == '{':
obj, i = self.decode_composite(txt, i, imax)
elif only_object_or_array:
raise JSONDecodeError('JSON document must start with an object or array type only', txt[i:i+20])
elif c == '"' or c == '\'':
obj, i = self.decode_string(txt, i, imax)
elif c.isdigit() or c in '.+-':
obj, i = self.decode_number(txt, i, imax)
elif c.isalpha() or c in'_$':
j = i
while j < imax and (txt[j].isalnum() or txt[j] in '_$'):
j += 1
kw = txt[i:j]
if kw == 'null':
obj, i = None, j
elif kw == 'true':
obj, i = True, j
elif kw == 'false':
obj, i = False, j
elif kw == 'undefined':
if self._allow_undefined_values:
obj, i = undefined, j
else:
raise JSONDecodeError('strict JSON does not allow undefined elements',txt[i:])
elif kw == 'NaN' or kw == 'Infinity':
obj, i = self.decode_number(txt, i)
else:
if identifier_as_string:
obj, i = self.decode_javascript_identifier(kw), j
else:
raise JSONDecodeError('unknown keyword or identifier',kw)
else:
raise JSONDecodeError('can not decode value',txt[i:])
return obj, i
def decode(self, txt):
"""Decodes a JSON-endoded string into a Python object."""
if self._allow_unicode_format_control_chars:
txt = self.strip_format_control_chars(txt)
r = self.decodeobj(txt, 0, only_object_or_array=not self._allow_any_type_at_start)
if not r:
raise JSONDecodeError('can not decode value',txt)
else:
obj, i = r
i = self.skipws(txt, i)
if i < len(txt):
raise JSONDecodeError('unexpected or extra text',txt[i:])
return obj
def encode(self, obj, nest_level=0):
"""Encodes the Python object into a JSON string representation.
This method will first attempt to encode an object by seeing
if it has a json_equivalent() method. If so than it will
call that method and then recursively attempt to encode
the object resulting from that call.
Next it will attempt to determine if the object is a native
type or acts like a squence or dictionary. If so it will
encode that object directly.
Finally, if no other strategy for encoding the object of that
type exists, it will call the encode_default() method. That
method currently raises an error, but it could be overridden
by subclasses to provide a hook for extending the types which
can be encoded.
"""
chunks = []
self.encode_helper(chunks, obj, nest_level)
return ''.join( chunks )
def encode_helper(self, chunklist, obj, nest_level):
#print 'encode_helper(chunklist=%r, obj=%r, nest_level=%r)'%(chunklist,obj,nest_level)
if hasattr(obj, 'json_equivalent'):
json = self.encode_equivalent( obj, nest_level=nest_level )
if json is not None:
chunklist.append( json )
return
if obj is None:
chunklist.append( self.encode_null() )
elif obj is undefined:
if self._allow_undefined_values:
chunklist.append( self.encode_undefined() )
else:
raise JSONEncodeError('strict JSON does not permit "undefined" values')
elif isinstance(obj, bool):
chunklist.append( self.encode_boolean(obj) )
elif isinstance(obj, (int,long,float,complex)) or \
(decimal and isinstance(obj, decimal.Decimal)):
chunklist.append( self.encode_number(obj) )
elif isinstance(obj, basestring) or isstringtype(obj):
chunklist.append( self.encode_string(obj) )
else:
self.encode_composite(chunklist, obj, nest_level)
def encode_composite(self, chunklist, obj, nest_level):
"""Encodes just dictionaries, lists, or sequences.
Basically handles any python type for which iter() can create
an iterator object.
This method is not intended to be called directly. Use the
encode() method instead.
"""
#print 'encode_complex_helper(chunklist=%r, obj=%r, nest_level=%r)'%(chunklist,obj,nest_level)
try:
# Is it a dictionary or UserDict? Try iterkeys method first.
it = obj.iterkeys()
except AttributeError:
try:
# Is it a sequence? Try to make an iterator for it.
it = iter(obj)
except TypeError:
it = None
if it is not None:
# Does it look like a dictionary? Check for a minimal dict or
# UserDict interface.
isdict = hasattr(obj, '__getitem__') and hasattr(obj, 'keys')
compactly = self._encode_compactly
if isdict:
chunklist.append('{')
if compactly:
dictcolon = ':'
else:
dictcolon = ' : '
else:
chunklist.append('[')
#print nest_level, 'opening sequence:', repr(chunklist)
if not compactly:
indent0 = ' ' * nest_level
indent = ' ' * (nest_level+1)
chunklist.append(' ')
sequence_chunks = [] # use this to allow sorting afterwards if dict
try: # while not StopIteration
numitems = 0
while True:
obj2 = it.next()
if obj2 is obj:
raise JSONEncodeError('trying to encode an infinite sequence',obj)
if isdict and not isstringtype(obj2):
# Check JSON restrictions on key types
if isnumbertype(obj2):
if not self._allow_nonstring_keys:
raise JSONEncodeError('object properties (dictionary keys) must be strings in strict JSON',obj2)
else:
raise JSONEncodeError('object properties (dictionary keys) can only be strings or numbers in ECMAScript',obj2)
# Encode this item in the sequence and put into item_chunks
item_chunks = []
self.encode_helper( item_chunks, obj2, nest_level=nest_level+1 )
if isdict:
item_chunks.append(dictcolon)
obj3 = obj[obj2]
self.encode_helper(item_chunks, obj3, nest_level=nest_level+2)
#print nest_level, numitems, 'item:', repr(obj2)
#print nest_level, numitems, 'sequence_chunks:', repr(sequence_chunks)
#print nest_level, numitems, 'item_chunks:', repr(item_chunks)
#extend_list_with_sep(sequence_chunks, item_chunks)
sequence_chunks.append(item_chunks)
#print nest_level, numitems, 'new sequence_chunks:', repr(sequence_chunks)
numitems += 1
except StopIteration:
pass
if isdict and self._sort_dictionary_keys:
sequence_chunks.sort() # Note sorts by JSON repr, not original Python object
if compactly:
sep = ','
else:
sep = ',\n' + indent
#print nest_level, 'closing sequence'
#print nest_level, 'chunklist:', repr(chunklist)
#print nest_level, 'sequence_chunks:', repr(sequence_chunks)
extend_and_flatten_list_with_sep( chunklist, sequence_chunks, sep )
#print nest_level, 'new chunklist:', repr(chunklist)
if not compactly:
if numitems > 1:
chunklist.append('\n' + indent0)
else:
chunklist.append(' ')
if isdict:
chunklist.append('}')
else:
chunklist.append(']')
else: # Can't create an iterator for the object
json2 = self.encode_default( obj, nest_level=nest_level )
chunklist.append( json2 )
def encode_equivalent( self, obj, nest_level=0 ):
"""This method is used to encode user-defined class objects.
The object being encoded should have a json_equivalent()
method defined which returns another equivalent object which
is easily JSON-encoded. If the object in question has no
json_equivalent() method available then None is returned
instead of a string so that the encoding will attempt the next
strategy.
If a caller wishes to disable the calling of json_equivalent()
methods, then subclass this class and override this method
to just return None.
"""
if hasattr(obj, 'json_equivalent') \
and callable(getattr(obj,'json_equivalent')):
obj2 = obj.json_equivalent()
if obj2 is obj:
# Try to prevent careless infinite recursion
raise JSONEncodeError('object has a json_equivalent() method that returns itself',obj)
json2 = self.encode( obj2, nest_level=nest_level )
return json2
else:
return None
def encode_default( self, obj, nest_level=0 ):
"""This method is used to encode objects into JSON which are not straightforward.
This method is intended to be overridden by subclasses which wish
to extend this encoder to handle additional types.
"""
raise JSONEncodeError('can not encode object into a JSON representation',obj)
# ------------------------------
def encode( obj, strict=False, compactly=True, escape_unicode=False, encoding=None ):
"""Encodes a Python object into a JSON-encoded string.
If 'strict' is set to True, then only strictly-conforming JSON
output will be produced. Note that this means that some types
of values may not be convertable and will result in a
JSONEncodeError exception.
If 'compactly' is set to True, then the resulting string will
have all extraneous white space removed; if False then the
string will be "pretty printed" with whitespace and indentation
added to make it more readable.
If 'escape_unicode' is set to True, then all non-ASCII characters
will be represented as a unicode escape sequence; if False then
the actual real unicode character will be inserted.
If no encoding is specified (encoding=None) then the output will
either be a Python string (if entirely ASCII) or a Python unicode
string type.
However if an encoding name is given then the returned value will
be a python string which is the byte sequence encoding the JSON
value. As the default/recommended encoding for JSON is UTF-8,
you should almost always pass in encoding='utf8'.
"""
import sys
encoder = None # Custom codec encoding function
bom = None # Byte order mark to prepend to final output
cdk = None # Codec to use
if encoding is not None:
import codecs
try:
cdk = codecs.lookup(encoding)
except LookupError:
cdk = None
if cdk:
pass
elif not cdk:
# No built-in codec was found, see if it is something we
# can do ourself.
encoding = encoding.lower()
if encoding.startswith('utf-32') or encoding.startswith('utf32') \
or encoding.startswith('ucs4') \
or encoding.startswith('ucs-4'):
# Python doesn't natively have a UTF-32 codec, but JSON
# requires that it be supported. So we must decode these
# manually.
if encoding.endswith('le'):
encoder = utf32le_encode
elif encoding.endswith('be'):
encoder = utf32be_encode
else:
encoder = utf32be_encode
bom = codecs.BOM_UTF32_BE
elif encoding.startswith('ucs2') or encoding.startswith('ucs-2'):
# Python has no UCS-2, but we can simulate with
# UTF-16. We just need to force us to not try to
# encode anything past the BMP.
encoding = 'utf-16'
if not escape_unicode and not callable(escape_unicode):
escape_unicode = lambda c: (0xD800 <= ord(c) <= 0xDFFF) or ord(c) >= 0x10000
else:
raise JSONEncodeError('this python has no codec for this character encoding',encoding)
if not escape_unicode and not callable(escape_unicode):
if encoding and encoding.startswith('utf'):
# All UTF-x encodings can do the whole Unicode repertoire, so
# do nothing special.
pass
else:
# Even though we don't want to escape all unicode chars,
# the encoding being used may force us to do so anyway.
# We must pass in a function which says which characters
# the encoding can handle and which it can't.
def in_repertoire( c, encoding_func ):
try:
x = encoding_func( c, errors='strict' )
except UnicodeError:
return False
return True
if encoder:
escape_unicode = lambda c: not in_repertoire(c, encoder)
elif cdk:
escape_unicode = lambda c: not in_repertoire(c, cdk[0])
else:
pass # Let the JSON object deal with it
j = JSON( strict=strict, compactly=compactly, escape_unicode=escape_unicode )
unitxt = j.encode( obj )
if encoder:
txt = encoder( unitxt )
elif encoding is not None:
txt = unitxt.encode( encoding )
else:
txt = unitxt
if bom:
txt = bom + txt
return txt
def decode( txt, strict=False, encoding=None, **kw ):
"""Decodes a JSON-encoded string into a Python object.
If 'strict' is set to True, then those strings that are not
entirely strictly conforming to JSON will result in a
JSONDecodeError exception.
The input string can be either a python string or a python unicode
string. If it is already a unicode string, then it is assumed
that no character set decoding is required.
However, if you pass in a non-Unicode text string (i.e., a python
type 'str') then an attempt will be made to auto-detect and decode
the character encoding. This will be successful if the input was
encoded in any of UTF-8, UTF-16 (BE or LE), or UTF-32 (BE or LE),
and of course plain ASCII works too.
Note though that if you know the character encoding, then you
should convert to a unicode string yourself, or pass it the name
of the 'encoding' to avoid the guessing made by the auto
detection, as with
python_object = demjson.decode( input_bytes, encoding='utf8' )
Optional keywords arguments must be of the form
allow_xxxx=True/False
or
prevent_xxxx=True/False
where each will allow or prevent the specific behavior, after the
evaluation of the 'strict' argument. For example, if strict=True
then by also passing 'allow_comments=True' then comments will be
allowed. If strict=False then prevent_comments=True will allow
everything except comments.
"""
# Initialize the JSON object
j = JSON( strict=strict )
for keyword, value in kw.items():
if keyword.startswith('allow_'):
behavior = keyword[6:]
allow = bool(value)
elif keyword.startswith('prevent_'):
behavior = keyword[8:]
allow = not bool(value)
else:
raise ValueError('unknown keyword argument', keyword)
if allow:
j.allow(behavior)
else:
j.prevent(behavior)
# Convert the input string into unicode if needed.
if isinstance(txt,unicode):
unitxt = txt
else:
if encoding is None:
unitxt = auto_unicode_decode( txt )
else:
cdk = None # codec
decoder = None
import codecs
try:
cdk = codecs.lookup(encoding)
except LookupError:
encoding = encoding.lower()
decoder = None
if encoding.startswith('utf-32') \
or encoding.startswith('ucs4') \
or encoding.startswith('ucs-4'):
# Python doesn't natively have a UTF-32 codec, but JSON
# requires that it be supported. So we must decode these
# manually.
if encoding.endswith('le'):
decoder = utf32le_decode
elif encoding.endswith('be'):
decoder = utf32be_decode
else:
if txt.startswith( codecs.BOM_UTF32_BE ):
decoder = utf32be_decode
txt = txt[4:]
elif txt.startswith( codecs.BOM_UTF32_LE ):
decoder = utf32le_decode
txt = txt[4:]
else:
if encoding.startswith('ucs'):
raise JSONDecodeError('UCS-4 encoded string must start with a BOM')
decoder = utf32be_decode # Default BE for UTF, per unicode spec
elif encoding.startswith('ucs2') or encoding.startswith('ucs-2'):
# Python has no UCS-2, but we can simulate with
# UTF-16. We just need to force us to not try to
# encode anything past the BMP.
encoding = 'utf-16'
if decoder:
unitxt = decoder(txt)
elif encoding:
unitxt = txt.decode(encoding)
else:
raise JSONDecodeError('this python has no codec for this character encoding',encoding)
# Check that the decoding seems sane. Per RFC 4627 section 3:
# "Since the first two characters of a JSON text will
# always be ASCII characters [RFC0020], ..."
#
# This check is probably not necessary, but it allows us to
# raise a suitably descriptive error rather than an obscure
# syntax error later on.
#
# Note that the RFC requirements of two ASCII characters seems
# to be an incorrect statement as a JSON string literal may
# have as it's first character any unicode character. Thus
# the first two characters will always be ASCII, unless the
# first character is a quotation mark. And in non-strict
# mode we can also have a few other characters too.
if len(unitxt) > 2:
first, second = unitxt[:2]
if first in '"\'':
pass # second can be anything inside string literal
else:
if ((ord(first) < 0x20 or ord(first) > 0x7f) or \
(ord(second) < 0x20 or ord(second) > 0x7f)) and \
(not j.isws(first) and not j.isws(second)):
# Found non-printable ascii, must check unicode
# categories to see if the character is legal.
# Only whitespace, line and paragraph separators,
# and format control chars are legal here.
import unicodedata
catfirst = unicodedata.category(unicode(first))
catsecond = unicodedata.category(unicode(second))
if catfirst not in ('Zs','Zl','Zp','Cf') or \
catsecond not in ('Zs','Zl','Zp','Cf'):
raise JSONDecodeError('the decoded string is gibberish, is the encoding correct?',encoding)
# Now ready to do the actual decoding
obj = j.decode( unitxt )
return obj
# end file
|
{
"content_hash": "28ee6d7b353e85fa2643a56e49d8db25",
"timestamp": "",
"source": "github",
"line_count": 2136,
"max_line_length": 153,
"avg_line_length": 41.13295880149813,
"alnum_prop": 0.5254495788754837,
"repo_name": "swiftcoder/ashima-iv",
"id": "580fbfa35d557751864943a97d9bf6b6314b27c9",
"size": "87860",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/demjson.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "224358"
},
{
"name": "Rich Text Format",
"bytes": "1949"
},
{
"name": "ShaderLab",
"bytes": "3589"
}
],
"symlink_target": ""
}
|
"""WAL-E is a program to assist in performing PostgreSQL continuous
archiving on S3 or Windows Azure Blob Service (WABS): it handles pushing
and fetching of WAL segments and base backups of the PostgreSQL data directory.
"""
import sys
def gevent_monkey(*args, **kwargs):
import gevent.monkey
gevent.monkey.patch_os()
gevent.monkey.patch_socket(dns=True, aggressive=True)
gevent.monkey.patch_ssl()
gevent.monkey.patch_time()
# Monkey-patch procedures early. If it doesn't work with gevent,
# sadly it cannot be used (easily) in WAL-E.
gevent_monkey()
# Instate a cipher suite that bans a series of weak and slow ciphers.
# Both RC4 (weak) 3DES (slow) have been seen in use.
#
# Only Python 2.7+ possesses the 'ciphers' keyword to wrap_socket.
if sys.version_info >= (2, 7):
def getresponse_monkey():
import httplib
original = httplib.HTTPConnection.getresponse
def monkey(*args, **kwargs):
kwargs['buffering'] = True
return original(*args, **kwargs)
httplib.HTTPConnection.getresponse = monkey
getresponse_monkey()
def ssl_monkey():
import ssl
original = ssl.wrap_socket
def wrap_socket_monkey(*args, **kwargs):
# Set up an OpenSSL cipher string.
#
# Rationale behind each part:
#
# * HIGH: only use the most secure class of ciphers and
# key lengths, generally being 128 bits and larger.
#
# * !aNULL: exclude cipher suites that contain anonymous
# key exchange, making man in the middle attacks much
# more tractable.
#
# * !SSLv2: exclude any SSLv2 cipher suite, as this
# category has security weaknesses. There is only one
# OpenSSL cipher suite that is in the "HIGH" category
# but uses SSLv2 protocols: DES_192_EDE3_CBC_WITH_MD5
# (see s2_lib.c)
#
# Technically redundant given "!3DES", but the intent in
# listing it here is more apparent.
#
# * !RC4: exclude because it's a weak block cipher.
#
# * !3DES: exclude because it's very CPU intensive and
# most peers support another reputable block cipher.
#
# * !MD5: although it doesn't seem use of known flaws in
# MD5 is able to compromise an SSL session, the wide
# deployment of SHA-family functions means the
# compatibility benefits of allowing it are slim to
# none, so disable it until someone produces material
# complaint.
kwargs['ciphers'] = 'HIGH:!aNULL:!SSLv2:!RC4:!3DES:!MD5'
return original(*args, **kwargs)
ssl.wrap_socket = wrap_socket_monkey
ssl_monkey()
import argparse
import logging
import os
import re
import textwrap
import traceback
from wal_e import log_help
from wal_e import subprocess
from wal_e.exception import UserCritical
from wal_e.exception import UserException
from wal_e import storage
from wal_e.piper import popen_sp
from wal_e.worker.pg import PSQL_BIN, psql_csv_run
from wal_e.pipeline import LZOP_BIN, PV_BIN, GPG_BIN
from wal_e.worker.pg import CONFIG_BIN, PgControlDataParser
log_help.configure(
format='%(name)-12s %(levelname)-8s %(message)s')
logger = log_help.WalELogger('wal_e.main')
def external_program_check(
to_check=frozenset([PSQL_BIN, LZOP_BIN, PV_BIN])):
"""
Validates the existence and basic working-ness of other programs
Implemented because it is easy to get confusing error output when
one does not install a dependency because of the fork-worker model
that is both necessary for throughput and makes more obscure the
cause of failures. This is intended to be a time and frustration
saving measure. This problem has confused The Author in practice
when switching rapidly between machines.
"""
could_not_run = []
error_msgs = []
def psql_err_handler(popen):
assert popen.returncode != 0
error_msgs.append(textwrap.fill(
'Could not get a connection to the database: '
'note that superuser access is required'))
# Bogus error message that is re-caught and re-raised
raise EnvironmentError('INTERNAL: Had problems running psql '
'from external_program_check')
with open(os.devnull, 'w') as nullf:
for program in to_check:
try:
if program is PSQL_BIN:
psql_csv_run('SELECT 1', error_handler=psql_err_handler)
else:
if program is PV_BIN:
extra_args = ['--quiet']
else:
extra_args = []
proc = popen_sp([program] + extra_args,
stdout=nullf, stderr=nullf,
stdin=subprocess.PIPE)
# Close stdin for processes that default to
# reading from the pipe; the programs WAL-E uses
# of this kind will terminate in this case.
proc.stdin.close()
proc.wait()
except EnvironmentError:
could_not_run.append(program)
if could_not_run:
error_msgs.append(
'Could not run the following programs, are they installed? ' +
', '.join(could_not_run))
if error_msgs:
raise UserException(
'could not run one or more external programs WAL-E depends upon',
'\n'.join(error_msgs))
return None
def extract_segment(text_with_extractable_segment):
from wal_e.storage import BASE_BACKUP_REGEXP
from wal_e.storage.base import SegmentNumber
match = re.match(BASE_BACKUP_REGEXP, text_with_extractable_segment)
if match is None:
return None
else:
groupdict = match.groupdict()
return SegmentNumber(log=groupdict['log'], seg=groupdict['seg'])
def build_parser():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=__doc__)
aws_group = parser.add_mutually_exclusive_group()
aws_group.add_argument('-k', '--aws-access-key-id',
help='public AWS access key. Can also be defined '
'in an environment variable. If both are defined, '
'the one defined in the programs arguments takes '
'precedence.')
aws_group.add_argument('--aws-instance-profile', action='store_true',
help='Use the IAM Instance Profile associated '
'with this instance to authenticate with the S3 '
'API.')
parser.add_argument('-a', '--wabs-account-name',
help='Account name of Windows Azure Blob Service '
'account. Can also be defined in an environment'
'variable. If both are defined, the one defined'
'in the programs arguments takes precedence.')
parser.add_argument('--s3-prefix',
help='S3 prefix to run all commands against. '
'Can also be defined via environment variable '
'WALE_S3_PREFIX.')
parser.add_argument('--wabs-prefix',
help='Storage prefix to run all commands against. '
'Can also be defined via environment variable '
'WALE_WABS_PREFIX.')
parser.add_argument(
'--gpg-key-id',
help='GPG key ID to encrypt to. (Also needed when decrypting.) '
'Can also be defined via environment variable '
'WALE_GPG_KEY_ID')
parser.add_argument(
'--terse', action='store_true',
help='Only log messages as or more severe than a warning.')
subparsers = parser.add_subparsers(title='subcommands',
dest='subcommand')
# Common arguments for backup-fetch and backup-push
backup_fetchpush_parent = argparse.ArgumentParser(add_help=False)
backup_fetchpush_parent.add_argument('PG_CLUSTER_DIRECTORY',
help="Postgres cluster path, "
"such as '/var/lib/database'")
backup_fetchpush_parent.add_argument(
'--pool-size', '-p', type=int, default=4,
help='Set the maximum number of concurrent transfers')
# operator to print the wal-e version
subparsers.add_parser('version', help='print the wal-e version')
# Common arguments for backup-list and backup-fetch
#
# NB: This does not include the --detail options because some
# other commands use backup listing functionality in a way where
# --detail is never required.
backup_list_nodetail_parent = argparse.ArgumentParser(add_help=False)
# Common arguments between wal-push and wal-fetch
wal_fetchpush_parent = argparse.ArgumentParser(add_help=False)
wal_fetchpush_parent.add_argument('WAL_SEGMENT',
help='Path to a WAL segment to upload')
backup_fetch_parser = subparsers.add_parser(
'backup-fetch', help='fetch a hot backup from S3 or WABS',
parents=[backup_fetchpush_parent, backup_list_nodetail_parent])
backup_list_parser = subparsers.add_parser(
'backup-list', parents=[backup_list_nodetail_parent],
help='list backups in S3 or WABS')
backup_push_parser = subparsers.add_parser(
'backup-push', help='pushing a fresh hot backup to S3 or WABS',
parents=[backup_fetchpush_parent])
backup_push_parser.add_argument(
'--cluster-read-rate-limit',
help='Rate limit reading the PostgreSQL cluster directory to a '
'tunable number of bytes per second', dest='rate_limit',
metavar='BYTES_PER_SECOND',
type=int, default=None)
backup_push_parser.add_argument(
'--while-offline',
help=('Backup a Postgres cluster that is in a stopped state '
'(for example, a replica that you stop and restart '
'when taking a backup)'),
dest='while_offline',
action='store_true',
default=False)
# wal-push operator section
wal_push_parser = subparsers.add_parser(
'wal-push', help='push a WAL file to S3 or WABS',
parents=[wal_fetchpush_parent])
wal_push_parser.add_argument(
'--pool-size', '-p', type=int, default=8,
help='Set the maximum number of concurrent transfers')
# backup-fetch operator section
backup_fetch_parser.add_argument('BACKUP_NAME',
help='the name of the backup to fetch')
backup_fetch_parser.add_argument(
'--blind-restore',
help='Restore from backup without verification of tablespace symlinks',
dest='blind_restore',
action='store_true',
default=False)
backup_fetch_parser.add_argument(
'--restore-spec',
help=('Specification for the directory structure of the database '
'restoration (optional, see README for more information).'),
type=str,
default=None)
# backup-list operator section
backup_list_parser.add_argument(
'QUERY', nargs='?', default=None,
help='a string qualifying backups to list')
backup_list_parser.add_argument(
'--detail', default=False, action='store_true',
help='show more detailed information about every backup')
# wal-fetch operator section
wal_fetch_parser = subparsers.add_parser(
'wal-fetch', help='fetch a WAL file from S3 or WABS',
parents=[wal_fetchpush_parent])
wal_fetch_parser.add_argument('WAL_DESTINATION',
help='Path to download the WAL segment to')
wal_fetch_parser.add_argument(
'--prefetch', '-p', type=int, default=8,
help='Set the maximum number of WAL segments to prefetch.')
wal_prefetch_parser = subparsers.add_parser('wal-prefetch',
help='Prefetch WAL')
wal_prefetch_parser.add_argument(
'BASE_DIRECTORY',
help='Contains writable directory to place ".wal-e" directory in.')
wal_prefetch_parser.add_argument('SEGMENT',
help='Segment by name to download.')
# delete subparser section
delete_parser = subparsers.add_parser(
'delete', help='operators to destroy specified data in S3 or WABS')
delete_parser.add_argument('--dry-run', '-n', action='store_true',
help=('Only print what would be deleted, '
'do not actually delete anything'))
delete_parser.add_argument('--confirm', action='store_true',
help=('Actually delete data. '
'By default, a dry run is performed. '
'Overridden by --dry-run.'))
delete_subparsers = delete_parser.add_subparsers(
title='delete subcommands',
description=('All operators that may delete data are contained '
'in this subcommand.'),
dest='delete_subcommand')
# delete 'before' operator
delete_before_parser = delete_subparsers.add_parser(
'before', help=('Delete all backups and WAL segments strictly before '
'the given base backup name or WAL segment number. '
'The passed backup is *not* deleted.'))
delete_before_parser.add_argument(
'BEFORE_SEGMENT_EXCLUSIVE',
help='A WAL segment number or base backup name')
# delete 'retain' operator
delete_retain_parser = delete_subparsers.add_parser(
'retain', help=('Delete backups and WAL segments older than the '
'NUM_TO_RETAIN oldest base backup. This will leave '
'NUM_TO_RETAIN working backups in place.'))
delete_retain_parser.add_argument(
'NUM_TO_RETAIN', type=int,
help='The number of base backups to retain')
# delete old versions operator
delete_subparsers.add_parser(
'old-versions',
help=('Delete all old versions of WAL-E backup files. One probably '
'wants to ensure that they take a new backup with the new '
'format first. '
'This is useful after a WAL-E major release upgrade.'))
# delete *everything* operator
delete_subparsers.add_parser(
'everything',
help=('Delete all data in the current WAL-E context. '
'Typically this is only appropriate when decommissioning an '
'entire WAL-E archive.'))
return parser
def _config_hint_generate(optname, both_env_and_param):
"""Generate HINT language for missing configuration"""
env = optname.replace('-', '_').upper()
if both_env_and_param:
option = '--' + optname.lower()
return ('Pass "{0}" or set the environment variable "{1}".'
.format(option, env))
else:
return 'Set the environment variable {0}.'.format(env)
def s3_explicit_creds(args):
access_key = args.aws_access_key_id or os.getenv('AWS_ACCESS_KEY_ID')
if access_key is None:
raise UserException(
msg='AWS Access Key credential is required but not provided',
hint=(_config_hint_generate('aws-access-key-id', True)))
secret_key = os.getenv('AWS_SECRET_ACCESS_KEY')
if secret_key is None:
raise UserException(
msg='AWS Secret Key credential is required but not provided',
hint=_config_hint_generate('aws-secret-access-key', False))
security_token = os.getenv('AWS_SECURITY_TOKEN')
from wal_e.blobstore import s3
return s3.Credentials(access_key, secret_key, security_token)
def s3_instance_profile(args):
from wal_e.blobstore import s3
assert args.aws_instance_profile
return s3.InstanceProfileCredentials()
def configure_backup_cxt(args):
# Try to find some WAL-E prefix to store data in.
prefix = (args.s3_prefix or args.wabs_prefix
or os.getenv('WALE_S3_PREFIX') or os.getenv('WALE_WABS_PREFIX')
or os.getenv('WALE_SWIFT_PREFIX'))
if prefix is None:
raise UserException(
msg='no storage prefix defined',
hint=(
'Either set one of the --wabs-prefix or --s3-prefix options or'
' define one of the WALE_WABS_PREFIX, WALE_S3_PREFIX, or '
'WALE_SWIFT_PREFIX environment variables.'
)
)
store = storage.StorageLayout(prefix)
# GPG can be optionally layered atop of every backend, so a common
# code path suffices.
gpg_key_id = args.gpg_key_id or os.getenv('WALE_GPG_KEY_ID')
if gpg_key_id is not None:
external_program_check([GPG_BIN])
# Enumeration of reading in configuration for all supported
# backend data stores, yielding value adhering to the
# 'operator.Backup' protocol.
if store.is_s3:
if args.aws_instance_profile:
creds = s3_instance_profile(args)
else:
creds = s3_explicit_creds(args)
from wal_e.blobstore import s3
s3.sigv4_check_apply()
from wal_e.operator import s3_operator
return s3_operator.S3Backup(store, creds, gpg_key_id)
elif store.is_wabs:
account_name = args.wabs_account_name or os.getenv('WABS_ACCOUNT_NAME')
if account_name is None:
raise UserException(
msg='WABS account name is undefined',
hint=_config_hint_generate('wabs-account-name', True))
access_key = os.getenv('WABS_ACCESS_KEY')
if access_key is None:
raise UserException(
msg='WABS access key credential is required but not provided',
hint=_config_hint_generate('wabs-access-key', False))
from wal_e.blobstore import wabs
from wal_e.operator.wabs_operator import WABSBackup
creds = wabs.Credentials(account_name, access_key)
return WABSBackup(store, creds, gpg_key_id)
elif store.is_swift:
from wal_e.blobstore import swift
from wal_e.operator.swift_operator import SwiftBackup
creds = swift.Credentials(
os.getenv('SWIFT_AUTHURL'),
os.getenv('SWIFT_USER'),
os.getenv('SWIFT_PASSWORD'),
os.getenv('SWIFT_TENANT'),
os.getenv('SWIFT_REGION'),
os.getenv('SWIFT_ENDPOINT_TYPE', 'publicURL'),
os.getenv('SWIFT_AUTH_VERSION', '2'),
)
return SwiftBackup(store, creds, gpg_key_id)
else:
raise UserCritical(
msg='no unsupported blob stores should get here',
hint='Report a bug.')
def monkeypatch_tarfile_copyfileobj():
"""Monkey-patch tarfile.copyfileobj to exploit large buffers"""
import tarfile
from wal_e import copyfileobj
tarfile.copyfileobj = copyfileobj.copyfileobj
def render_subcommand(args):
"""Render a subcommand for human-centric viewing"""
if args.subcommand == 'delete':
return 'delete ' + args.delete_subcommand
else:
return args.subcommand
def main():
parser = build_parser()
args = parser.parse_args()
subcommand = args.subcommand
# Adjust logging level if terse output is set.
if args.terse:
log_help.set_level(logging.WARNING)
# Handle version printing specially, because it doesn't need
# credentials.
if subcommand == 'version':
import pkgutil
print pkgutil.get_data('wal_e', 'VERSION').strip()
sys.exit(0)
# Print a start-up message right away.
#
# Otherwise, it is hard to tell when and how WAL-E started in logs
# because often emits status output too late.
logger.info(msg='starting WAL-E',
detail=('The subcommand is "{0}".'
.format(render_subcommand(args))))
try:
backup_cxt = configure_backup_cxt(args)
if subcommand == 'backup-fetch':
monkeypatch_tarfile_copyfileobj()
external_program_check([LZOP_BIN])
backup_cxt.database_fetch(
args.PG_CLUSTER_DIRECTORY,
args.BACKUP_NAME,
blind_restore=args.blind_restore,
restore_spec=args.restore_spec,
pool_size=args.pool_size)
elif subcommand == 'backup-list':
backup_cxt.backup_list(query=args.QUERY, detail=args.detail)
elif subcommand == 'backup-push':
monkeypatch_tarfile_copyfileobj()
if args.while_offline:
# we need to query pg_config first for the
# pg_controldata's bin location
external_program_check([CONFIG_BIN])
parser = PgControlDataParser(args.PG_CLUSTER_DIRECTORY)
controldata_bin = parser.controldata_bin()
external_programs = [
LZOP_BIN,
PV_BIN,
controldata_bin]
else:
external_programs = [LZOP_BIN, PSQL_BIN, PV_BIN]
external_program_check(external_programs)
rate_limit = args.rate_limit
while_offline = args.while_offline
backup_cxt.database_backup(
args.PG_CLUSTER_DIRECTORY,
rate_limit=rate_limit,
while_offline=while_offline,
pool_size=args.pool_size)
elif subcommand == 'wal-fetch':
external_program_check([LZOP_BIN])
res = backup_cxt.wal_restore(args.WAL_SEGMENT,
args.WAL_DESTINATION,
args.prefetch)
if not res:
sys.exit(1)
elif subcommand == 'wal-prefetch':
external_program_check([LZOP_BIN])
backup_cxt.wal_prefetch(args.BASE_DIRECTORY, args.SEGMENT)
elif subcommand == 'wal-push':
external_program_check([LZOP_BIN])
backup_cxt.wal_archive(args.WAL_SEGMENT,
concurrency=args.pool_size)
elif subcommand == 'delete':
# Set up pruning precedence, optimizing for *not* deleting data
#
# Canonicalize the passed arguments into the value
# "is_dry_run_really"
if args.dry_run is False and args.confirm is True:
# Actually delete data *only* if there are *no* --dry-runs
# present and --confirm is present.
logger.info(msg='deleting data in the store')
is_dry_run_really = False
else:
logger.info(msg='performing dry run of data deletion')
is_dry_run_really = True
import boto.s3.key
import boto.s3.bucket
# This is not necessary, but "just in case" to find bugs.
def just_error(*args, **kwargs):
assert False, ('About to delete something in '
'dry-run mode. Please report a bug.')
boto.s3.key.Key.delete = just_error
boto.s3.bucket.Bucket.delete_keys = just_error
# Handle the subcommands and route them to the right
# implementations.
if args.delete_subcommand == 'old-versions':
backup_cxt.delete_old_versions(is_dry_run_really)
elif args.delete_subcommand == 'everything':
backup_cxt.delete_all(is_dry_run_really)
elif args.delete_subcommand == 'retain':
backup_cxt.delete_with_retention(is_dry_run_really,
args.NUM_TO_RETAIN)
elif args.delete_subcommand == 'before':
segment_info = extract_segment(args.BEFORE_SEGMENT_EXCLUSIVE)
assert segment_info is not None
backup_cxt.delete_before(is_dry_run_really, segment_info)
else:
assert False, 'Should be rejected by argument parsing.'
else:
logger.error(msg='subcommand not implemented',
detail=('The submitted subcommand was {0}.'
.format(subcommand)),
hint='Check for typos or consult wal-e --help.')
sys.exit(127)
# Report on all encountered exceptions, and raise the last one
# to take advantage of the final catch-all reporting and exit
# code management.
if backup_cxt.exceptions:
for exc in backup_cxt.exceptions[:-1]:
if isinstance(exc, UserException):
logger.log(level=exc.severity,
msg=exc.msg, detail=exc.detail, hint=exc.hint)
else:
logger.error(msg=exc)
raise backup_cxt.exceptions[-1]
except UserException, e:
logger.log(level=e.severity,
msg=e.msg, detail=e.detail, hint=e.hint)
sys.exit(1)
except Exception, e:
logger.critical(
msg='An unprocessed exception has avoided all error handling',
detail=''.join(traceback.format_exception(*sys.exc_info())))
sys.exit(2)
|
{
"content_hash": "d6448736eb7dc405aa52c74642c2e071",
"timestamp": "",
"source": "github",
"line_count": 656,
"max_line_length": 79,
"avg_line_length": 39.26219512195122,
"alnum_prop": 0.5895325361080913,
"repo_name": "ArtemZ/wal-e",
"id": "a453c959a9b029edfa9618529f439cd8c9093ae5",
"size": "25778",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "wal_e/cmd.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "986"
},
{
"name": "Python",
"bytes": "388743"
}
],
"symlink_target": ""
}
|
version_info = (1, 0)
version = '.'.join(map(str, version_info))
|
{
"content_hash": "1dc9ef46ecd87154007aae2a4742fd7a",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 42,
"avg_line_length": 32.5,
"alnum_prop": 0.6307692307692307,
"repo_name": "8468/tint",
"id": "02f6932a4c62edacee60e5a35803000d6aa0515c",
"size": "65",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tint/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2244"
},
{
"name": "HTML",
"bytes": "6240"
},
{
"name": "JavaScript",
"bytes": "4854"
},
{
"name": "Makefile",
"bytes": "193"
},
{
"name": "Python",
"bytes": "57502"
}
],
"symlink_target": ""
}
|
"""
TODO: docs!
"""
import os, pickle, base64, time
from gppylib.gplog import *
from gppylib.db import dbconn
from gppylib.db import catalog
from gppylib import gparray
from base import *
from unix import *
import pg
from gppylib import pgconf
from gppylib.utils import writeLinesToFile, createFromSingleHostFile, shellEscape
logger = get_default_logger()
#TODO: need a better way of managing environment variables.
GPHOME=os.environ.get('GPHOME')
#Default timeout for segment start
SEGMENT_TIMEOUT_DEFAULT=600
SEGMENT_STOP_TIMEOUT_DEFAULT=120
#"Command not found" return code in bash
COMMAND_NOT_FOUND=127
def get_postmaster_pid_locally(datadir):
cmdStr = "ps -ef | grep postgres | grep -v grep | awk '{print $2}' | grep `cat %s/postmaster.pid | head -1` || echo -1" % (datadir)
name = "get postmaster"
cmd = Command(name, cmdStr)
try:
cmd.run(validateAfter=True)
sout = cmd.get_results().stdout.lstrip(' ')
return int(sout.split()[0])
except:
return -1
def getPostmasterPID(hostname, datadir):
cmdStr="ps -ef | grep postgres | grep -v grep | awk '{print $2}' | grep \\`cat %s/postmaster.pid | head -1\\` || echo -1" % (datadir)
name="get postmaster pid"
cmd=Command(name,cmdStr,ctxt=REMOTE,remoteHost=hostname)
try:
cmd.run(validateAfter=True)
sout=cmd.get_results().stdout.lstrip(' ')
return int(sout.split()[1])
except:
return -1
def get_max_dbid(name,conn):
try:
curs=conn.cursor()
curs.execute("SELECT max(dbid) FROM gp_configuration")
rows = curs.fetchall()
if len(rows) != 1:
raise Exception, 'Failed to retrieve maximum dbid from catalog'
return rows[0][0]
finally:
curs.close()
#-----------------------------------------------
class PySync(Command):
def __init__(self,name,srcDir,dstHost,dstDir,ctxt=LOCAL,remoteHost=None, options=None):
psync_executable=GPHOME + "/bin/lib/pysync.py"
# MPP-13617
if ':' in dstHost and not ']' in dstHost:
dstHost = '[' + dstHost + ']'
self.cmdStr="%s %s %s %s:%s" % (psync_executable,
options if options else "",
srcDir,
dstHost,
dstDir)
Command.__init__(self,name,self.cmdStr,ctxt,remoteHost)
#-----------------------------------------------
class CmdArgs(list):
"""
Conceptually this is a list of an executable path and executable options
built in a structured manner with a canonical string representation suitable
for execution via a shell.
Examples
--------
>>> str(CmdArgs(['foo']).set_verbose(True))
'foo -v'
>>> str(CmdArgs(['foo']).set_verbose(False).set_wait_timeout(True,600))
'foo -w -t 600'
"""
def __init__(self, l):
list.__init__(self, l)
def __str__(self):
return " ".join(self)
def set_verbose(self, verbose):
"""
@param verbose - true if verbose output desired
"""
if verbose: self.append("-v")
return self
def set_wrapper(self, wrapper, args):
"""
@param wrapper - wrapper executable ultimately passed to pg_ctl
@param args - wrapper arguments ultimately passed to pg_ctl
"""
if wrapper:
self.append("--wrapper=\"%s\"" % wrapper)
if args:
self.append("--wrapper-args=\"%s\"" % args)
return self
def set_wait_timeout(self, wait, timeout):
"""
@param wait: true if should wait until operation completes
@param timeout: number of seconds to wait before giving up
"""
if wait:
self.append("-w")
if timeout:
self.append("-t")
self.append(str(timeout))
return self
def set_segments(self, segments):
"""
The reduces the command line length of the gpsegstart.py and other
commands. There are shell limitations to the length and if there are a
large number of segments and filespaces this limit can be exceeded.
Since filespaces are not used by our callers, we remove all but one of them.
@param segments - segments (from GpArray.getSegmentsByHostName)
"""
for seg in segments:
cfg_array = repr(seg).split('|')[0:-1]
self.append("-D '%s'" % ('|'.join(cfg_array) + '|'))
return self
class PgCtlBackendOptions(CmdArgs):
"""
List of options suitable for use with the -o option of pg_ctl.
Used by MasterStart, SegmentStart to format the backend options
string passed via pg_ctl -o
Examples
--------
>>> str(PgCtlBackendOptions(5432, 1, 2))
'-p 5432 --gp_dbid=1 --gp_num_contents_in_cluster=2 --silent-mode=true'
>>> str(PgCtlBackendOptions(5432, 1, 2).set_master(2, False, False))
'-p 5432 --gp_dbid=1 --gp_num_contents_in_cluster=2 --silent-mode=true -i -M master --gp_contentid=-1 -x 2'
>>> str(PgCtlBackendOptions(5432, 1, 2).set_master(2, False, True))
'-p 5432 --gp_dbid=1 --gp_num_contents_in_cluster=2 --silent-mode=true -i -M master --gp_contentid=-1 -x 2 -E'
>>> str(PgCtlBackendOptions(5432, 1, 2).set_segment('mirror', 1))
'-p 5432 --gp_dbid=1 --gp_num_contents_in_cluster=2 --silent-mode=true -i -M mirror --gp_contentid=1'
>>> str(PgCtlBackendOptions(5432, 1, 2).set_special('upgrade'))
'-p 5432 --gp_dbid=1 --gp_num_contents_in_cluster=2 --silent-mode=true -U'
>>> str(PgCtlBackendOptions(5432, 1, 2).set_special('maintenance'))
'-p 5432 --gp_dbid=1 --gp_num_contents_in_cluster=2 --silent-mode=true -m'
>>> str(PgCtlBackendOptions(5432, 1, 2).set_utility(True))
'-p 5432 --gp_dbid=1 --gp_num_contents_in_cluster=2 --silent-mode=true -c gp_role=utility'
>>> str(PgCtlBackendOptions(5432, 1, 2).set_utility(False))
'-p 5432 --gp_dbid=1 --gp_num_contents_in_cluster=2 --silent-mode=true'
>>> str(PgCtlBackendOptions(5432, 1, 2).set_restricted(True,1))
'-p 5432 --gp_dbid=1 --gp_num_contents_in_cluster=2 --silent-mode=true -c superuser_reserved_connections=1'
>>>
"""
def __init__(self, port, dbid, numcids):
"""
@param port: backend port
@param dbid: backed dbid
@param numcids: total number of content ids in cluster
"""
CmdArgs.__init__(self, [
"-p", str(port),
"--gp_dbid="+ str(dbid),
"--gp_num_contents_in_cluster="+ str(numcids),
"--silent-mode=true"
])
#
# master/segment-specific options
#
def set_master(self, standby_dbid, disable, seqserver):
"""
@param standby_dbid: standby dbid
@param disable: start without master mirroring?
@param seqserver: start with seqserver?
"""
self.extend(["-i", "-M", "master", "--gp_contentid=-1", "-x", str(standby_dbid)])
if disable: self.append("-y")
if seqserver: self.append("-E")
return self
def set_segment(self, mode, content):
"""
@param mode: mirroring mode
@param content: content id
"""
self.extend(["-i", "-M", str(mode), "--gp_contentid="+str(content)])
return self
#
# startup mode options
#
def set_special(self, special):
"""
@param special: special mode (none, 'upgrade' or 'maintenance')
"""
opt = {None:None, 'upgrade':'-U', 'maintenance':'-m'}[special]
if opt: self.append(opt)
return self
def set_utility(self, utility):
"""
@param utility: true if starting in utility mode
"""
if utility: self.append("-c gp_role=utility")
return self
def set_restricted(self, restricted, max_connections):
"""
@param restricted: true if restricting connections
@param max_connections: connection limit
"""
if restricted: self.append("-c superuser_reserved_connections=%s" % max_connections)
return self
class PgCtlStartArgs(CmdArgs):
"""
Used by MasterStart, SegmentStart to format the pg_ctl command
to start a backend postmaster.
Examples
--------
>>> a = PgCtlStartArgs("/data1/master/gpseg-1", str(PgCtlBackendOptions(5432, 1, 2)), 123, None, None, True, 600)
>>> str(a).split(' ') #doctest: +NORMALIZE_WHITESPACE
['env', GPERA=123', '$GPHOME/bin/pg_ctl', '-D', '/data1/master/gpseg-1', '-l',
'/data1/master/gpseg-1/pg_log/startup.log', '-w', '-t', '600',
'-o', '"', '-p', '5432', '--gp_dbid=1', '--gp_num_contents_in_cluster=2', '--silent-mode=true', '"', 'start']
"""
def __init__(self, datadir, backend, era, wrapper, args, wait, timeout=None):
"""
@param datadir: database data directory
@param backend: backend options string from PgCtlBackendOptions
@param era: gpdb master execution era
@param wrapper: wrapper executable for pg_ctl
@param args: wrapper arguments for pg_ctl
@param wait: true if pg_ctl should wait until backend starts completely
@param timeout: number of seconds to wait before giving up
"""
CmdArgs.__init__(self, [
"env", # variables examined by gpkill/gpdebug/etc
"GPSESSID=0000000000", # <- overwritten with gp_session_id to help identify orphans
"GPERA=%s" % str(era), # <- master era used to help identify orphans
"$GPHOME/bin/pg_ctl",
"-D", str(datadir),
"-l", "%s/pg_log/startup.log" % datadir,
])
self.set_wrapper(wrapper, args)
self.set_wait_timeout(wait, timeout)
self.extend([
"-o", "\"", str(backend), "\"",
"start"
])
class PgCtlStopArgs(CmdArgs):
"""
Used by MasterStop, SegmentStop to format the pg_ctl command
to stop a backend postmaster
>>> str(PgCtlStopArgs("/data1/master/gpseg-1", "smart", True, 600))
'$GPHOME/bin/pg_ctl -D /data1/master/gpseg-1 -m smart -w -t 600 stop'
"""
def __init__(self, datadir, mode, wait, timeout):
"""
@param datadir: database data directory
@param mode: shutdown mode (smart, fast, immediate)
@param wait: true if pg_ctlshould wait for backend to stop
@param timeout: number of seconds to wait before giving up
"""
CmdArgs.__init__(self, [
"$GPHOME/bin/pg_ctl",
"-D", str(datadir),
"-m", str(mode),
])
self.set_wait_timeout(wait, timeout)
self.append("stop")
class MasterStart(Command):
def __init__(self, name, dataDir, port, dbid, standby_dbid, numContentsInCluster, era,
wrapper, wrapper_args, specialMode=None, restrictedMode=False, timeout=SEGMENT_TIMEOUT_DEFAULT,
max_connections=1, disableMasterMirror=False, utilityMode=False, ctxt=LOCAL, remoteHost=None,
wait=True
):
self.dataDir=dataDir
self.port=port
self.utilityMode=utilityMode
self.wrapper=wrapper
self.wrapper_args=wrapper_args
# build backend options
b = PgCtlBackendOptions(port, dbid, numContentsInCluster)
b.set_master(standby_dbid, disableMasterMirror, seqserver=not utilityMode)
b.set_utility(utilityMode)
b.set_special(specialMode)
b.set_restricted(restrictedMode, max_connections)
# build pg_ctl command
c = PgCtlStartArgs(dataDir, b, era, wrapper, wrapper_args, wait, timeout)
self.cmdStr = str(c)
Command.__init__(self, name, self.cmdStr, ctxt, remoteHost)
@staticmethod
def local(name, dataDir, port, dbid, standbydbid, numContentsInCluster, era,
wrapper, wrapper_args, specialMode=None, restrictedMode=False, timeout=SEGMENT_TIMEOUT_DEFAULT,
max_connections=1, disableMasterMirror=False, utilityMode=False):
cmd=MasterStart(name, dataDir, port, dbid, standbydbid, numContentsInCluster, era,
wrapper, wrapper_args, specialMode, restrictedMode, timeout,
max_connections, disableMasterMirror, utilityMode)
cmd.run(validateAfter=True)
#-----------------------------------------------
class MasterStop(Command):
def __init__(self,name,dataDir,mode='smart',timeout=SEGMENT_STOP_TIMEOUT_DEFAULT, ctxt=LOCAL,remoteHost=None):
self.dataDir = dataDir
self.cmdStr = str( PgCtlStopArgs(dataDir, mode, True, timeout) )
Command.__init__(self, name, self.cmdStr, ctxt, remoteHost)
@staticmethod
def local(name,dataDir):
cmd=MasterStop(name,dataDir)
cmd.run(validateAfter=True)
#-----------------------------------------------
class SegmentStart(Command):
"""
SegmentStart is used to start a single segment.
Note: Most code should probably use GpSegStartCmd instead which starts up
all of the segments on a specified GpHost.
"""
def __init__(self, name, gpdb, numContentsInCluster, era, mirrormode,
utilityMode=False, ctxt=LOCAL, remoteHost=None,
noWait=False, timeout=SEGMENT_TIMEOUT_DEFAULT,
specialMode=None, wrapper=None, wrapper_args=None):
# This is referenced from calling code
self.segment = gpdb
# Interesting data from our input segment
dbid = gpdb.getSegmentDbId()
content = gpdb.getSegmentContentId()
port = gpdb.getSegmentPort()
datadir = gpdb.getSegmentDataDirectory()
# build backend options
b = PgCtlBackendOptions(port, dbid, numContentsInCluster)
b.set_segment(mirrormode, content)
b.set_utility(utilityMode)
b.set_special(specialMode)
# build pg_ctl command
c = PgCtlStartArgs(datadir, b, era, wrapper, wrapper_args, not noWait, timeout)
self.cmdStr = str(c) + ' 2>&1'
Command.__init__(self, name, self.cmdStr, ctxt, remoteHost)
@staticmethod
def local(name, gpdb, numContentsInCluster, era, mirrormode, utilityMode=False):
cmd=SegmentStart(name, gpdb, numContentsInCluster, era, mirrormode, utilityMode)
cmd.run(validateAfter=True)
@staticmethod
def remote(name, remoteHost, gpdb, numContentsInCluster, era, mirrormode, utilityMode=False):
cmd=SegmentStart(name, gpdb, numContentsInCluster, era, mirrormode, utilityMode, ctxt=REMOTE, remoteHost=remoteHost)
cmd.run(validateAfter=True)
#-----------------------------------------------
class SendFilerepTransitionMessage(Command):
# see gpmirrortransition.c and primary_mirror_transition_client.h
TRANSITION_ERRCODE_SUCCESS = 0
TRANSITION_ERRCODE_ERROR_UNSPECIFIED = 1
TRANSITION_ERRCODE_ERROR_SERVER_DID_NOT_RETURN_DATA = 10
TRANSITION_ERRCODE_ERROR_PROTOCOL_VIOLATED = 11
TRANSITION_ERRCODE_ERROR_HOST_LOOKUP_FAILED = 12
TRANSITION_ERRCODE_ERROR_INVALID_ARGUMENT = 13
TRANSITION_ERRCODE_ERROR_READING_INPUT = 14
TRANSITION_ERRCODE_ERROR_SOCKET = 15
#
# note: this should be cleaned up -- there are two hosts involved,
# the host on which to run gp_primarymirror, AND the host to pass to gp_primarymirror -h
#
# Right now, it uses the same for both which is pretty wrong for anything but a local context.
#
def __init__(self, name, inputFile, port=None,ctxt=LOCAL, remoteHost=None, dataDir=None):
if not remoteHost:
remoteHost = "localhost"
self.cmdStr='$GPHOME/bin/gp_primarymirror -h %s -p %s -i %s' % (remoteHost,port,inputFile)
self.dataDir = dataDir
Command.__init__(self,name,self.cmdStr,ctxt,remoteHost)
@staticmethod
def local(name,inputFile,port=None,remoteHost=None):
cmd=SendFilerepTransitionMessage(name, inputFile, port, LOCAL, remoteHost)
cmd.run(validateAfter=True)
return cmd
@staticmethod
def buildTransitionMessageCommand(transitionData, dir, port):
dbData = transitionData["dbsByPort"][int(port)]
targetMode = dbData["targetMode"]
argsArr = []
argsArr.append(targetMode)
if targetMode == 'mirror' or targetMode == 'primary':
mode = dbData["mode"]
if mode == 'r' and dbData["fullResyncFlag"]:
# full resync requested, convert 'r' to 'f'
argsArr.append( 'f' )
else:
# otherwise, pass the mode through
argsArr.append( dbData["mode"])
argsArr.append( dbData["hostName"])
argsArr.append( "%d" % dbData["hostPort"])
argsArr.append( dbData["peerName"])
argsArr.append( "%d" % dbData["peerPort"])
argsArr.append( "%d" % dbData["peerPMPort"])
#
# write arguments to input file. We will leave this file around. It can be useful for debugging
#
inputFile = os.path.join( dir, "gp_pmtransition_args" )
writeLinesToFile(inputFile, argsArr)
return SendFilerepTransitionMessage("Changing seg at dir %s" % dir, inputFile, port=port, dataDir=dir)
class SendFilerepTransitionStatusMessage(Command):
def __init__(self, name, msg, dataDir=None, port=None,ctxt=LOCAL, remoteHost=None):
if not remoteHost:
remoteHost = "localhost"
self.cmdStr='$GPHOME/bin/gp_primarymirror -h %s -p %s' % (remoteHost,port)
self.dataDir = dataDir
logger.debug("Sending msg %s and cmdStr %s" % (msg, self.cmdStr))
Command.__init__(self, name, self.cmdStr, ctxt, remoteHost, stdin=msg)
def unpackSuccessLine(self):
"""
After run() has been called on this cmd, call this to find the "Success" data in the output
That line is returned if successful, otherwise None is returned
"""
res = self.get_results()
if res.rc != 0:
logger.warn("Error getting data stdout:\"%s\" stderr:\"%s\"" % \
(res.stdout.replace("\n", " "), res.stderr.replace("\n", " ")))
return None
else:
logger.info("Result: stdout:\"%s\" stderr:\"%s\"" % \
(res.stdout.replace("\n", " "), res.stderr.replace("\n", " ")))
line = res.stderr
if line.startswith("Success:"):
line = line[len("Success:"):]
return line
#-----------------------------------------------
class SendFilerepVerifyMessage(Command):
DEFAULT_IGNORE_FILES = [
'pg_internal.init', 'pgstat.stat', 'pga_hba.conf',
'pg_ident.conf', 'pg_fsm.cache', 'gp_dbid', 'gp_pmtransitions_args',
'gp_dump', 'postgresql.conf', 'postmaster.log', 'postmaster.opts',
'postmaser.pids', 'postgresql.conf.bak', 'core', 'wet_execute.tbl',
'recovery.done', 'gp_temporary_files_filespace', 'gp_transaction_files_filespace']
DEFAULT_IGNORE_DIRS = [
'pgsql_tmp', 'pg_xlog', 'pg_log', 'pg_stat_tmp', 'pg_changetracking', 'pg_verify', 'db_dumps', 'pg_utilitymodedtmredo', 'gpperfmon'
]
def __init__(self, name, host, port, token, full=None, verify_file=None, verify_dir=None,
abort=None, suspend=None, resume=None, ignore_dir=None, ignore_file=None,
results=None, results_level=None, ctxt=LOCAL, remoteHost=None):
"""
Sends gp_verify message to backend to either start or get results of a
mirror verification.
"""
self.host = host
self.port = port
msg_contents = ['gp_verify']
## The ordering of the following appends is critical. Do not rearrange without
## an associated change in gp_primarymirror
# full
msg_contents.append('true') if full else msg_contents.append('')
# verify_file
msg_contents.append(verify_file) if verify_file else msg_contents.append('')
# verify_dir
msg_contents.append(verify_dir) if verify_dir else msg_contents.append('')
# token
msg_contents.append(token)
# abort
msg_contents.append('true') if abort else msg_contents.append('')
# suspend
msg_contents.append('true') if suspend else msg_contents.append('')
# resume
msg_contents.append('true') if resume else msg_contents.append('')
# ignore_directory
ignore_dir_list = SendFilerepVerifyMessage.DEFAULT_IGNORE_DIRS + (ignore_dir.split(',') if ignore_dir else [])
msg_contents.append(','.join(ignore_dir_list))
# ignore_file
ignore_file_list = SendFilerepVerifyMessage.DEFAULT_IGNORE_FILES + (ignore_file.split(',') if ignore_file else [])
msg_contents.append(','.join(ignore_file_list))
# resultslevel
msg_contents.append(str(results_level)) if results_level else msg_contents.append('')
logger.debug("gp_verify message sent to %s:%s:\n%s" % (host, port, "\n".join(msg_contents)))
self.cmdStr='$GPHOME/bin/gp_primarymirror -h %s -p %s' % (host, port)
Command.__init__(self, name, self.cmdStr, ctxt, remoteHost, stdin="\n".join(msg_contents))
#-----------------------------------------------
class SegmentStop(Command):
def __init__(self, name, dataDir,mode='smart', nowait=False, ctxt=LOCAL,
remoteHost=None, timeout=SEGMENT_STOP_TIMEOUT_DEFAULT):
self.cmdStr = str( PgCtlStopArgs(dataDir, mode, not nowait, timeout) )
Command.__init__(self,name,self.cmdStr,ctxt,remoteHost)
@staticmethod
def local(name, dataDir,mode='smart'):
cmd=SegmentStop(name, dataDir,mode)
cmd.run(validateAfter=True)
return cmd
@staticmethod
def remote(name, hostname, dataDir, mode='smart'):
cmd=SegmentStop(name, dataDir, mode, ctxt=REMOTE, remoteHost=hostname)
cmd.run(validateAfter=True)
return cmd
#-----------------------------------------------
class SegmentIsShutDown(Command):
"""
Get the pg_controldata status, and check that it says 'shut down'
"""
def __init__(self,name,directory,ctxt=LOCAL,remoteHost=None):
cmdStr = "$GPHOME/bin/pg_controldata %s" % directory
Command.__init__(self,name,cmdStr,ctxt,remoteHost)
def is_shutdown(self):
for key, value in self.results.split_stdout():
if key == 'Database cluster state':
return value.strip() == 'shut down'
return False
@staticmethod
def local(name,directory):
cmd=SegmentIsShutDown(name,directory)
cmd.run(validateAfter=True)
#
# list of valid segment statuses that can be requested
#
SEGMENT_STATUS_GET_STATUS = "getStatus"
#
# corresponds to a postmaster string value; result is a string object, or None if version could not be fetched
#
SEGMENT_STATUS__GET_VERSION = "getVersion"
#
# corresponds to a postmaster string value; result is a dictionary object, or None if data could not be fetched
#
# dictionary will contain:
# mode -> string
# segmentState -> string
# dataState -> string
# resyncNumCompleted -> large integer
# resyncTotalToComplete -> large integer
# elapsedTimeSeconds -> large integer
#
SEGMENT_STATUS__GET_MIRROR_STATUS = "getMirrorStatus"
#
# fetch the active PID of this segment; result is a dict with "pid" and "error" values
#
# see comments on getPidStatus in GpSegStatusProgram class
#
SEGMENT_STATUS__GET_PID = "__getPid"
#
# fetch True or False depending on whether the /tmp/.s.PSQL.<port>.lock file is there
#
SEGMENT_STATUS__HAS_LOCKFILE = "__hasLockFile"
#
# fetch True or False depending on whether the postmaster pid file is there
#
SEGMENT_STATUS__HAS_POSTMASTER_PID_FILE = "__hasPostmasterPidFile"
class GpGetStatusUsingTransitionArgs(CmdArgs):
"""
Examples
--------
>>> str(GpGetStatusUsingTransitionArgs([],'request'))
'$GPHOME/sbin/gpgetstatususingtransition.py -s request'
"""
def __init__(self, segments, status_request):
"""
@param status_request
"""
CmdArgs.__init__(self, [
"$GPHOME/sbin/gpgetstatususingtransition.py",
"-s", str(status_request)
])
self.set_segments(segments)
class GpGetSegmentStatusValues(Command):
"""
Fetch status values for segments on a host
Results will be a bin-hexed/pickled value that, when unpacked, will give a
two-level map:
outer-map maps from SEGMENT_STATUS__* value to inner-map
inner-map maps from dbid to result (which is usually a string, but may be different)
@param statusRequestArr an array of SEGMENT_STATUS__ constants
"""
def __init__(self, name, segments, statusRequestArr, verbose=False, ctxt=LOCAL, remoteHost=None):
# clone the list
self.dblist = [x for x in segments]
# build gpgetstatususingtransition commadn
status_request = ":".join(statusRequestArr)
c = GpGetStatusUsingTransitionArgs(segments, status_request)
c.set_verbose(verbose)
cmdStr = str(c)
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
def decodeResults(self):
"""
return (warning,outputFromCmd) tuple, where if warning is None then
results were returned and outputFromCmd should be read. Otherwise, the warning should
be logged and outputFromCmd ignored
"""
if self.get_results().rc != 0:
return ("Error getting status from host %s" % self.remoteHost, None)
outputFromCmd = None
for line in self.get_results().stdout.split('\n'):
if line.startswith("STATUS_RESULTS:"):
toDecode = line[len("STATUS_RESULTS:"):]
outputFromCmd = pickle.loads(base64.urlsafe_b64decode(toDecode))
break
if outputFromCmd is None:
return ("No status output provided from host %s" % self.remoteHost, None)
return (None, outputFromCmd)
SEGSTART_ERROR_UNKNOWN_ERROR = -1
SEGSTART_SUCCESS = 0
SEGSTART_ERROR_MIRRORING_FAILURE = 1
SEGSTART_ERROR_POSTMASTER_DIED = 2
SEGSTART_ERROR_INVALID_STATE_TRANSITION = 3
SEGSTART_ERROR_SERVER_IS_IN_SHUTDOWN = 4
SEGSTART_ERROR_STOP_RUNNING_SEGMENT_FAILED = 5
SEGSTART_ERROR_DATA_DIRECTORY_DOES_NOT_EXIST = 6
SEGSTART_ERROR_SERVER_DID_NOT_RESPOND = 7
SEGSTART_ERROR_PG_CTL_FAILED = 8
SEGSTART_ERROR_CHECKING_CONNECTION_AND_LOCALE_FAILED = 9
SEGSTART_ERROR_PING_FAILED = 10 # not actually done inside GpSegStartCmd, done instead by caller
SEGSTART_ERROR_OTHER = 1000
class GpSegStartArgs(CmdArgs):
"""
Examples
--------
>>> str(GpSegStartArgs('en_US.utf-8:en_US.utf-8:en_US.utf-8', 'mirrorless', 'gpversion', 1, 123, 600))
"$GPHOME/sbin/gpsegstart.py -C en_US.utf-8:en_US.utf-8:en_US.utf-8 -M mirrorless -V 'gpversion' -n 1 --era 123 -t 600"
"""
def __init__(self, localeData, mirrormode, gpversion, num_cids, era, timeout):
"""
@param localeData - string built from ":".join([lc_collate, lc_monetary, lc_numeric]), e.g. gpEnv.getLocaleData()
@param mirrormode - mirror start mode (START_AS_PRIMARY_OR_MIRROR or START_AS_MIRRORLESS)
@param gpversion - version (from postgres --gp-version)
@param num_cids - number content ids
@param era - master era
@param timeout - seconds to wait before giving up
"""
CmdArgs.__init__(self, [
"$GPHOME/sbin/gpsegstart.py",
"-C", str(localeData),
"-M", str(mirrormode),
"-V '%s'" % gpversion,
"-n", str(num_cids),
"--era", str(era),
"-t", str(timeout)
])
def set_special(self, special):
"""
@param special - special mode
"""
assert(special in [None, 'upgrade', 'maintenance'])
if special:
self.append("-U")
self.append(special)
return self
def set_transition(self, data):
"""
@param data - pickled transition data
"""
if data is not None:
self.append("-p")
self.append(data)
return self
class GpSegStartCmd(Command):
def __init__(self, name, gphome, segments, localeData, gpversion,
mirrormode, numContentsInCluster, era,
timeout=SEGMENT_TIMEOUT_DEFAULT, verbose=False,
ctxt=LOCAL, remoteHost=None, pickledTransitionData=None,
specialMode=None, wrapper=None, wrapper_args=None,
logfileDirectory=False):
# Referenced by calling code (in operations/startSegments.py), create a clone
self.dblist = [x for x in segments]
# build gpsegstart command string
c = GpSegStartArgs(localeData, mirrormode, gpversion, numContentsInCluster, era, timeout)
c.set_verbose(verbose)
c.set_special(specialMode)
c.set_transition(pickledTransitionData)
c.set_wrapper(wrapper, wrapper_args)
c.set_segments(segments)
cmdStr = str(c)
logger.debug(cmdStr)
if (logfileDirectory):
cmdStr = cmdStr + " -l '" + logfileDirectory + "'"
Command.__init__(self,name,cmdStr,ctxt,remoteHost)
class GpSegChangeMirrorModeCmd(Command):
def __init__(self, name, gphome, localeData, gpversion, dbs, targetMode,
pickledParams, verbose=False, ctxt=LOCAL, remoteHost=None):
self.gphome=gphome
self.dblist=dbs
self.dirlist=[]
for db in dbs:
datadir = db.getSegmentDataDirectory()
port = db.getSegmentPort()
self.dirlist.append(datadir + ':' + str(port))
dirstr=" -D ".join(self.dirlist)
if verbose:
setverbose=" -v "
else:
setverbose=""
cmdStr="$GPHOME/sbin/gpsegtoprimaryormirror.py %s -D %s -C %s -M %s -p %s -V '%s'" % \
(setverbose,dirstr,localeData,targetMode,pickledParams,gpversion)
Command.__init__(self,name,cmdStr,ctxt,remoteHost)
#-----------------------------------------------
class GpSegStopCmd(Command):
def __init__(self, name, gphome, version,mode,dbs,timeout=SEGMENT_STOP_TIMEOUT_DEFAULT,
verbose=False, ctxt=LOCAL, remoteHost=None, logfileDirectory=False):
self.gphome=gphome
self.dblist=dbs
self.dirportlist=[]
self.mode=mode
self.version=version
for db in dbs:
datadir = db.getSegmentDataDirectory()
port = db.getSegmentPort()
self.dirportlist.append(datadir + ':' + str(port))
self.timeout=timeout
dirstr=" -D ".join(self.dirportlist)
if verbose:
setverbose=" -v "
else:
setverbose=""
self.cmdStr="$GPHOME/sbin/gpsegstop.py %s -D %s -m %s -t %s -V '%s'" %\
(setverbose,dirstr,mode,timeout,version)
if (logfileDirectory):
self.cmdStr = self.cmdStr + " -l '" + logfileDirectory + "'"
Command.__init__(self,name,self.cmdStr,ctxt,remoteHost)
#-----------------------------------------------
class GpStandbyStart(MasterStart, object):
"""
Start up the master standby. The options to postgres in standby
are almost same as primary master, with a few exceptions.
The standby will be up as dispatch mode, and could be in remote.
"""
def __init__(self, name, datadir, port, ncontents, ctxt=LOCAL,
remoteHost=None, dbid=None, era=None,
wrapper=None, wrapper_args=None):
super(GpStandbyStart, self).__init__(
name=name,
dataDir=datadir,
port=port,
dbid=dbid,
standby_dbid=0,
numContentsInCluster=ncontents,
era=era,
wrapper=wrapper,
wrapper_args=wrapper_args,
disableMasterMirror=True,
ctxt=ctxt,
remoteHost=remoteHost,
wait=False
)
@staticmethod
def local(name, datadir, port, ncontents, dbid, era=None,
wrapper=None, wrapper_args=None):
cmd = GpStandbyStart(name, datadir, port, ncontents,
dbid=dbid, era=era,
wrapper=wrapper, wrapper_args=wrapper_args)
cmd.run(validateAfter=True)
return cmd
@staticmethod
def remote(name, host, datadir, port, ncontents, dbid, era=None,
wrapper=None, wrapper_args=None):
cmd = GpStandbyStart(name, datadir, port, ncontents, ctxt=REMOTE,
remoteHost=host, dbid=dbid, era=era,
wrapper=wrapper, wrapper_args=wrapper_args)
cmd.run(validateAfter=True)
return cmd
#-----------------------------------------------
class GpInitSystem(Command):
def __init__(self,name,configFile,hostsFile, ctxt=LOCAL, remoteHost=None):
self.configFile=configFile
self.hostsFile=hostsFile
self.cmdStr="$GPHOME/bin/gpinitsystem -a -c %s -h %s" % (configFile,hostsFile)
Command.__init__(self, name, self.cmdStr, ctxt, remoteHost)
#-----------------------------------------------
class GpDeleteSystem(Command):
def __init__(self,name,datadir, ctxt=LOCAL, remoteHost=None):
self.datadir=datadir
self.input="y\ny\n"
self.cmdStr="$GPHOME/bin/gpdeletesystem -d %s -f " % (datadir)
Command.__init__(self, name, self.cmdStr, ctxt, remoteHost,stdin=self.input)
#-----------------------------------------------
class GpStart(Command):
def __init__(self, name, masterOnly=False, restricted=False, verbose=False,ctxt=LOCAL, remoteHost=None):
self.cmdStr="$GPHOME/bin/gpstart -a"
if masterOnly:
self.cmdStr += " -m"
self.propagate_env_map['GPSTART_INTERNAL_MASTER_ONLY'] = 1
if restricted:
self.cmdStr += " -R"
if verbose or logging_is_verbose():
self.cmdStr += " -v"
Command.__init__(self,name,self.cmdStr,ctxt,remoteHost)
@staticmethod
def local(name,masterOnly=False,restricted=False):
cmd=GpStart(name,masterOnly,restricted)
cmd.run(validateAfter=True)
#-----------------------------------------------
class NewGpStart(Command):
def __init__(self, name, masterOnly=False, restricted=False, verbose=False,nostandby=False,ctxt=LOCAL, remoteHost=None, masterDirectory=None):
self.cmdStr="$GPHOME/bin/gpstart -a"
if masterOnly:
self.cmdStr += " -m"
self.propagate_env_map['GPSTART_INTERNAL_MASTER_ONLY'] = 1
if restricted:
self.cmdStr += " -R"
if verbose or logging_is_verbose():
self.cmdStr += " -v"
if nostandby:
self.cmdStr += " -y"
if masterDirectory:
self.cmdStr += " -d " + masterDirectory
Command.__init__(self,name,self.cmdStr,ctxt,remoteHost)
@staticmethod
def local(name,masterOnly=False,restricted=False,verbose=False,nostandby=False,
masterDirectory=None):
cmd=NewGpStart(name,masterOnly,restricted,verbose,nostandby,
masterDirectory=masterDirectory)
cmd.run(validateAfter=True)
#-----------------------------------------------
class NewGpStop(Command):
def __init__(self, name, masterOnly=False, restart=False, fast=False, force=False, verbose=False, ctxt=LOCAL, remoteHost=None):
self.cmdStr="$GPHOME/bin/gpstop -a"
if masterOnly:
self.cmdStr += " -m"
if verbose or logging_is_verbose():
self.cmdStr += " -v"
if fast:
self.cmdStr += " -f"
if restart:
self.cmdStr += " -r"
if force:
self.cmdStr += " -M immediate"
Command.__init__(self,name,self.cmdStr,ctxt,remoteHost)
@staticmethod
def local(name,masterOnly=False, restart=False, fast=False, force=False, verbose=False):
cmd=NewGpStop(name,masterOnly,restart, fast, force, verbose)
cmd.run(validateAfter=True)
#-----------------------------------------------
class GpStop(Command):
def __init__(self, name, masterOnly=False, verbose=False, quiet=False, restart=False, fast=False, force=False, datadir=None, ctxt=LOCAL, remoteHost=None, logfileDirectory=False):
self.cmdStr="$GPHOME/bin/gpstop -a"
if masterOnly:
self.cmdStr += " -m"
if restart:
self.cmdStr += " -r"
if fast:
self.cmdStr += " -f"
if force:
self.cmdStr += " -M immediate"
if datadir:
self.cmdStr += " -d %s" % datadir
if verbose or logging_is_verbose():
self.cmdStr += " -v"
if quiet:
self.cmdStr += " -q"
if logfileDirectory:
self.cmdStr += " -l '" + logfileDirectory + "'"
Command.__init__(self,name,self.cmdStr,ctxt,remoteHost)
@staticmethod
def local(name,masterOnly=False, verbose=False, quiet=False,restart=False, fast=False, force=False, datadir=None):
cmd=GpStop(name,masterOnly,verbose,quiet,restart,fast,force,datadir)
cmd.run(validateAfter=True)
return cmd
#-----------------------------------------------
class GpRecoverseg(Command):
def __init__(self, name, ctxt=LOCAL, remoteHost=None):
self.cmdStr = "$GPHOME/bin/gprecoverseg -a"
Command.__init__(self, name, self.cmdStr, ctxt, remoteHost)
#-----------------------------------------------
class Psql(Command):
def __init__(self, name, query=None, filename=None, database='template1', port=None, utilityMode=False, ctxt=LOCAL, remoteHost=None):
env = ''
if utilityMode:
env = 'PGOPTIONS="-c gp_session_role=utility"'
cmdStr = '%s $GPHOME/bin/psql ' % env
if port is not None:
cmdStr += '-p %d ' % port
if query is not None and filename is not None:
raise Exception('Psql can accept only a query or a filename, not both.')
elif query is not None:
cmdStr += '-c "%s" ' % query
elif filename is not None:
cmdStr += '-f %s ' % filename
else:
raise Exception('Psql must be passed a query or a filename.')
# shell escape and force double quote of database in case of any funny chars
cmdStr += '"%s" ' % shellEscape(database)
# Need to escape " for REMOTE or it'll interfere with ssh
if ctxt == REMOTE:
cmdStr = cmdStr.replace('"', '\\"')
self.cmdStr=cmdStr
Command.__init__(self, name, self.cmdStr, ctxt, remoteHost)
#-----------------------------------------------
class ModifyPostgresqlConfSetting(Command):
def __init__(self, name, file, optName, optVal, optType='string', ctxt=LOCAL, remoteHost=None):
cmdStr = None
if optType == 'number':
cmdStr = "perl -p -i.bak -e 's/^%s[ ]*=[ ]*\\d+/%s=%d/' %s" % (optName, optName, optVal, file)
elif optType == 'string':
cmdStr = "perl -i -p -e \"s/^%s[ ]*=[ ]*'[^']*'/%s='%s'/\" %s" % (optName, optName, optVal, file)
else:
raise Exception, "Invalid optType for ModifyPostgresqlConfSetting"
self.cmdStr = cmdStr
Command.__init__(self, name, self.cmdStr, ctxt, remoteHost)
#-----------------------------------------------
class GpCleanSegmentDirectories(Command):
"""
Clean all of the directories for a set of segments on the host.
Does NOT delete all files in the data directories -- tries to preserve logs and any non-database
files the user has placed there
"""
def __init__(self, name, segmentsToClean, ctxt, remoteHost):
pickledSegmentsStr = base64.urlsafe_b64encode(pickle.dumps(segmentsToClean))
cmdStr = "$GPHOME/sbin/gpcleansegmentdir.py -p %s" % pickledSegmentsStr
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
#-----------------------------------------------
class GpDumpDirsExist(Command):
"""
Checks if gp_dump* directories exist in the given directory
"""
def __init__(self, name, baseDir, ctxt=LOCAL, remoteHost=None):
cmdStr = "find %s -name '*dump*' -print" % baseDir
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
@staticmethod
def local(name, baseDir):
cmd = GpDumpDirsExist(name, baseDir)
cmd.run(validateAfter=True)
dirCount = len(cmd.get_results().stdout.split('\n'))
# This is > 1 because the command output will terminate with \n
return dirCount > 1
#-----------------------------------------------
class ConfigureNewSegment(Command):
"""
Configure a new segment, usually from a template, as is done during gpexpand, gpaddmirrors, gprecoverseg (full),
etc.
"""
def __init__(self, name, confinfo, newSegments=False, tarFile=None,
batchSize=None, verbose=False,ctxt=LOCAL, remoteHost=None, validationOnly=False, writeGpIdFileOnly=False):
cmdStr = '$GPHOME/bin/lib/gpconfigurenewsegment -c \"%s\"' % (confinfo)
if newSegments:
cmdStr += ' -n'
if tarFile:
cmdStr += ' -t %s' % tarFile
if verbose:
cmdStr += ' -v '
if batchSize:
cmdStr += ' -B %s' % batchSize
if validationOnly:
cmdStr += " --validation-only"
if writeGpIdFileOnly:
cmdStr += " --write-gpid-file-only"
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
#-----------------------------------------------
@staticmethod
def buildSegmentInfoForNewSegment(segments, isTargetReusedLocationArr = None, primaryMirror = 'both'):
"""
Build the new segment info that can be used to get the confinfo argument to pass to ConfigureNewSegment
@param segments list of segments
@param isTargetReusedLocationArr if not None, then is an array of boolean values in parallel with segments
True values indicate that the directory has been cleaned by gpcleansegmentdir.py
and we should have lighter restrictions on how to check it for emptiness
Passing None is the same as passing an array of all False values
@param primaryMirror Process 'primary' or 'mirror' or 'both'
@return A dictionary with the following format:
Name = <host name>
Value = <system data directory>
: <port>
: if primary then 'true' else 'false'
: if target is reused location then 'true' else 'false'
: <segment dbid>
[ : <filespace oid> : <file space directory> ]...
"""
result = {}
for segIndex, seg in enumerate(segments):
if primaryMirror == 'primary' and seg.isSegmentPrimary() == False:
continue
elif primaryMirror == 'mirror' and seg.isSegmentPrimary() == True:
continue
hostname = seg.getSegmentHostName()
if result.has_key(hostname):
result[hostname] += ','
else:
result[hostname] = ''
isTargetReusedLocation = isTargetReusedLocationArr and isTargetReusedLocationArr[segIndex]
filespaces = []
for fsOid, path in seg.getSegmentFilespaces().iteritems():
if fsOid not in [gparray.SYSTEM_FILESPACE]:
filespaces.append(str(fsOid) + ":" + path)
result[hostname] += '%s:%d:%s:%s:%d%s' % (seg.getSegmentDataDirectory(), seg.getSegmentPort(),
"true" if seg.isSegmentPrimary(current_role=True) else "false",
"true" if isTargetReusedLocation else "false",
seg.getSegmentDbId(),
"" if len(filespaces) == 0 else (":" + ":".join(filespaces))
)
return result
#-----------------------------------------------
class GpVersion(Command):
def __init__(self,name,gphome,ctxt=LOCAL,remoteHost=None):
# XXX this should make use of the gphome that was passed
# in, but this causes problems in some environments and
# requires further investigation.
self.gphome=gphome
#self.cmdStr="%s/bin/postgres --gp-version" % gphome
self.cmdStr="$GPHOME/bin/postgres --gp-version"
Command.__init__(self,name,self.cmdStr,ctxt,remoteHost)
def get_version(self):
return self.results.stdout.strip()
@staticmethod
def local(name,gphome):
cmd=GpVersion(name,gphome)
cmd.run(validateAfter=True)
return cmd.get_version()
#-----------------------------------------------
class GpCatVersion(Command):
"""
Get the catalog version of the binaries in a given GPHOME
"""
def __init__(self,name,gphome,ctxt=LOCAL,remoteHost=None):
# XXX this should make use of the gphome that was passed
# in, but this causes problems in some environments and
# requires further investigation.
self.gphome=gphome
#cmdStr="%s/bin/postgres --catalog-version" % gphome
cmdStr="$GPHOME/bin/postgres --catalog-version"
Command.__init__(self,name,cmdStr,ctxt,remoteHost)
def get_version(self):
# Version comes out like this:
# "Catalog version number: 201002021"
# We only want the number
return self.results.stdout.split(':')[1].strip()
@staticmethod
def local(name,gphome):
cmd=GpCatVersion(name,gphome)
cmd.run(validateAfter=True)
return cmd.get_version()
#-----------------------------------------------
class GpCatVersionDirectory(Command):
"""
Get the catalog version of a given database directory
"""
def __init__(self,name,directory,ctxt=LOCAL,remoteHost=None):
cmdStr = "$GPHOME/bin/pg_controldata %s" % directory
Command.__init__(self,name,cmdStr,ctxt,remoteHost)
def get_version(self):
"sift through pg_controldata looking for the catalog version number"
for key, value in self.results.split_stdout():
if key == 'Catalog version number':
return value.strip()
@staticmethod
def local(name,directory):
cmd=GpCatVersionDirectory(name,directory)
cmd.run(validateAfter=True)
return cmd.get_version()
#-----------------------------------------------
class GpAddConfigScript(Command):
def __init__(self, name, directorystring, entry, value=None, removeonly=False, ctxt=LOCAL, remoteHost=None):
cmdStr="echo '%s' | $GPHOME/sbin/gpaddconfig.py --entry %s" % (directorystring, entry)
if value:
# value will be encoded and unencoded in the script to protect against shell interpretation
value = base64.urlsafe_b64encode(pickle.dumps(value))
cmdStr = cmdStr + " --value '" + value + "'"
if removeonly:
cmdStr = cmdStr + " --removeonly "
Command.__init__(self,name,cmdStr,ctxt,remoteHost)
#-----------------------------------------------
class GpAppendGucToFile(Command):
# guc value will come in pickled and base64 encoded
def __init__(self,name,file,guc,value,ctxt=LOCAL,remoteHost=None):
unpickledText = pickle.loads(base64.urlsafe_b64decode(value))
finalText = unpickledText.replace('"', '\\\"')
cmdStr = 'echo "%s=%s" >> %s' % (guc, finalText, file)
Command.__init__(self,name,cmdStr,ctxt,remoteHost)
#-----------------------------------------------
class GpLogFilter(Command):
def __init__(self, name, filename, start=None, end=None, duration=None,
case=None, count=None, search_string=None,
exclude_string=None, search_regex=None, exclude_regex=None,
trouble=None, ctxt=LOCAL,remoteHost=None):
cmdfrags = []
if start:
cmdfrags.append('--begin=%s' % start)
if end:
cmdfrags.append('--end=%s' % end)
if duration:
cmdfrags.append('--duration=%s' % duration)
if case:
cmdfrags.append('--case=%s' % case)
if search_string:
cmdfrags.append('--find=\'%s\'' % search_string)
if exclude_string:
cmdfrags.append('--nofind=\'%s\'' % exclude_string)
if search_regex:
cmdfrags.append('--match=\'%s\'' % search_regex)
if count:
cmdfrags.append('-n %s' % count)
if exclude_regex:
cmdfrags.append('--nomatch=\'%s\'' % exclude_regex)
if trouble:
cmdfrags.append('-t')
cmdfrags.append(filename)
self.cmdStr = "$GPHOME/bin/gplogfilter %s" % ' '.join(cmdfrags)
Command.__init__(self, name, self.cmdStr, ctxt,remoteHost)
@staticmethod
def local(name, filename, start=None, end=None, duration=None,
case=None, count=None, search_string=None,
exclude_string=None, search_regex=None, exclude_regex=None,
trouble=None):
cmd = GpLogFilter(name, filename, start, end, duration, case, count, search_string,
exclude_string, search_regex, exclude_regex, trouble)
cmd.run(validateAfter=True)
return "".join(cmd.get_results().stdout).split("\r\n")
#-----------------------------------------------
def distribute_tarball(queue,list,tarball):
logger.debug("distributeTarBall start")
for db in list:
hostname = db.getSegmentHostName()
datadir = db.getSegmentDataDirectory()
(head,tail)=os.path.split(datadir)
scp_cmd=RemoteCopy("copy master",tarball,hostname,head)
queue.addCommand(scp_cmd)
queue.join()
queue.check_results()
logger.debug("distributeTarBall finished")
class GpError(Exception): pass
######
def get_user():
logger.debug("Checking if LOGNAME or USER env variable is set.")
username = os.environ.get('LOGNAME') or os.environ.get('USER')
if not username:
raise GpError('Environment Variable LOGNAME or USER not set')
return username
def get_gphome():
logger.debug("Checking if GPHOME env variable is set.")
gphome=os.getenv('GPHOME',None)
if not gphome:
raise GpError('Environment Variable GPHOME not set')
return gphome
######
def get_masterdatadir():
logger.debug("Checking if MASTER_DATA_DIRECTORY env variable is set.")
master_datadir = os.environ.get('MASTER_DATA_DIRECTORY')
if not master_datadir:
raise GpError("Environment Variable MASTER_DATA_DIRECTORY not set!")
return master_datadir
######
def get_masterport(datadir):
return pgconf.readfile(os.path.join(datadir, 'postgresql.conf')).int('port')
######
def check_permissions(username):
logger.debug("--Checking that current user can use GP binaries")
chk_gpdb_id(username)
#=-=-=-=-=-=-=-=-=-= Bash Migration Helper Functions =-=-=-=-=-=-=-=-
def start_standbymaster(host, datadir, port, dbid, ncontents, era=None,
wrapper=None, wrapper_args=None):
logger.info("Starting standby master")
logger.info("Checking if standby master is running on host: %s in directory: %s" % (host,datadir))
cmd = Command("recovery_startup",
("python -c "
"'from gppylib.commands.gp import recovery_startup; "
"""recovery_startup("{0}", "{1}")'""").format(
datadir, port),
ctxt=REMOTE, remoteHost=host)
cmd.run()
res = cmd.get_results().stderr
if res:
logger.warning("Unable to cleanup previously started standby: '%s'" % res)
#create a pg_log directory if necessary
CreateDirIfNecessary.remote('create standby logdir if needed', host, datadir + "/pg_log")
cmd = GpStandbyStart.remote('start standby master',
host, datadir, port, ncontents, dbid, era=era,
wrapper=wrapper, wrapper_args=wrapper_args)
logger.debug("Starting standby: %s" % cmd )
logger.debug("Starting standby master results: %s" % cmd.get_results() )
if cmd.get_results().rc != 0:
logger.warning("Could not start standby master: %s" % cmd)
return False
# Wait for the standby to start recovery. Ideally this means the
# standby connection is recoginized by the primary, but locally this
# function it is better to work with only standby. If recovery has
# started, this means now postmaster is responsive to signals, which
# allows shutdown etc. If we exit earlier, there is a big chance
# a shutdown message from other process is missed.
for i in xrange(60):
# Fetch it every time, as postmaster might not have been up yet for
# the first few cycles, which we have seen when trying wrapper
# shell script.
pid = getPostmasterPID(host, datadir)
cmd = Command("get pids",
("python -c "
"'from gppylib.commands import unix; "
"print unix.getDescendentProcesses({0})'".format(pid)),
ctxt=REMOTE, remoteHost=host)
cmd.run()
logger.debug(str(cmd))
result = cmd.get_results()
logger.debug(result)
# We want more than postmaster and logger processes.
if result.rc == 0 and len(result.stdout.split(',')) > 2:
return True
time.sleep(1)
logger.warning("Could not start standby master")
return False
def get_pid_from_remotehost(host, datadir):
cmd = Command(name = 'get the pid from postmaster file',
cmdStr = 'head -1 %s/postmaster.pid' % datadir,
ctxt=REMOTE, remoteHost = host)
cmd.run()
pid = None
if cmd.get_results().rc == 0 and cmd.get_results().stdout.strip():
pid = int(cmd.get_results().stdout.strip())
return pid
def is_pid_postmaster(datadir, pid, remoteHost=None):
"""
This function returns true on any uncertancy: if it cannot execute pgrep, pwdx or just connect to the standby host
it will return true
"""
def validate_command (commandName, datadir, ctxt, remoteHost):
cmd = Command ('Check %s availability' % commandName, commandName, ctxt=ctxt, remoteHost=remoteHost)
cmd.run()
if cmd.get_results().rc == COMMAND_NOT_FOUND:
if not remoteHost is None:
logger.warning('command "%s" is not found on host %s. cannot check postmaster status, assuming it is running', commandName, remoteHost)
else:
logger.warning('command "%s" is not found. cannot check postmaster status, assuming it is running', commandName)
return False
return True
if remoteHost is not None:
ctxt = REMOTE
else:
ctxt = LOCAL
is_postmaster = True
if (validate_command ('pgrep', datadir, ctxt, remoteHost) and
validate_command ('pwdx', datadir, ctxt, remoteHost)):
cmdStr = 'pgrep postgres | xargs -i pwdx {} | grep "%s" | grep "^%s:" | cat' % (datadir, pid)
cmd = Command("search for postmaster process", cmdStr, ctxt=ctxt, remoteHost=remoteHost)
res = None
try:
cmd.run(validateAfter=True)
res = cmd.get_results()
if not res.stdout.strip():
is_postmaster = False
else:
logger.info(res.stdout.strip())
except Exception as e:
if not remoteHost is None:
logger.warning('failed to get the status of postmaster %s on %s. assuming that postmaster is running' % (datadir, remoteHost))
else:
logger.warning('failed to get the status of postmaster %s. assuming that postmaster is running' % (datadir))
return is_postmaster
######
def recovery_startup(datadir, port=None):
""" investigate a db that may still be running """
pid=read_postmaster_pidfile(datadir)
if check_pid(pid) and is_pid_postmaster(datadir, pid):
info_str="found postmaster with pid: %d for datadir: %s still running" % (pid,datadir)
logger.info(info_str)
logger.info("attempting to shutdown db with datadir: %s" % datadir )
cmd=SegmentStop('db shutdown' , datadir,mode='fast')
cmd.run()
if check_pid(pid) and is_pid_postmaster(datadir, pid):
info_str="unable to stop postmaster with pid: %d for datadir: %s still running" % (pid,datadir)
logger.info(info_str)
return info_str
else:
logger.info("shutdown of db successful with datadir: %s" % datadir)
return None
else:
# If we get this far it means we don't have a pid and need to do some
# cleanup. Use port number if supplied. postgresql.conf may be
# bogus as we pass port number anyway instead of postgresql.conf
# default value.
if port is None:
pgconf_dict = pgconf.readfile(datadir + "/postgresql.conf")
port = pgconf_dict.int('port')
lockfile="/tmp/.s.PGSQL.%s" % port
tmpfile_exists = os.path.exists(lockfile)
logger.info("No db instance process, entering recovery startup mode")
if tmpfile_exists:
logger.info("Clearing db instance lock files")
os.remove(lockfile)
postmaster_pid_file = "%s/postmaster.pid" % datadir
if os.path.exists(postmaster_pid_file):
logger.info("Clearing db instance pid file")
os.remove("%s/postmaster.pid" % datadir)
return None
# these match names from gp_bash_functions.sh
def chk_gpdb_id(username):
path="%s/bin/initdb" % GPHOME
if not os.access(path,os.X_OK):
raise GpError("File permission mismatch. The current user %s does not have sufficient"
" privileges to run the Greenplum binaries and management utilities." % username )
pass
def chk_local_db_running(datadir, port):
"""Perform a few checks to see if the db is running. We 1st look at:
1) /tmp/.s.PGSQL.<PORT> and /tmp/.s.PGSQL.<PORT>.lock
2) DATADIR/postmaster.pid
3) netstat
Returns tuple in format (postmaster_pid_file_exists, tmpfile_exists, lockfile_exists, port_active, postmaster_pid)
postmaster_pid value is 0 if postmaster_pid_exists is False Note that this is the PID from the postmaster.pid file
"""
# determine if postmaster.pid is there, grab pid from it
postmaster_pid_exists = True
f = None
try:
f = open(datadir + "/postmaster.pid")
except IOError:
postmaster_pid_exists = False
pid_value = 0
if postmaster_pid_exists:
try:
for line in f:
pid_value = int(line) # grab first line only
break
finally:
f.close()
cmd=FileDirExists('check for /tmp/.s.PGSQL file file', "/tmp/.s.PGSQL.%d" % port)
cmd.run(validateAfter=True)
tmpfile_exists = cmd.filedir_exists()
cmd=FileDirExists('check for lock file', get_lockfile_name(port))
cmd.run(validateAfter=True)
lockfile_exists = cmd.filedir_exists()
netstat_port_active = PgPortIsActive.local('check netstat for postmaster port',"/tmp/.s.PGSQL.%d" % port, port)
logger.debug("postmaster_pid_exists: %s tmpfile_exists: %s lockfile_exists: %s netstat port: %s pid: %s" %\
(postmaster_pid_exists, tmpfile_exists, lockfile_exists, netstat_port_active, pid_value))
return (postmaster_pid_exists, tmpfile_exists, lockfile_exists, netstat_port_active, pid_value)
def get_lockfile_name(port):
return "/tmp/.s.PGSQL.%d.lock" % port
def get_local_db_mode(master_data_dir):
""" Gets the mode Greenplum is running in.
Possible return values are:
'NORMAL'
'RESTRICTED'
'UTILITY'
"""
mode = 'NORMAL'
if not os.path.exists(master_data_dir + '/postmaster.pid'):
raise Exception('Greenplum database appears to be stopped')
try:
fp = open(master_data_dir + '/postmaster.opts', 'r')
optline = fp.readline()
if optline.find('superuser_reserved_connections') > 0:
mode = 'RESTRICTED'
elif optline.find('gp_role=utility') > 0:
mode = 'UTILITY'
except OSError:
raise Exception('Failed to open %s. Is Greenplum Database running?' % master_data_dir + '/postmaster.opts')
except IOError:
raise Exception('Failed to read options from %s' % master_data_dir + '/postmaster.opts')
finally:
if fp: fp.close()
return mode
######
def read_postmaster_pidfile(datadir, host=None):
if host:
cmdStr ="""python -c 'from {module} import {func}; print {func}("{args}")'""".format(module=sys.modules[__name__].__name__,
func='read_postmaster_pidfile',
args=datadir)
cmd = Command(name='run this method remotely', cmdStr=cmdStr, ctxt=REMOTE, remoteHost=host)
cmd.run(validateAfter=True)
return int(cmd.get_results().stdout.strip())
pid=0
f = None
try:
f = open(datadir + '/postmaster.pid')
pid = int(f.readline().strip())
except Exception:
pass
finally:
if f: f.close()
return pid
def pausePg(db):
"""
This function will pause an instance of postgres which is part of a GPDB
1) pause the postmaster (this prevents new connections from being made)
2) get list of processes that are descendent from postmaster process
3) pause all descendent processes and ignore any failures (a process may have died betwen getting pid list and doing the pauses)
4) again, get list of processes that are descendent from postmaster process
5) pause all descendent processes and ignore any failures (do not ignore errors, errors are failures, no pids can die between being paused the first time and getting the pid list)
"""
datadir = db.getSegmentDataDirectory()
content = db.getSegmentContentId()
postmasterPID = read_postmaster_pidfile(datadir)
if postmasterPID == 0:
raise Exception, 'print "could not locate postmasterPID during pause'
Kill.local(name="pausep "+str(content), pid=postmasterPID, signal="STOP")
decsendentProcessPids = getDescendentProcesses(postmasterPID)
for killpid in decsendentProcessPids:
try:
Kill.local(name="pausep "+str(killpid), pid=killpid, signal="STOP")
except:
pass
decsendentProcessPids = getDescendentProcesses(postmasterPID)
for killpid in decsendentProcessPids:
Kill.local(name="pausep "+str(killpid), pid=killpid, signal="STOP")
def resumePg(db):
"""
1) resume the processes descendent from the postmaster process
2) resume the postmaster process
"""
datadir = db.getSegmentDataDirectory()
content = db.getSegmentContentId()
postmasterPID = read_postmaster_pidfile(datadir)
if postmasterPID == 0:
raise Exception, 'print "could not locate postmasterPID during resume'
decsendentProcessPids = getDescendentProcesses(postmasterPID)
for killpid in decsendentProcessPids:
Kill.local(name="pausep "+str(killpid), pid=killpid, signal="CONT")
Kill.local(name="pausep "+str(content), pid=postmasterPID, signal="CONT")
def createTempDirectoryName(masterDataDirectory, tempDirPrefix):
return '%s/%s_%s_%d' % (os.sep.join(os.path.normpath(masterDataDirectory).split(os.sep)[:-1]),
tempDirPrefix,
datetime.datetime.now().strftime('%m%d%Y'),
os.getpid())
#-------------------------------------------------------------------------
# gp_dbid methods moved to gp_dbid.py, but this class was left here
# to avoid changing gpmigrator and gpmigrator_mirror (which is the only caller).
#
class GpCreateDBIdFile(Command):
def __init__(self, name, directory, dbid, verbose=False, ctxt=LOCAL, remoteHost=None):
if verbose:
setverbose="-v"
else:
setverbose=""
args = [
"$GPHOME/sbin/gpsetdbid.py",
"-d %s" % directory,
"-i %s" % dbid,
setverbose,
]
cmdStr = " ".join(args)
Command.__init__(self, name, cmdStr, ctxt, remoteHost)
@staticmethod
def local(name, directory, dbid):
cmd = GpCreateDBIdFile(name, directory, dbid)
cmd.run(validateAfter=True)
@staticmethod
def remote(name, remoteHost, directory, dbid):
cmd = GpCreateDBIdFile(name, directory, dbid, ctxt=REMOTE, remoteHost=remoteHost)
cmd.run(validateAfter=True)
#-------------------------------------------------------------------------
class GpRecoverSeg(Command):
"""
This command will execute the gprecoverseg utility
"""
def __init__(self, name, options = "", ctxt = LOCAL, remoteHost = None):
self.name = name
self.options = options
self.ctxt = ctxt
self.remoteHost = remoteHost
cmdStr = "$GPHOME/bin/gprecoverseg %s" % (options)
Command.__init__(self,name,cmdStr,ctxt,remoteHost)
if __name__ == '__main__':
import doctest
doctest.testmod()
|
{
"content_hash": "c3f979c27627a52f39d35b53d142e059",
"timestamp": "",
"source": "github",
"line_count": 1729,
"max_line_length": 183,
"avg_line_length": 38.26778484673221,
"alnum_prop": 0.5938487115544472,
"repo_name": "lintzc/gpdb",
"id": "cae5558a2b57e9b90c9fad4ffcb2688ea5417ac7",
"size": "66249",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "gpMgmt/bin/gppylib/commands/gp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5665"
},
{
"name": "Batchfile",
"bytes": "11028"
},
{
"name": "C",
"bytes": "35144943"
},
{
"name": "C++",
"bytes": "3731160"
},
{
"name": "CMake",
"bytes": "17118"
},
{
"name": "CSS",
"bytes": "7068"
},
{
"name": "Csound Score",
"bytes": "179"
},
{
"name": "Cucumber",
"bytes": "829167"
},
{
"name": "DTrace",
"bytes": "1160"
},
{
"name": "Fortran",
"bytes": "14777"
},
{
"name": "GDB",
"bytes": "576"
},
{
"name": "Groff",
"bytes": "631842"
},
{
"name": "HTML",
"bytes": "169455"
},
{
"name": "Java",
"bytes": "307541"
},
{
"name": "Lex",
"bytes": "196276"
},
{
"name": "M4",
"bytes": "78510"
},
{
"name": "Makefile",
"bytes": "431523"
},
{
"name": "Objective-C",
"bytes": "22149"
},
{
"name": "PLSQL",
"bytes": "190501"
},
{
"name": "PLpgSQL",
"bytes": "8131027"
},
{
"name": "Perl",
"bytes": "3933982"
},
{
"name": "Perl6",
"bytes": "14219"
},
{
"name": "Python",
"bytes": "9229659"
},
{
"name": "Ruby",
"bytes": "21343"
},
{
"name": "SQLPL",
"bytes": "1860160"
},
{
"name": "Shell",
"bytes": "484246"
},
{
"name": "XS",
"bytes": "8405"
},
{
"name": "XSLT",
"bytes": "5779"
},
{
"name": "Yacc",
"bytes": "486035"
}
],
"symlink_target": ""
}
|
"""
WSGI config for django_api project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "unpp_api.settings")
application = Cling(get_wsgi_application())
|
{
"content_hash": "016eb7b50f92e69ef9e75b7769fbe462",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 78,
"avg_line_length": 23.944444444444443,
"alnum_prop": 0.7679814385150812,
"repo_name": "unicef/un-partner-portal",
"id": "3d8561d4d7b6e5e3bd3e1337f5a82c0ef8c9033d",
"size": "431",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "backend/unpp_api/wsgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "468629"
},
{
"name": "Dockerfile",
"bytes": "2303"
},
{
"name": "HTML",
"bytes": "49027"
},
{
"name": "JavaScript",
"bytes": "2199879"
},
{
"name": "Python",
"bytes": "1322681"
},
{
"name": "Shell",
"bytes": "4734"
},
{
"name": "Smarty",
"bytes": "751"
}
],
"symlink_target": ""
}
|
import sys
import os
PATHS = [
# src and config
'.',
'src',
]
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
PROJECT_ROOT_DIR = os.path.abspath(os.path.join(CURRENT_DIR, os.pardir))
LOCAL_SETTINGS_DIR = os.path.abspath(
os.path.join(PROJECT_ROOT_DIR, os.pardir, 'local_config'))
LOCAL_SETTINGS_PATH = os.path.join(LOCAL_SETTINGS_DIR, 'settings.py')
def get_paths(paths):
# only include a path if not already in sys.path to avoid duplication of
# paths when using code reloading
path_set = set(sys.path)
for p in paths:
path = os.path.abspath(os.path.join(PROJECT_ROOT_DIR, p))
if path not in path_set:
yield path
def setup_paths():
sys.path = list(get_paths(PATHS)) + sys.path
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_project.settings')
if __name__ == '__main__':
# For use in shell scripting
# e.g. $(python paths.py)
print("export PYTHONPATH=%s" % ":".join(get_paths(PATHS)))
print("export DJANGO_SETTINGS_MODULE=django_project.settings")
|
{
"content_hash": "5b705c76d92b30a37a82572772674115",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 78,
"avg_line_length": 28.64864864864865,
"alnum_prop": 0.6575471698113208,
"repo_name": "memodir/cv",
"id": "1c52c158458a38b6e2e70d19fe46e3cd1190cdf5",
"size": "1084",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_project/paths.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "497484"
},
{
"name": "Dockerfile",
"bytes": "1149"
},
{
"name": "HTML",
"bytes": "50820"
},
{
"name": "JavaScript",
"bytes": "696134"
},
{
"name": "Makefile",
"bytes": "5820"
},
{
"name": "Python",
"bytes": "188601"
},
{
"name": "Shell",
"bytes": "1268"
},
{
"name": "Vue",
"bytes": "6858"
}
],
"symlink_target": ""
}
|
import mock
import six
from nova import exception
from nova import objects
from nova.pci import devspec
from nova import test
dev = {"vendor_id": "8086",
"product_id": "5057",
"address": "0000:0a:00.5",
"parent_addr": "0000:0a:00.0"}
class PciAddressSpecTestCase(test.NoDBTestCase):
def test_pci_address_spec_abstact_instance_fail(self):
self.assertRaises(TypeError, devspec.PciAddressSpec)
class PhysicalPciAddressTestCase(test.NoDBTestCase):
pci_addr = {"domain": "0000",
"bus": "0a",
"slot": "00",
"function": "5"}
def test_init_by_dict(self):
phys_addr = devspec.PhysicalPciAddress(self.pci_addr)
self.assertEqual(phys_addr.domain, self.pci_addr['domain'])
self.assertEqual(phys_addr.bus, self.pci_addr['bus'])
self.assertEqual(phys_addr.slot, self.pci_addr['slot'])
self.assertEqual(phys_addr.func, self.pci_addr['function'])
def test_init_by_dict_invalid_address_values(self):
invalid_val_addr = {"domain": devspec.MAX_DOMAIN + 1,
"bus": devspec.MAX_BUS + 1,
"slot": devspec.MAX_SLOT + 1,
"function": devspec.MAX_FUNC + 1}
for component in invalid_val_addr:
address = dict(self.pci_addr)
address[component] = str(invalid_val_addr[component])
self.assertRaises(exception.PciConfigInvalidWhitelist,
devspec.PhysicalPciAddress, address)
def test_init_by_dict_missing_values(self):
for component in self.pci_addr:
address = dict(self.pci_addr)
del address[component]
self.assertRaises(exception.PciDeviceWrongAddressFormat,
devspec.PhysicalPciAddress, address)
def test_init_by_string(self):
address_str = "0000:0a:00.5"
phys_addr = devspec.PhysicalPciAddress(address_str)
self.assertEqual(phys_addr.domain, "0000")
self.assertEqual(phys_addr.bus, "0a")
self.assertEqual(phys_addr.slot, "00")
self.assertEqual(phys_addr.func, "5")
def test_init_by_string_invalid_values(self):
invalid_addresses = [str(devspec.MAX_DOMAIN + 1) + ":0a:00.5",
"0000:" + str(devspec.MAX_BUS + 1) + ":00.5",
"0000:0a:" + str(devspec.MAX_SLOT + 1) + ".5",
"0000:0a:00." + str(devspec.MAX_FUNC + 1)]
for address in invalid_addresses:
self.assertRaises(exception.PciConfigInvalidWhitelist,
devspec.PhysicalPciAddress, address)
def test_init_by_string_missing_values(self):
invalid_addresses = ["00:0000:0a:00.5", "0a:00.5", "0000:00.5"]
for address in invalid_addresses:
self.assertRaises(exception.PciDeviceWrongAddressFormat,
devspec.PhysicalPciAddress, address)
def test_match(self):
address_str = "0000:0a:00.5"
phys_addr1 = devspec.PhysicalPciAddress(address_str)
phys_addr2 = devspec.PhysicalPciAddress(address_str)
self.assertTrue(phys_addr1.match(phys_addr2))
def test_false_match(self):
address_str = "0000:0a:00.5"
phys_addr1 = devspec.PhysicalPciAddress(address_str)
addresses = ["0010:0a:00.5", "0000:0b:00.5",
"0000:0a:01.5", "0000:0a:00.4"]
for address in addresses:
phys_addr2 = devspec.PhysicalPciAddress(address)
self.assertFalse(phys_addr1.match(phys_addr2))
class PciAddressGlobSpecTestCase(test.NoDBTestCase):
def test_init(self):
address_str = "0000:0a:00.5"
phys_addr = devspec.PciAddressGlobSpec(address_str)
self.assertEqual(phys_addr.domain, "0000")
self.assertEqual(phys_addr.bus, "0a")
self.assertEqual(phys_addr.slot, "00")
self.assertEqual(phys_addr.func, "5")
def test_init_invalid_address(self):
invalid_addresses = ["00:0000:0a:00.5"]
for address in invalid_addresses:
self.assertRaises(exception.PciDeviceWrongAddressFormat,
devspec.PciAddressGlobSpec, address)
def test_init_invalid_values(self):
invalid_addresses = [str(devspec.MAX_DOMAIN + 1) + ":0a:00.5",
"0000:" + str(devspec.MAX_BUS + 1) + ":00.5",
"0000:0a:" + str(devspec.MAX_SLOT + 1) + ".5",
"0000:0a:00." + str(devspec.MAX_FUNC + 1)]
for address in invalid_addresses:
self.assertRaises(exception.PciConfigInvalidWhitelist,
devspec.PciAddressGlobSpec, address)
def test_match(self):
address_str = "0000:0a:00.5"
phys_addr = devspec.PhysicalPciAddress(address_str)
addresses = ["0000:0a:00.5", "*:0a:00.5", "0000:*:00.5",
"0000:0a:*.5", "0000:0a:00.*"]
for address in addresses:
glob_addr = devspec.PciAddressGlobSpec(address)
self.assertTrue(glob_addr.match(phys_addr))
def test_false_match(self):
address_str = "0000:0a:00.5"
phys_addr = devspec.PhysicalPciAddress(address_str)
addresses = ["0010:0a:00.5", "0000:0b:00.5",
"*:0a:01.5", "0000:0a:*.4"]
for address in addresses:
glob_addr = devspec.PciAddressGlobSpec(address)
self.assertFalse(phys_addr.match(glob_addr))
class PciAddressRegexSpecTestCase(test.NoDBTestCase):
def test_init(self):
address_regex = {"domain": ".*",
"bus": "02",
"slot": "01",
"function": "[0-2]"}
phys_addr = devspec.PciAddressRegexSpec(address_regex)
self.assertEqual(phys_addr.domain, ".*")
self.assertEqual(phys_addr.bus, "02")
self.assertEqual(phys_addr.slot, "01")
self.assertEqual(phys_addr.func, "[0-2]")
def test_init_invalid_address(self):
invalid_addresses = [{"domain": "*",
"bus": "02",
"slot": "01",
"function": "[0-2]"}]
for address in invalid_addresses:
self.assertRaises(exception.PciDeviceWrongAddressFormat,
devspec.PciAddressRegexSpec, address)
def test_match(self):
address_str = "0000:0a:00.5"
phys_addr = devspec.PhysicalPciAddress(address_str)
addresses = [{"domain": ".*", "bus": "0a",
"slot": "00", "function": "[5-6]"},
{"domain": ".*", "bus": "0a",
"slot": ".*", "function": "[4-5]"},
{"domain": ".*", "bus": "0a",
"slot": "[0-3]", "function": ".*"}]
for address in addresses:
regex_addr = devspec.PciAddressRegexSpec(address)
self.assertTrue(regex_addr.match(phys_addr))
def test_false_match(self):
address_str = "0000:0b:00.5"
phys_addr = devspec.PhysicalPciAddress(address_str)
addresses = [{"domain": ".*", "bus": "0a",
"slot": "00", "function": "[5-6]"},
{"domain": ".*", "bus": "02",
"slot": ".*", "function": "[4-5]"},
{"domain": ".*", "bus": "02",
"slot": "[0-3]", "function": ".*"}]
for address in addresses:
regex_addr = devspec.PciAddressRegexSpec(address)
self.assertFalse(regex_addr.match(phys_addr))
class PciAddressTestCase(test.NoDBTestCase):
def test_wrong_address(self):
pci_info = {"vendor_id": "8086", "address": "*: *: *.6",
"product_id": "5057", "physical_network": "hr_net"}
pci = devspec.PciDeviceSpec(pci_info)
self.assertFalse(pci.match(dev))
def test_address_too_big(self):
pci_info = {"address": "0000:0a:0b:00.5",
"physical_network": "hr_net"}
self.assertRaises(exception.PciDeviceWrongAddressFormat,
devspec.PciDeviceSpec, pci_info)
def test_address_invalid_character(self):
pci_info = {"address": "0000:h4.12:6", "physical_network": "hr_net"}
exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
devspec.PciDeviceSpec, pci_info)
msg = ('Invalid PCI devices Whitelist config invalid func 12:6')
self.assertEqual(msg, six.text_type(exc))
def test_max_func(self):
pci_info = {"address": "0000:0a:00.%s" % (devspec.MAX_FUNC + 1),
"physical_network": "hr_net"}
exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
devspec.PciDeviceSpec, pci_info)
msg = ('Invalid PCI devices Whitelist config invalid func %x'
% (devspec.MAX_FUNC + 1))
self.assertEqual(msg, six.text_type(exc))
def test_max_domain(self):
pci_info = {"address": "%x:0a:00.5" % (devspec.MAX_DOMAIN + 1),
"physical_network": "hr_net"}
exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
devspec.PciDeviceSpec, pci_info)
msg = ('Invalid PCI devices Whitelist config invalid domain %x'
% (devspec.MAX_DOMAIN + 1))
self.assertEqual(msg, six.text_type(exc))
def test_max_bus(self):
pci_info = {"address": "0000:%x:00.5" % (devspec.MAX_BUS + 1),
"physical_network": "hr_net"}
exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
devspec.PciDeviceSpec, pci_info)
msg = ('Invalid PCI devices Whitelist config invalid bus %x'
% (devspec.MAX_BUS + 1))
self.assertEqual(msg, six.text_type(exc))
def test_max_slot(self):
pci_info = {"address": "0000:0a:%x.5" % (devspec.MAX_SLOT + 1),
"physical_network": "hr_net"}
exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
devspec.PciDeviceSpec, pci_info)
msg = ('Invalid PCI devices Whitelist config invalid slot %x'
% (devspec.MAX_SLOT + 1))
self.assertEqual(msg, six.text_type(exc))
def test_address_is_undefined(self):
pci_info = {"vendor_id": "8086", "product_id": "5057"}
pci = devspec.PciDeviceSpec(pci_info)
self.assertTrue(pci.match(dev))
def test_partial_address(self):
pci_info = {"address": ":0a:00.", "physical_network": "hr_net"}
pci = devspec.PciDeviceSpec(pci_info)
dev = {"vendor_id": "1137",
"product_id": "0071",
"address": "0000:0a:00.5",
"parent_addr": "0000:0a:00.0"}
self.assertTrue(pci.match(dev))
def test_partial_address_func(self):
pci_info = {"address": ".5", "physical_network": "hr_net"}
pci = devspec.PciDeviceSpec(pci_info)
dev = {"vendor_id": "1137",
"product_id": "0071",
"address": "0000:0a:00.5",
"phys_function": "0000:0a:00.0"}
self.assertTrue(pci.match(dev))
@mock.patch('nova.pci.utils.is_physical_function', return_value=True)
def test_address_is_pf(self, mock_is_physical_function):
pci_info = {"address": "0000:0a:00.0", "physical_network": "hr_net"}
pci = devspec.PciDeviceSpec(pci_info)
self.assertTrue(pci.match(dev))
@mock.patch('nova.pci.utils.is_physical_function', return_value=True)
def test_address_pf_no_parent_addr(self, mock_is_physical_function):
_dev = dev.copy()
_dev.pop('parent_addr')
pci_info = {"address": "0000:0a:00.5", "physical_network": "hr_net"}
pci = devspec.PciDeviceSpec(pci_info)
self.assertTrue(pci.match(_dev))
def test_spec_regex_match(self):
pci_info = {"address": {"domain": ".*",
"bus": ".*",
"slot": "00",
"function": "[5-6]"
},
"physical_network": "hr_net"}
pci = devspec.PciDeviceSpec(pci_info)
self.assertTrue(pci.match(dev))
def test_spec_regex_no_match(self):
pci_info = {"address": {"domain": ".*",
"bus": ".*",
"slot": "00",
"function": "[6-7]"
},
"physical_network": "hr_net"}
pci = devspec.PciDeviceSpec(pci_info)
self.assertFalse(pci.match(dev))
def test_spec_invalid_regex(self):
pci_info = {"address": {"domain": ".*",
"bus": ".*",
"slot": "00",
"function": "[6[-7]"
},
"physical_network": "hr_net"}
self.assertRaises(exception.PciDeviceWrongAddressFormat,
devspec.PciDeviceSpec, pci_info)
def test_spec_invalid_regex2(self):
pci_info = {"address": {"domain": "*",
"bus": "*",
"slot": "00",
"function": "[6-7]"
},
"physical_network": "hr_net"}
self.assertRaises(exception.PciDeviceWrongAddressFormat,
devspec.PciDeviceSpec, pci_info)
def test_spec_partial_bus_regex(self):
pci_info = {"address": {"domain": ".*",
"slot": "00",
"function": "[5-6]"
},
"physical_network": "hr_net"}
pci = devspec.PciDeviceSpec(pci_info)
self.assertTrue(pci.match(dev))
def test_spec_partial_address_regex(self):
pci_info = {"address": {"domain": ".*",
"bus": ".*",
"slot": "00",
},
"physical_network": "hr_net"}
pci = devspec.PciDeviceSpec(pci_info)
self.assertTrue(pci.match(dev))
def test_spec_invalid_address(self):
pci_info = {"address": [".*", ".*", "00", "[6-7]"],
"physical_network": "hr_net"}
self.assertRaises(exception.PciDeviceWrongAddressFormat,
devspec.PciDeviceSpec, pci_info)
@mock.patch('nova.pci.utils.is_physical_function', return_value=True)
def test_address_is_pf_regex(self, mock_is_physical_function):
pci_info = {"address": {"domain": "0000",
"bus": "0a",
"slot": "00",
"function": "0"
},
"physical_network": "hr_net"}
pci = devspec.PciDeviceSpec(pci_info)
self.assertTrue(pci.match(dev))
class PciDevSpecTestCase(test.NoDBTestCase):
def test_spec_match(self):
pci_info = {"vendor_id": "8086", "address": "*: *: *.5",
"product_id": "5057", "physical_network": "hr_net"}
pci = devspec.PciDeviceSpec(pci_info)
self.assertTrue(pci.match(dev))
def test_invalid_vendor_id(self):
pci_info = {"vendor_id": "8087", "address": "*: *: *.5",
"product_id": "5057", "physical_network": "hr_net"}
pci = devspec.PciDeviceSpec(pci_info)
self.assertFalse(pci.match(dev))
def test_vendor_id_out_of_range(self):
pci_info = {"vendor_id": "80860", "address": "*:*:*.5",
"product_id": "5057", "physical_network": "hr_net"}
exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
devspec.PciDeviceSpec, pci_info)
self.assertEqual("Invalid PCI devices Whitelist config "
"invalid vendor_id 80860", six.text_type(exc))
def test_invalid_product_id(self):
pci_info = {"vendor_id": "8086", "address": "*: *: *.5",
"product_id": "5056", "physical_network": "hr_net"}
pci = devspec.PciDeviceSpec(pci_info)
self.assertFalse(pci.match(dev))
def test_product_id_out_of_range(self):
pci_info = {"vendor_id": "8086", "address": "*:*:*.5",
"product_id": "50570", "physical_network": "hr_net"}
exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
devspec.PciDeviceSpec, pci_info)
self.assertEqual("Invalid PCI devices Whitelist config "
"invalid product_id 50570", six.text_type(exc))
def test_devname_and_address(self):
pci_info = {"devname": "eth0", "vendor_id": "8086",
"address": "*:*:*.5", "physical_network": "hr_net"}
self.assertRaises(exception.PciDeviceInvalidDeviceName,
devspec.PciDeviceSpec, pci_info)
@mock.patch('nova.pci.utils.get_function_by_ifname',
return_value = ("0000:0a:00.0", True))
def test_by_name(self, mock_get_function_by_ifname):
pci_info = {"devname": "eth0", "physical_network": "hr_net"}
pci = devspec.PciDeviceSpec(pci_info)
self.assertTrue(pci.match(dev))
@mock.patch('nova.pci.utils.get_function_by_ifname',
return_value = (None, False))
def test_invalid_name(self, mock_get_function_by_ifname):
pci_info = {"devname": "lo", "physical_network": "hr_net"}
pci = devspec.PciDeviceSpec(pci_info)
self.assertFalse(pci.match(dev))
def test_pci_obj(self):
pci_info = {"vendor_id": "8086", "address": "*:*:*.5",
"product_id": "5057", "physical_network": "hr_net"}
pci = devspec.PciDeviceSpec(pci_info)
pci_dev = {
'compute_node_id': 1,
'address': '0000:00:00.5',
'product_id': '5057',
'vendor_id': '8086',
'status': 'available',
'parent_addr': None,
'extra_k1': 'v1',
}
pci_obj = objects.PciDevice.create(None, pci_dev)
self.assertTrue(pci.match_pci_obj(pci_obj))
|
{
"content_hash": "1a3aaf293dbf70f9869a09b09f41bf8f",
"timestamp": "",
"source": "github",
"line_count": 422,
"max_line_length": 76,
"avg_line_length": 43.14691943127962,
"alnum_prop": 0.5366871704745168,
"repo_name": "rajalokan/nova",
"id": "ad35d190e28b59fcc5dc1d86e13aaa75354c1ba2",
"size": "18784",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "nova/tests/unit/pci/test_devspec.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "601"
},
{
"name": "PHP",
"bytes": "4503"
},
{
"name": "Python",
"bytes": "19100322"
},
{
"name": "Shell",
"bytes": "26793"
},
{
"name": "Smarty",
"bytes": "299237"
}
],
"symlink_target": ""
}
|
"""readgroupsets list command."""
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.genomics import genomics_util
from googlecloudsdk.calliope import base
class List(base.ListCommand):
"""List genomics read group sets in a dataset.
Prints a table with summary information on read group sets in the dataset.
"""
@staticmethod
def Args(parser):
"""Args is called by calliope to gather arguments for this command.
Args:
parser: An argparse parser that you can use to add arguments that go
on the command line after this command. Positional arguments are
allowed.
"""
parser.add_argument(
'dataset_ids',
nargs='+',
help="""Restrict the query to read group sets within the given datasets.
At least one ID must be provided.""")
parser.add_argument(
'--name',
help="""Only return read group sets for which a substring of the
name matches this string.""")
base.PAGE_SIZE_FLAG.SetDefault(parser, 128)
def Collection(self):
return 'genomics.readGroupSets'
def Run(self, args):
"""Run 'readgroupsets list'.
Args:
args: argparse.Namespace, The arguments that this command was invoked
with.
Returns:
The list of read group sets matching the given dataset ids.
Raises:
HttpException: An http error response was received while executing api
request.
"""
apitools_client = genomics_util.GetGenomicsClient()
messages = genomics_util.GetGenomicsMessages()
req_class = messages.SearchReadGroupSetsRequest
fields = genomics_util.GetQueryFields(
self.GetReferencedKeyNames(args), 'readGroupSets')
if fields:
global_params = messages.StandardQueryParameters(fields=fields)
else:
global_params = None
page_size = args.page_size
if args.limit and args.limit < page_size:
page_size = args.limit
return list_pager.YieldFromList(apitools_client.readgroupsets,
req_class(name=args.name,
datasetIds=args.dataset_ids),
method='Search',
global_params=global_params,
limit=args.limit,
batch_size_attribute='pageSize',
batch_size=page_size,
field='readGroupSets')
|
{
"content_hash": "b0a27ac08cc3beccde835f9b4bc3ef6e",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 80,
"avg_line_length": 33.54666666666667,
"alnum_prop": 0.609697933227345,
"repo_name": "KaranToor/MA450",
"id": "600728db1c46916fb4b23ca2bb5588349a5d1b02",
"size": "3112",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "google-cloud-sdk/.install/.backup/lib/surface/genomics/readgroupsets/list.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3162"
},
{
"name": "CSS",
"bytes": "1930"
},
{
"name": "HTML",
"bytes": "13381"
},
{
"name": "Java",
"bytes": "151442"
},
{
"name": "JavaScript",
"bytes": "4906"
},
{
"name": "Makefile",
"bytes": "1636"
},
{
"name": "Objective-C",
"bytes": "13335"
},
{
"name": "PHP",
"bytes": "9086"
},
{
"name": "Pascal",
"bytes": "62"
},
{
"name": "Python",
"bytes": "19710731"
},
{
"name": "Roff",
"bytes": "2069494"
},
{
"name": "Ruby",
"bytes": "690"
},
{
"name": "Shell",
"bytes": "32272"
},
{
"name": "Smarty",
"bytes": "4968"
},
{
"name": "SourcePawn",
"bytes": "616"
},
{
"name": "Swift",
"bytes": "14225"
}
],
"symlink_target": ""
}
|
from xml.dom import minidom
import simple_xml_utils as x
from xmlcomp import XmlComponent
import urllib
import codecs
from xmlxplode import BOM_MAP, BOM_LOOKUP
from functools import partial
import os
XInclude_NS = 'http://www.w3.org/2001/XInclude'
class EncodingWriter(object):
def __init__(self, wrapped, encoding):
self.wrapped = wrapped
self.encoding = encoding
def write(self, o):
if isinstance(o, unicode):
o = o.encode(self.encoding)
self.wrapped.write(o)
class BasicXmlComponent(XmlComponent):
def getFileName(self):
return '%s.xml' % self.getLocalName()
def getComponentClass(self, xml):
return XmlComponent
class Exploder(object):
'''
classdocs
'''
def __init__(self, source, fs):
self.fs = fs
if hasattr(source, 'open'):
f = source.open('rb')
try:
source = f.read()
finally:
f.close()
elif hasattr(source, 'read'):
source = source.read()
self.eol = "\r\n" if "\r\n" in source else "\n" if "\n" in source else ''
self.dom = self.getDom(source)
for bom, codec in BOM_MAP.iteritems():
if source.startswith(bom):
self.dom.encoding = codec.name
break
self.domImpl = minidom.getDOMImplementation()
def getDom(self, source):
return minidom.parseString(source)
@classmethod
def explode(cls, source, fs):
return cls(source, fs)._explode()
def _explode(self):
root = self.rootElement(self.dom.documentElement)
self.writeComponent(self.fs, self.fs, root, None)
def rootElement(self, elem):
return BasicXmlComponent(elem)
def writeComponent(self, parentFs, fs, comp, parentElem):
fileName = parse = None
if comp.getFileName():
fs, fileName, parse = self.writeComponentFile(fs, comp)
else:
self.writeComponentIntoParent(parentFs, fs, comp, parentElem)
return fs, fileName, parse
def writeComponentFile(self, fs, comp):
fileName = comp.getFileName()
subFs = fs.relativeFs(comp.getComponentSubPath())
parse = comp.getXIncludeParseType()
with subFs.open(fileName, 'wb') as f:
if parse == 'xml':
self.writeComponentXmlFile(subFs, comp, f)
else:
comp.writeInto(f)
return subFs, fileName, parse
def writeComponentXmlFile(self, subFs, comp, f):
namespaceURI = comp.getNamespaceURI()
name = '%s:%s' % (comp.mapNamespace(namespaceURI), comp.getLocalName()) if namespaceURI else comp.getLocalName()
dom = self.domImpl.createDocument(namespaceURI, name, None)
elem = dom.documentElement
x.makeNamespacePrefix(elem, namespaceURI, prefered_prefix=comp.mapNamespace(namespaceURI))
comp.writeInto(elem)
self.writeComponents(subFs, subFs, comp, elem)
if self.dom.encoding:
f.write(BOM_LOOKUP[codecs.lookup(self.dom.encoding)])
f = EncodingWriter(f, self.dom.encoding)
self.writexml(dom, f, encoding=self.dom.encoding)
def writexml(self, dom, f, encoding):
dom.writexml(f, encoding=encoding)
def writeComponentIntoParent(self, parentFs, fs, comp, parentElem):
if comp.getLocalName():
elem = self.writeComponentElement(comp, parentElem)
else:
elem = parentElem
comp.writeInto(elem)
self.writeComponents(parentFs, fs, comp, elem)
def writeComponentElement(self, comp, elem):
dom = elem.ownerDocument
namespaceURI = comp.getNamespaceURI()
prefix = x.makeNamespacePrefix(elem, namespaceURI, prefered_prefix=comp.mapNamespace(namespaceURI))
subElem = dom.createElementNS(comp.getNamespaceURI(), '%s:%s' % (prefix, comp.getLocalName()) if prefix else comp.getLocalName())
elem.appendChild(subElem)
comp.writeInto(subElem)
return subElem
def writeComponents(self, parentFs, fs, comp, parentElem):
compFs = fs.relativeFs(comp.getComponentsSubPath())
for subComp in comp.getComponents():
subFs, subName, parse = self.writeComponent(parentFs, compFs, subComp, parentElem)
if subName and parse:
subName = subFs.getRelativePathFrom(parentFs, subName)
xi = x.makeNamespacePrefix(parentElem, XInclude_NS, prefered_prefix='xi')
elem = parentElem.ownerDocument.createElementNS(XInclude_NS, '%s:include' % xi)
elem.setAttributeNS(XInclude_NS, '%s:href' % xi, urllib.pathname2url(subName))
elem.setAttributeNS(XInclude_NS, '%s:parse' % xi, parse)
parentElem.appendChild(elem)
|
{
"content_hash": "e193031a7df4619b7246175cab01f946",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 137,
"avg_line_length": 36.372262773722625,
"alnum_prop": 0.6094722054986955,
"repo_name": "LeonPoon/XMLExplode",
"id": "0fbfaeee71bfdc179c0e0dc427095026b42ff0ea",
"size": "5585",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/xmlxplode/xploder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "67590"
}
],
"symlink_target": ""
}
|
import random
import logging
import os
from slackclient import SlackClient
import json
logger = logging.getLogger(__name__)
outputs = []
crontable = []
titles = ["Superior Person",
"Dear Leader",
"Respected Leader",
"Wise Leader",
"Brilliant Leader",
"Unique Leader",
"Dear Leader, who is a perfect incarnation of the appearance that a leader should have",
"Commander-in-Chief",
"Great Leader",
"Father of the People",
"Sun of the Communist Future",
"Guiding Sun Ray",
"Leader of the Revolutionary Armed Forces",
"Guarantee of the Fatherland's Unification",
"Symbol of the Fatherland's Unification",
"Fate of the Nation",
"Beloved Father",
"Leader of the Party, the country, and the Army",
"Leader",
"General",
"Great Leader of our Party and of our Nation",
"Great General",
"Beloved and Respected General",
"Great Leader",
"Beloved and Respected Leader",
"Ever-Victorious, Iron-Willed Commander",
"Sun of Socialism",
"Sun of the Nation",
"The Great Sun of Life",
"Great Sun of The Nation",
"Father of the Nation",
"World Leader of The 21st Century",
"Peerless Leader",
"Bright Sun of the 21st Century",
"Great Sun of the 21st Century",
"Leader of the 21st Century",
"Amazing Politician",
"Great Man, Who Descended From Heaven",
"Glorious General, Who Descended From Heaven",
"Supreme Leader of the Nation",
"Bright Sun of Juche",
"Leader of the Party and the People",
"Great Marshal",
"Invincible and Triumphant General",
"Dear Father",
"Guiding Star of the 21st Century",
"Great Man, Who Is a Man of Deeds",
"Great Defender",
"Savior",
"Mastermind of the Revolution",
"Highest Incarnation of the Revolutionary Comradeship",
"His Excellency",
"Eternal General Secretary of the Party"]
token = os.environ["SLACK_TOKEN"]
slack_client = SlackClient(token)
result = slack_client.api_call('channels.list')
general_channel_id = None
if result:
result = result.decode("utf-8")
result = json.loads(result)
for channel in result['channels']:
if channel['name'].lower() == "general":
general_channel_id = channel['id']
def process_message(data):
if data['type'] == 'message' and 'channel' in data and data['channel'] == general_channel_id:
if 'bob' in data['text'] or '@U08CRJ648' in data['text']:
title_found = False
for title in titles:
if title in data['text']:
title_found = True
break
if not title_found:
title = random.choice(titles)
message = "Hey <@{}>! Show some respect that's {} @bob to you!".format(data['user'], title)
logger.info(message)
outputs.append([data["channel"], message])
logger.info(data)
|
{
"content_hash": "0eb8e81f13d08206ff0a0bcd72b55fdd",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 107,
"avg_line_length": 35.54945054945055,
"alnum_prop": 0.5672333848531684,
"repo_name": "jamestenglish/PySlackRespectBot",
"id": "c5d0a1599d9341e5d6fe2f6334e5776e69b5a6ce",
"size": "3235",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "respectbot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3235"
}
],
"symlink_target": ""
}
|
"""
Class for multi-threaded compression tool
"""
__author__ = 'sergey'
from time import sleep, time
from .base import BaseCompressTool, Task, Result
from threading import Thread
from queue import Queue
from multiprocessing import cpu_count
from dedupsqlfs.lib import constants
class MultiThreadCompressTool(BaseCompressTool):
_threads = None
"""
@ivar _threads: list of Thread objects
@type _threads: list[ Thread,.. ]
"""
_np = 0
_np_limit = 0
_task_queues = None
_result_queue = None
def checkCpuLimit(self):
if self.getOption("cpu_limit"):
self._np_limit = int(self.getOption("cpu_limit"))
self._np = cpu_count()
if self._np_limit > 0:
if self._np > self._np_limit:
self._np = self._np_limit
return self._np
def init(self, logger):
super().init(logger)
self._threads = []
self._task_queues = []
self._np = self.checkCpuLimit()
self._result_queue = Queue()
for n in range(self._np):
tq = Queue()
self._task_queues.append(tq)
p = Thread(target=self._worker, name="Compressor-%s" % n, args=(tq, self._result_queue,))
p.start()
self._threads.append(p)
return self
def stop(self):
count = 50
alive = True
while alive:
for n in range(self._np):
tq = self._task_queues[ n ]
tq.put_nowait("stop")
sleep(0.1)
alive = False
for n in range(self._np):
t = self._threads[n]
"""
@type t: StoppableThread
"""
if t.is_alive():
alive = True
count -= 1
if count <= 0:
break
for n in range(self._np):
t = self._threads[n]
"""
@type t: Thread
"""
if t.is_alive():
t._stop()
return self
def _worker(self, in_queue, out_queue):
"""
@param in_queue: {multiprocessing.JoinableQueue}
@param out_queue: {multiprocessing.JoinableQueue}
@var task: Task
@return:
"""
sleep_wait = 0.01
while True:
try:
task = in_queue.get_nowait()
except:
task = None
if task is None:
sleep(sleep_wait)
continue
if type(task) is float:
sleep_wait = task
in_queue.task_done()
sleep(sleep_wait)
continue
if type(task) is str and task == "stop":
in_queue.task_done()
break
if type(task) is Task:
result = Result()
result.cdata, result.method = self._compressData(task.data)
result.key = task.key
out_queue.put_nowait(result)
in_queue.task_done()
return
def compressData(self, dataToCompress):
"""
Compress data and returns back
@param dataToCompress: dict { hash id: bytes data }
@return dict { hash id: (compressed data (bytes), compresion method (string) ) }
"""
isNoneOnly = not self._methods or (len(self._methods) == 1 and constants.COMPRESSION_TYPE_NONE in self._methods)
if isNoneOnly:
for hash_id, item in super().compressData(dataToCompress):
yield hash_id, item
return
start_time = time()
nkeys = len(dataToCompress.keys())
for n in range(self._np):
tq = self._task_queues[n]
tq.put_nowait(0.001)
i = 0
for key, data in dataToCompress.items():
task = Task()
task.key = key
task.data = data
nq = i % self._np
tq = self._task_queues[ nq ]
tq.put_nowait(task)
i += 1
gotKeys = 0
while gotKeys < nkeys:
try:
res = self._result_queue.get_nowait()
except:
res = None
if res is None:
sleep(0.001)
continue
if type(res) is Result:
self._result_queue.task_done()
yield res.key, (res.cdata, res.method,)
gotKeys += 1
for n in range(self._np):
tq = self._task_queues[n]
tq.put_nowait(0.01)
self.time_spent_compressing = time() - start_time
return
pass
|
{
"content_hash": "a5fb9d197ba6f7943180e58639e2349d",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 120,
"avg_line_length": 24.804232804232804,
"alnum_prop": 0.48293515358361777,
"repo_name": "sergey-dryabzhinsky/dedupsqlfs",
"id": "942a92c7b67fd472883defd5fea6811f25f3174a",
"size": "4711",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dedupsqlfs/fuse/compress/mt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5509796"
},
{
"name": "C++",
"bytes": "33360"
},
{
"name": "Cython",
"bytes": "107356"
},
{
"name": "Python",
"bytes": "1042676"
},
{
"name": "Shell",
"bytes": "1480"
}
],
"symlink_target": ""
}
|
import sys
import os
import shlex
import sphinx_rtd_theme
# Mock to make docs buildable on readthedocs.org
try:
from mock import Mock as MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
MOCK_MODULES = ['vcf', 'requests']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
except ImportError:
pass
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
from version import __version__
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
]
intersphinx_mapping = {'python': ('http://docs.python.org/2.7', None)}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Genestack Tasks API'
copyright = u'2017, Genestack'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/logo.svg'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'GenestackTaskLibrarydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'GenestackTaskLibrary.tex', u'Genestack Task Library Documentation',
u'Genestack', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'genestacktasklibrary', u'Genestack Task Library Documentation',
['Genestack Limited'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'GenestackTaskLibrary', u'Genestack Task Library Documentation',
'Genestack Limited', 'GenestackTaskLibrary', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
autoclass_content = 'both'
from genestack import File, StringMapFile
File.__module__ = 'genestack'
StringMapFile.__module__ = 'genestack'
|
{
"content_hash": "aa791c76d4c9feefb86346af319af9c8",
"timestamp": "",
"source": "github",
"line_count": 312,
"max_line_length": 83,
"avg_line_length": 31.59294871794872,
"alnum_prop": 0.7051841331033784,
"repo_name": "genestack/task-library",
"id": "8c07458b4fcc344491c53512cfcfa1be98a9935a",
"size": "10293",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/source/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "252859"
}
],
"symlink_target": ""
}
|
import urllib
from pytube import YouTube
import os
def downloadYt(url, path):
if os.path.exists(path):
os.remove(path)
folder = path[0: path.rfind("/")]
fname = path[path.rfind("/") + 1: path.rfind(".")]
yt = YouTube(url)
video = yt.filter('mp4')[-1] # highest res
yt.set_filename(fname)
video.download(folder)
if __name__ == '__main__':
downloadYt('https://www.youtube.com/watch?v=LQVDJtfpQU0', 'videos/test.mp4')
|
{
"content_hash": "783f82cdeac59db3ed685bc67e140eb2",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 80,
"avg_line_length": 24.263157894736842,
"alnum_prop": 0.6182212581344902,
"repo_name": "Jspsun/HackTheNorth2017",
"id": "1b9c439ec02c0bb8db6c14dd5e9040893ea8e3d3",
"size": "461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/VideoParse.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "87"
},
{
"name": "HTML",
"bytes": "1409"
},
{
"name": "JavaScript",
"bytes": "14093"
},
{
"name": "Python",
"bytes": "18967"
}
],
"symlink_target": ""
}
|
import os
import vcr
absolute_cassette_lib_path = os.path.join(os.path.dirname(__file__))
vcr_explicit_path = vcr.VCR(
cassette_library_dir=absolute_cassette_lib_path,
)
|
{
"content_hash": "a92b5fc63f4aee360df3350bda740d27",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 68,
"avg_line_length": 19.666666666666668,
"alnum_prop": 0.7288135593220338,
"repo_name": "geometalab/osmaxx-frontend",
"id": "1476a845e21e3129b92faae34abe00cb4b44e1b3",
"size": "177",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "tests/test_helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "26077"
},
{
"name": "HTML",
"bytes": "22722"
},
{
"name": "JavaScript",
"bytes": "271988"
},
{
"name": "Python",
"bytes": "194135"
},
{
"name": "Shell",
"bytes": "823"
}
],
"symlink_target": ""
}
|
import sys
from PyQt5.QtWidgets import QListWidgetItem
from Commands.CommandType import CommandType
class CommandItem(QListWidgetItem):
name = None
delay = 0
code = None
description = None
text = None
def __init__(self, delay = 0, commandType = None, cmdParams = []):
if (not isinstance(delay, int)) or (delay < 0):
raise TypeError("CommandItem: delay must be a positive integer.")
self.delay = delay
if commandType is None:
self.text = "Delay (" + str(self.delay) + ")"
self.code = "DELAY"
self.description = "Delay of " + str(self.delay) + " milliseconds"
else:
if not isinstance(commandType, CommandType):
raise TypeError("CommandItem: commandType should be an instance of CommandType class.")
self.name = commandType.name
text = self.name + "("
for p in cmdParams:
text += str(p) + ", "
self.text = text[:-2] + ")"
currentParam = 0
cmdCode = commandType.code
cmdCodeCopy = commandType.code
#iterate over copy to avoid out of range error - we're modifying cmdCode's length inside the loop
for i in range(len(cmdCodeCopy)):
if cmdCodeCopy[i] is not '%':
continue
if cmdCodeCopy[i+1] == '1':
cmdCode = cmdCode.replace("%1", str(chr(cmdParams[currentParam])), 1)
elif cmdCodeCopy[i+1] == '2':
cmdCode = cmdCode.replace("%2", str(chr(int(cmdParams[currentParam] / 255)) + chr(cmdParams[currentParam] % 255)), 1)
currentParam += 1
self.code = cmdCode.encode('ascii', 'ignore')
self.description = commandType.description
QListWidgetItem.__init__(self)
self.setText(self.text)
self.setToolTip(str(self.code).replace("b", "", 1) + " - " + self.description)
|
{
"content_hash": "51f2ae7d7c343da08f6e6c25acb299d6",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 122,
"avg_line_length": 35.42553191489362,
"alnum_prop": 0.6720720720720721,
"repo_name": "akkenoth/BTSerial",
"id": "e6fbea190b7b7a2f7f70b9ae49510ee985168968",
"size": "1665",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Commands/CommandItem.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "79"
},
{
"name": "Python",
"bytes": "67542"
},
{
"name": "Shell",
"bytes": "558"
}
],
"symlink_target": ""
}
|
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:6668")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:6668")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Errorcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Errorcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
{
"content_hash": "31a90e5c4296bbc3fe510183fca472a6",
"timestamp": "",
"source": "github",
"line_count": 324,
"max_line_length": 79,
"avg_line_length": 24.19753086419753,
"alnum_prop": 0.6618622448979592,
"repo_name": "error-dev/ErrorCoin",
"id": "b79e357b7d2e938c6fbd7b8b841bc2163ff1adb8",
"size": "7840",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/bitrpc/bitrpc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "32009"
},
{
"name": "C++",
"bytes": "2614130"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Groff",
"bytes": "18284"
},
{
"name": "HTML",
"bytes": "50615"
},
{
"name": "Makefile",
"bytes": "19681"
},
{
"name": "NSIS",
"bytes": "5970"
},
{
"name": "Objective-C",
"bytes": "1052"
},
{
"name": "Objective-C++",
"bytes": "5864"
},
{
"name": "Python",
"bytes": "69719"
},
{
"name": "QMake",
"bytes": "15215"
},
{
"name": "Shell",
"bytes": "13173"
}
],
"symlink_target": ""
}
|
import logging
import subprocess
import Adafruit_GPIO.Platform as Platform
def reverseByteOrder(data):
"""Reverses the byte order of an int (16-bit) or long (32-bit) value."""
# Courtesy Vishal Sapre
byteCount = len(hex(data)[2:].replace('L','')[::2])
val = 0
for i in range(byteCount):
val = (val << 8) | (data & 0xff)
data >>= 8
return val
def get_default_bus():
"""Return the default bus number based on the device platform. For a
Raspberry Pi either bus 0 or 1 (based on the Pi revision) will be returned.
For a Beaglebone Black the first user accessible bus, 1, will be returned.
"""
plat = Platform.platform_detect()
if plat == Platform.RASPBERRY_PI:
if Platform.pi_revision() == 1:
# Revision 1 Pi uses I2C bus 0.
return 0
else:
# Revision 2 Pi uses I2C bus 1.
return 1
elif plat == Platform.BEAGLEBONE_BLACK:
# Beaglebone Black has multiple I2C buses, default to 1 (P9_19 and P9_20).
return 1
else:
raise RuntimeError('Could not determine default I2C bus for platform.')
def get_i2c_device(address, busnum=None, i2c_interface=None, **kwargs):
"""Return an I2C device for the specified address and on the specified bus.
If busnum isn't specified, the default I2C bus for the platform will attempt
to be detected.
"""
if busnum is None:
busnum = get_default_bus()
return Device(address, busnum, i2c_interface, **kwargs)
def require_repeated_start():
"""Enable repeated start conditions for I2C register reads. This is the
normal behavior for I2C, however on some platforms like the Raspberry Pi
there are bugs which disable repeated starts unless explicitly enabled with
this function. See this thread for more details:
http://www.raspberrypi.org/forums/viewtopic.php?f=44&t=15840
"""
plat = Platform.platform_detect()
if plat == Platform.RASPBERRY_PI:
# On the Raspberry Pi there is a bug where register reads don't send a
# repeated start condition like the kernel smbus I2C driver functions
# define. As a workaround this bit in the BCM2708 driver sysfs tree can
# be changed to enable I2C repeated starts.
subprocess.check_call('chmod 666 /sys/module/i2c_bcm2708/parameters/combined', shell=True)
subprocess.check_call('echo -n 1 > /sys/module/i2c_bcm2708/parameters/combined', shell=True)
# Other platforms are a no-op because they (presumably) have the correct
# behavior and send repeated starts.
class Device(object):
"""Class for communicating with an I2C device using the adafruit-pureio pure
python smbus library, or other smbus compatible I2C interface. Allows reading
and writing 8-bit, 16-bit, and byte array values to registers
on the device."""
def __init__(self, address, busnum, i2c_interface=None):
"""Create an instance of the I2C device at the specified address on the
specified I2C bus number."""
self._address = address
if i2c_interface is None:
# Use pure python I2C interface if none is specified.
import Adafruit_PureIO.smbus
self._bus = Adafruit_PureIO.smbus.SMBus(busnum)
else:
# Otherwise use the provided class to create an smbus interface.
self._bus = i2c_interface(busnum)
self._logger = logging.getLogger('Adafruit_I2C.Device.Bus.{0}.Address.{1:#0X}' \
.format(busnum, address))
def writeRaw8(self, value):
"""Write an 8-bit value on the bus (without register)."""
value = value & 0xFF
self._bus.write_byte(self._address, value)
self._logger.debug("Wrote 0x%02X",
value)
def write8(self, register, value):
"""Write an 8-bit value to the specified register."""
value = value & 0xFF
self._bus.write_byte_data(self._address, register, value)
self._logger.debug("Wrote 0x%02X to register 0x%02X",
value, register)
def write16(self, register, value):
"""Write a 16-bit value to the specified register."""
value = value & 0xFFFF
self._bus.write_word_data(self._address, register, value)
self._logger.debug("Wrote 0x%04X to register pair 0x%02X, 0x%02X",
value, register, register+1)
def writeList(self, register, data):
"""Write bytes to the specified register."""
self._bus.write_i2c_block_data(self._address, register, data)
self._logger.debug("Wrote to register 0x%02X: %s",
register, data)
def readList(self, register, length):
"""Read a length number of bytes from the specified register. Results
will be returned as a bytearray."""
results = self._bus.read_i2c_block_data(self._address, register, length)
self._logger.debug("Read the following from register 0x%02X: %s",
register, results)
return results
def readRaw8(self):
"""Read an 8-bit value on the bus (without register)."""
result = self._bus.read_byte(self._address) & 0xFF
self._logger.debug("Read 0x%02X",
result)
return result
def readU8(self, register):
"""Read an unsigned byte from the specified register."""
result = self._bus.read_byte_data(self._address, register) & 0xFF
self._logger.debug("Read 0x%02X from register 0x%02X",
result, register)
return result
def readS8(self, register):
"""Read a signed byte from the specified register."""
result = self.readU8(register)
if result > 127:
result -= 256
return result
def readU16(self, register, little_endian=True):
"""Read an unsigned 16-bit value from the specified register, with the
specified endianness (default little endian, or least significant byte
first)."""
result = self._bus.read_word_data(self._address,register) & 0xFFFF
self._logger.debug("Read 0x%04X from register pair 0x%02X, 0x%02X",
result, register, register+1)
# Swap bytes if using big endian because read_word_data assumes little
# endian on ARM (little endian) systems.
if not little_endian:
result = ((result << 8) & 0xFF00) + (result >> 8)
return result
def readS16(self, register, little_endian=True):
"""Read a signed 16-bit value from the specified register, with the
specified endianness (default little endian, or least significant byte
first)."""
result = self.readU16(register, little_endian)
if result > 32767:
result -= 65536
return result
def readU16LE(self, register):
"""Read an unsigned 16-bit value from the specified register, in little
endian byte order."""
return self.readU16(register, little_endian=True)
def readU16BE(self, register):
"""Read an unsigned 16-bit value from the specified register, in big
endian byte order."""
return self.readU16(register, little_endian=False)
def readS16LE(self, register):
"""Read a signed 16-bit value from the specified register, in little
endian byte order."""
return self.readS16(register, little_endian=True)
def readS16BE(self, register):
"""Read a signed 16-bit value from the specified register, in big
endian byte order."""
return self.readS16(register, little_endian=False)
|
{
"content_hash": "13c26db64a531840ad55e13449a45582",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 100,
"avg_line_length": 42.95530726256983,
"alnum_prop": 0.635063077123163,
"repo_name": "RandomGamer342/TTM4115-plantsensor",
"id": "e63a2d1a133d7c6b2f2a1928fdce89db8f4f48b1",
"size": "8863",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "Adafruit_GPIO/I2C.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "254565"
}
],
"symlink_target": ""
}
|
import sys
import socket
import argparse
import cryptoconfig
import peersockets
def pushtx(crypto,num_peers_to_send,tx_list,addresses):
crypto=crypto.lower()
if crypto not in cryptoconfig.SUPPORTED_CRYPTOS:
raise Exception("Crypto {} not supported, suppored cryptos are {}".format(crypto,cryptoconfig.SUPPORTED_CRYPTOS))
handler=peersockets.PeerSocketsHandler(crypto,tx_list,peer_list=addresses,
num_tx_broadcasts=num_peers_to_send)
while 1:
handler.run()
if all(out[1]>=num_peers_to_send for out in handler.tx_broadcast_list):
print("FINISHED")
return
def main():
parser = argparse.ArgumentParser(description=
'Command line interface for pushtx')
parser.add_argument('crypto_name', metavar='Crypto_Name', type=str,
help='Name of the cryptocurrency (Supports: {})'.format(cryptoconfig.SUPPORTED_CRYPTOS))
parser.add_argument('-num_peers', metavar='P', type=int,
default=20,
help='Number of peers P to broadcast to')
parser.add_argument('txs',metavar='Hex_transactions',type=str,
nargs='+',
help='List of hex transactions to broadcast')
parser.add_argument('-addresses',metavar='Address',type=str,
nargs='?',help='Comma seperated list of addresses to connect to (do not use white space to seperate addresses)')
args=parser.parse_args()
crypto = args.crypto_name.lower()
num_peers_to_send = args.num_peers
tx_list = args.txs
addresses = []
if args.addresses != None:
addresses = args.addresses.split(',')
pushtx(crypto,num_peers_to_send,tx_list,addresses)
if __name__ == "__main__":
main()
|
{
"content_hash": "d722409be228ebff3f0a81a932079569",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 132,
"avg_line_length": 39.32608695652174,
"alnum_prop": 0.6274184632393588,
"repo_name": "kaykurokawa/pushtx",
"id": "5c52ec1988c40514c1becf0c9b2fec4eecdc1a2a",
"size": "1919",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pushtx.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31846"
}
],
"symlink_target": ""
}
|
"""Tokenization classes for CLIP."""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from transformers.models.bert.tokenization_bert import BasicTokenizer
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"openai/clip-vit-base-patch32": "https://huggingface.co/openai/clip-vit-base-patch32/resolve/main/vocab.json",
},
"merges_file": {
"openai/clip-vit-base-patch32": "https://huggingface.co/openai/clip-vit-base-patch32/resolve/main/merges.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"openai/clip-vit-base-patch32": 77,
}
PRETRAINED_INIT_CONFIGURATION = {
"openai/clip-vit-base-patch32": {"do_lower_case": True},
}
@lru_cache()
def bytes_to_unicode():
"""
Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control
characters the bpe code barfs on.
The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
decent coverage. This is a signficant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
tables between utf-8 bytes and unicode strings.
"""
bs = (
list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
)
cs = bs[:]
n = 0
for b in range(2 ** 8):
if b not in bs:
bs.append(b)
cs.append(2 ** 8 + n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
def get_pairs(word):
"""
Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def whitespace_clean(text):
text = re.sub(r"\s+", " ", text)
text = text.strip()
return text
class CLIPTokenizer(PreTrainedTokenizer):
"""
Construct a CLIP tokenizer. Based on byte-level Byte-Pair-Encoding.
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
be encoded differently whether it is at the beginning of the sentence (without space) or not:
You can get around that behavior by passing ``add_prefix_space=True`` when instantiating this tokenizer or when you
call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
.. note::
When used with ``is_split_into_words=True``, this tokenizer will add a space before each word (even the first
one).
This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods.
Users should refer to this superclass for more information regarding those methods.
Args:
vocab_file (:obj:`str`):
Path to the vocabulary file.
merges_file (:obj:`str`):
Path to the merges file.
errors (:obj:`str`, `optional`, defaults to :obj:`"replace"`):
Paradigm to follow when decoding bytes to UTF-8. See `bytes.decode
<https://docs.python.org/3/library/stdtypes.html#bytes.decode>`__ for more information.
unk_token (:obj:`str`, `optional`, defaults to :obj:`<|endoftext|>`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
bos_token (:obj:`str`, `optional`, defaults to :obj:`<|endoftext|>`):
The beginning of sequence token.
eos_token (:obj:`str`, `optional`, defaults to :obj:`<|endoftext|>`):
The end of sequence token.
add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to add an initial space to the input. This allows to treat the leading word just as any
other word. (CLIP tokenizer detect beginning of words by the preceding space).
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["input_ids", "attention_mask"]
def __init__(
self,
vocab_file,
merges_file,
errors="replace",
unk_token="<|endoftext|>",
bos_token="<|startoftext|>",
eos_token="<|endoftext|>",
pad_token="<|endoftext|>", # hack to enable padding
add_prefix_space=False,
do_lower_case=True,
**kwargs
):
bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
super().__init__(
errors=errors,
unk_token=unk_token,
bos_token=bos_token,
eos_token=eos_token,
pad_token=pad_token,
add_prefix_space=add_prefix_space,
do_lower_case=do_lower_case,
**kwargs,
)
try:
import ftfy
self.fix_text = ftfy.fix_text
except ImportError:
logger.warning("ftfy or spacy is not installed using BERT BasicTokenizer instead of ftfy.")
self.nlp = BasicTokenizer(do_lower_case=True)
self.fix_text = None
with open(vocab_file, encoding="utf-8") as vocab_handle:
self.encoder = json.load(vocab_handle)
self.decoder = {v: k for k, v in self.encoder.items()}
self.errors = errors # how to handle errors in decoding
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
with open(merges_file, encoding="utf-8") as merges_handle:
bpe_merges = merges_handle.read().split("\n")[1 : 49152 - 256 - 2 + 1]
bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
self.cache = {"<|startoftext|>": "<|startoftext|>", "<|endoftext|>": "<|endoftext|>"}
self.add_prefix_space = add_prefix_space
self.pat = re.compile(
r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
re.IGNORECASE,
)
# Very ugly hack to enable padding
@property
def pad_token_id(self) -> Optional[int]:
"""
:obj:`Optional[int]`: Id of the padding token in the vocabulary. Returns :obj:`None` if the token has not been
set.
"""
return 0
@property
def vocab_size(self):
return len(self.encoder)
def get_vocab(self):
return dict(self.encoder, **self.added_tokens_encoder)
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A CLIP sequence has the following format:
- single sequence: ``<|startoftext|> X <|endoftext|>``
Pairs of sequences are not the expected use case, but they will be handled without a separator.
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
return [self.bos_token_id] + token_ids_0 + token_ids_1 + [self.eos_token_id]
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer ``prepare_for_model`` method.
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
:obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
if token_ids_1 is None:
return [1] + ([0] * len(token_ids_0)) + [1]
return [1] + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + [1]
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + (token[-1] + "</w>",)
pairs = get_pairs(word)
if not pairs:
return token + "</w>"
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
i = j
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = " ".join(word)
self.cache[token] = word
return word
def _tokenize(self, text):
"""Tokenize a string."""
bpe_tokens = []
if self.fix_text is None:
text = " ".join(self.nlp.tokenize(text))
else:
text = whitespace_clean(self.fix_text(text)).lower()
for token in re.findall(self.pat, text):
token = "".join(
self.byte_encoder[b] for b in token.encode("utf-8")
) # Maps all our bytes to unicode strings, avoiding controle tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
return bpe_tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.encoder.get(token, self.encoder.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.decoder.get(index)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
text = "".join(tokens)
text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors).replace("</w>", " ")
return text
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(save_directory):
logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
return
vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
merge_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
)
with open(vocab_file, "w", encoding="utf-8") as f:
f.write(json.dumps(self.encoder, ensure_ascii=False))
index = 0
with open(merge_file, "w", encoding="utf-8") as writer:
writer.write("#version: 0.2\n")
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
"Saving vocabulary to {}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!".format(merge_file)
)
index = token_index
writer.write(" ".join(bpe_tokens) + "\n")
index += 1
return vocab_file, merge_file
def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
if is_split_into_words or add_prefix_space:
text = " " + text
return (text, kwargs)
|
{
"content_hash": "1dd97a4f9d50aeeb2e01920201ff0d4a",
"timestamp": "",
"source": "github",
"line_count": 357,
"max_line_length": 119,
"avg_line_length": 39.299719887955185,
"alnum_prop": 0.5869565217391305,
"repo_name": "huggingface/pytorch-transformers",
"id": "39eed99e3ac83211ec520a847f001be8f9746249",
"size": "14668",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/transformers/models/clip/tokenization_clip.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "194"
},
{
"name": "Jupyter Notebook",
"bytes": "535623"
},
{
"name": "Python",
"bytes": "897445"
}
],
"symlink_target": ""
}
|
from flask import Blueprint, render_template, session, redirect, url_for, \
request, flash, g, jsonify, abort
from flask.ext.openid import COMMON_PROVIDERS
from flask_website import oid
from flask_website.search import search as perform_search
from flask_website.utils import requires_login, request_wants_json
from flask_website.database import db_session, User
from flask_website.listings.releases import releases
mod = Blueprint('general', __name__)
@mod.route('/')
def index():
if request_wants_json():
return jsonify(releases=[r.to_json() for r in releases])
return render_template('general/index.html',
latest_release=releases[-1])
@mod.route('/search/')
def search():
q = request.args.get('q') or ''
page = request.args.get('page', type=int) or 1
results = None
if q:
results = perform_search(q, page=page)
if results is None:
abort(404)
return render_template('general/search.html', results=results, q=q)
@mod.route('/logout/')
def logout():
if 'openid' in session:
flash(u'Logged out')
del session['openid']
return redirect(request.referrer or url_for('general.index'))
@mod.route('/login/', methods=['GET', 'POST'])
@oid.loginhandler
def login():
if g.user is not None:
return redirect(url_for('general.index'))
if 'cancel' in request.form:
flash(u'Cancelled. The OpenID was not changed.')
return redirect(oid.get_next_url())
openid = request.values.get('openid')
if not openid:
openid = COMMON_PROVIDERS.get(request.args.get('provider'))
if openid:
return oid.try_login(openid, ask_for=['fullname', 'nickname'])
error = oid.fetch_error()
if error:
flash(u'Error: ' + error)
return render_template('general/login.html', next=oid.get_next_url())
@mod.route('/first-login/', methods=['GET', 'POST'])
def first_login():
if g.user is not None or 'openid' not in session:
return redirect(url_for('.login'))
if request.method == 'POST':
if 'cancel' in request.form:
del session['openid']
flash(u'Login was aborted')
return redirect(url_for('general.login'))
db_session.add(User(request.form['name'], session['openid']))
db_session.commit()
flash(u'Successfully created profile and logged in')
return redirect(oid.get_next_url())
return render_template('general/first_login.html',
next=oid.get_next_url(),
openid=session['openid'])
@mod.route('/profile/', methods=['GET', 'POST'])
@requires_login
def profile():
name = g.user.name
if request.method == 'POST':
name = request.form['name'].strip()
if not name:
flash(u'Error: a name is required')
else:
g.user.name = name
db_session.commit()
flash(u'User profile updated')
return redirect(url_for('.index'))
return render_template('general/profile.html', name=name)
@mod.route('/profile/change-openid/', methods=['GET', 'POST'])
@requires_login
@oid.loginhandler
def change_openid():
if request.method == 'POST':
if 'cancel' in request.form:
flash(u'Cancelled. The OpenID was not changed.')
return redirect(oid.get_next_url())
openid = request.values.get('openid')
if not openid:
openid = COMMON_PROVIDERS.get(request.args.get('provider'))
if openid:
return oid.try_login(openid)
error = oid.fetch_error()
if error:
flash(u'Error: ' + error)
return render_template('general/change_openid.html',
next=oid.get_next_url())
@oid.after_login
def create_or_login(resp):
session['openid'] = resp.identity_url
user = g.user or User.query.filter_by(openid=resp.identity_url).first()
if user is None:
return redirect(url_for('.first_login', next=oid.get_next_url(),
name=resp.fullname or resp.nickname))
if user.openid != resp.identity_url:
user.openid = resp.identity_url
db_session.commit()
flash(u'OpenID identity changed')
else:
flash(u'Successfully signed in')
return redirect(oid.get_next_url())
|
{
"content_hash": "c880a26684152a0e0d3fb99e34d37084",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 75,
"avg_line_length": 33.960629921259844,
"alnum_prop": 0.62068166009738,
"repo_name": "LiHaoGit/flask-website",
"id": "257900bc33397f9965f25233146413aedd0072c1",
"size": "4313",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "flask_website/views/general.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "11505"
},
{
"name": "HTML",
"bytes": "35333"
},
{
"name": "JavaScript",
"bytes": "1015"
},
{
"name": "Makefile",
"bytes": "142"
},
{
"name": "Python",
"bytes": "77695"
}
],
"symlink_target": ""
}
|
import sys
import os
from basic.common import ROOT_PATH,readRankingResults
from basic.annotationtable import readConcepts,readAnnotationsFrom,writeAnnotationsTo,writeConceptsTo
if __name__ == '__main__':
rootpath = ROOT_PATH
collection = 'geoflickr1m'
collection = 'flickr1m'
#collection = 'web13train'
#collection = 'tentagv10dev'
collection = sys.argv[1] #'msr2013train'
conceptSetName = 'concepts88'
conceptSetName = 'biconcepts15'
conceptSetName = 'concepts11'
conceptSetName = 'conceptsweb13'
conceptSetName = 'conceptstentagv10dev'
conceptSetName = 'concepts100'
#conceptSetName = 'conceptsweb15'
T = 10
numPos = 100
numNeg = numPos * 10
sourceAnnotationName = '%s.rand%d.0.randco%d.' % (conceptSetName, numPos, numNeg) + '%d.txt'
sourceAnnotationName = '%s.rand%d.0.randwn%d.' % (conceptSetName, numPos, numNeg) + '%d.txt'
#posName = 'dsift-1000nn' + str(numPos)
#tagrelMethod = 'dsiftpca225,knn,1000,lemm'
posName = 'borda-cos-dsiftpca' + str(numPos)
posName = 'borda-fcsidf-multipca' + str(numPos)
tagrelMethod = 'borda-cos-dsiftpca'
tagrelMethod = 'borda-fcsidf-multipca'
removeBatchTagged = 0
if removeBatchTagged:
posName = 'multipcanobt' + str(numPos)
tagrelMethod = 'tentagv10dev/multipca,knn,1000,lemm/nobt'
else:
posName = 'multipca' + str(numPos)
tagrelMethod = 'tentagv10dev/multipca,knn,1000,lemm'
#sourceAnnotationName = '%s.rand%d.0.randwn%d.' % (conceptSetName, numPos, numNeg) + '%d.txt'
#posName = 'rgbsift' + str(numPos)
#tagrelMethod = 'web13train/rgbsift,knn,1000,w'
#posName = 'txt' + str(numPos)
#tagrelMethod = 'textual'
posName = 'clickcount' + str(numPos)
tagrelMethod = 'clickcount'
#posName = 'ccgd' + str(numPos)
#tagrelMethod = 'flickr1m/ccgd,knn,1000'
concepts = readConcepts(collection, sourceAnnotationName%0, rootpath)
holdoutfile = os.path.join(rootpath, collection, "ImageSets", "holdout.txt")
holdoutSet = set(map(str.strip, open(holdoutfile).readlines()))
print ('%s holdout %d' % (collection,len(holdoutSet)))
for concept in concepts:
simfile = os.path.join(rootpath, collection, 'SimilarityIndex', collection, 'tagged,lemm', tagrelMethod, '%s.txt' % concept)
searchresults = readRankingResults(simfile)
searchresults = [x for x in searchresults if x[0] not in holdoutSet]
positiveSet = [x[0] for x in searchresults[:numPos]]
for t in range(T):
newAnnotationName = sourceAnnotationName % t
newAnnotationName = newAnnotationName.replace('rand%d.0'%numPos, posName)
names,labels = readAnnotationsFrom(collection,sourceAnnotationName%t,concept,rootpath)
negativeSet = [x[0] for x in zip(names,labels) if -1 == x[1]]
renamed = positiveSet + negativeSet
relabeled = [1] * len(positiveSet) + [-1] * len(negativeSet)
print ('[%s] %s +%d, -%d -> %s' % (concept,sourceAnnotationName % t,len(positiveSet),len(negativeSet),newAnnotationName))
writeAnnotationsTo(renamed, relabeled, collection, newAnnotationName, concept, rootpath)
for t in range(T):
newAnnotationName = sourceAnnotationName % t
newAnnotationName = newAnnotationName.replace('rand%d.0'%numPos, posName)
writeConceptsTo(concepts, collection, newAnnotationName, rootpath)
|
{
"content_hash": "c3645dedbea58df8364f9de77b050115",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 134,
"avg_line_length": 42.9390243902439,
"alnum_prop": 0.6614598125532519,
"repo_name": "li-xirong/jingwei",
"id": "7bd15dd297b8ed749654265aa979f785ff7194b6",
"size": "3521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "model_based/dataengine/combineAnnotations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4962"
},
{
"name": "C",
"bytes": "1553"
},
{
"name": "C++",
"bytes": "8649"
},
{
"name": "CSS",
"bytes": "376"
},
{
"name": "HTML",
"bytes": "2354"
},
{
"name": "Makefile",
"bytes": "415"
},
{
"name": "Matlab",
"bytes": "21689"
},
{
"name": "Python",
"bytes": "400259"
},
{
"name": "Shell",
"bytes": "42803"
}
],
"symlink_target": ""
}
|
"""
The client object holds the connection information, and methods of the client
connected via TCP.
"""
import Queue
import time
import logging
from channel import Channel
class TCPClient(object):
"""TCPClient object."""
def __init__(self, server, connection, address):
"""
Set up the instance variables, and create a message queue,
along with setting the socket to non blocking.
"""
super(TCPClient, self).__init__()
self.user = None
self.address = address
self.sock = connection
self.server = server
self.sock.setblocking(0)
self.message_queue = Queue.Queue()
self.connection_accepted = False
self.ready_for_ping = True
self.last_ping = int(time.time())
self.ping_random_number = 0
def ping(self, rand_num):
"""Take care of setting variables for when a client is pinged."""
self.ping_random_number = rand_num
self.ready_for_ping = False
def time_since_pinged(self):
"""Return the time elapsed since the last ping."""
return int(time.time()) - self.last_ping
def add_message(self, data):
"""Add a message to the queue."""
return self.message_queue.put(data)
def get_message(self):
"""Get the next message out from the queue."""
return self.message_queue.get_nowait()
def join_channel(self, chan):
"""
Notify the client that he has joined the channel, and supply the
client with a userlist.
"""
try:
chan = Channel(chan)
except NameError:
chan = Channel.channels[chan]
if chan not in self.user.channels:
logging.debug("Putting user '%s' in channel '%s'" % (self.user.nickname, chan.name))
chan.add_user(self.user)
self.user.add_channel(chan)
self.server.queue_message("JOINED: %s\r\n" % chan.name, self.sock)
self.server.queue_message("USERLIST %s: %s\r\n" % (chan.name, chan.userlist()), self.sock)
def leave_channel(self, channel):
"""Leave a channel by removing it from the self.channels list."""
if channel in self.user.channels:
self.user.leave_channel(channel)
def close(self):
"""Wrapper for the sockets close function."""
return self.sock.close()
|
{
"content_hash": "1fe5b5a56a2204d9c638fad21dd5c461",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 102,
"avg_line_length": 31.441558441558442,
"alnum_prop": 0.5960346964064436,
"repo_name": "martinjlowm/Voixer",
"id": "252e964480fa152ba20ed8be38e77b2709862bcf",
"size": "2467",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "voixer/tcp_client.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "19985"
}
],
"symlink_target": ""
}
|
"""
Atmosphere
==========
The atmosphere module contains functions and classes related to atmospheric
acoustics and is based on :mod:`acoustics.standards.iso_9613_1_1993`.
Atmosphere class
****************
.. autoclass:: acoustics.atmosphere.Atmosphere
From ISO 9613-1 1993
********************
Constants
---------
.. autoattribute:: acoustics.standards.iso_9613_1_1993.SOUNDSPEED
.. autoattribute:: acoustics.standards.iso_9613_1_1993.REFERENCE_TEMPERATURE
.. autoattribute:: acoustics.standards.iso_9613_1_1993.REFERENCE_PRESSURE
.. autoattribute:: acoustics.standards.iso_9613_1_1993.TRIPLE_TEMPERATURE
Functions
---------
.. autofunction:: acoustics.standards.iso_9613_1_1993.soundspeed
.. autofunction:: acoustics.standards.iso_9613_1_1993.saturation_pressure
.. autofunction:: acoustics.standards.iso_9613_1_1993.molar_concentration_water_vapour
.. autofunction:: acoustics.standards.iso_9613_1_1993.relaxation_frequency_nitrogen
.. autofunction:: acoustics.standards.iso_9613_1_1993.relaxation_frequency_oxygen
.. autofunction:: acoustics.standards.iso_9613_1_1993.attenuation_coefficient
"""
from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as plt
from acoustics.standards.iso_9613_1_1993 import *
class Atmosphere(object):
"""
Class describing atmospheric conditions.
"""
REF_TEMP = 293.15
"""Reference temperature"""
REF_PRESSURE = 101.325
"""International Standard Atmosphere in kilopascal"""
TRIPLE_TEMP = 273.16
"""Triple point isotherm temperature."""
def __init__(self,
temperature=REFERENCE_TEMPERATURE,
pressure=REFERENCE_PRESSURE,
relative_humidity=0.0,
reference_temperature=REFERENCE_TEMPERATURE,
reference_pressure=REFERENCE_PRESSURE,
triple_temperature=TRIPLE_TEMPERATURE):
"""
:param temperature: Temperature in kelvin
:param pressure: Pressure
:param relative_humidity: Relative humidity
:param reference_temperature: Reference temperature.
:param reference_pressure: Reference pressure.
:param triple_temperature: Triple temperature.
"""
self.temperature = temperature
"""Ambient temperature :math:`T`."""
self.pressure = pressure
"""Ambient pressure :math:`p_a`."""
self.relative_humidity = relative_humidity
"""Relative humidity"""
self.reference_temperature = reference_temperature
"""
Reference temperature.
"""
self.reference_pressure = reference_pressure
"""
Reference pressure.
"""
self.triple_temperature = triple_temperature
"""
Triple temperature.
"""
def __repr__(self):
return "Atmosphere{}".format(self.__str__())
def __str__(self):
return "(temperature={}, pressure={}, relative_humidity={}, " \
"reference_temperature={}, reference_pressure={}, " \
"triple_temperature={})".format(self.temperature, self.pressure,
self.relative_humidity,
self.reference_temperature,
self.reference_pressure,
self.triple_temperature)
@property
def soundspeed(self):
"""
Speed of sound :math:`c`.
The speed of sound is calculated using :func:`acoustics.standards.iso_9613_1_1993.soundspeed`.
"""
return soundspeed(self.temperature, self.reference_temperature)
@property
def saturation_pressure(self):
"""
Saturation pressure :math:`p_{sat}`.
The saturation pressure is calculated using :func:`acoustics.standards.iso_9613_1_1993.saturation_pressure`.
"""
return saturation_pressure(self.temperature, self.reference_pressure, self.triple_temperature)
@property
def molar_concentration_water_vapour(self):
"""
Molar concentration of water vapour :math:`h`.
The molar concentration of water vapour is calculated using :func:`acoustics.standards.iso_9613_1_1993.molar_concentration_water_vapour`.
"""
return molar_concentration_water_vapour(self.relative_humidity, self.saturation_pressure, self.pressure)
@property
def relaxation_frequency_nitrogen(self):
"""
Resonance frequency of nitrogen :math:`f_{r,N}`.
The resonance frequency is calculated using :func:`acoustics.standards.iso_9613_1_1993.relaxation_frequency_nitrogen`.
"""
return relaxation_frequency_nitrogen(self.pressure, self.temperature, self.molar_concentration_water_vapour, self.reference_pressure, self.reference_temperature)
@property
def relaxation_frequency_oxygen(self):
"""
Resonance frequency of oxygen :math:`f_{r,O}`.
The resonance frequency is calculated using :func:`acoustics.standards.iso_9613_1_1993.relaxation_frequency_oxygen`.
"""
return relaxation_frequency_oxygen(self.pressure, self.molar_concentration_water_vapour, self.reference_pressure)
def attenuation_coefficient(self, frequency):
"""
Attenuation coefficient :math:`\\alpha` describing atmospheric absorption in dB/m as function of ``frequency``.
:param frequency: Frequencies to be considered.
The attenuation coefficient is calculated using :func:`acoustics.standards.iso_9613_1_1993.attenuation_coefficient`.
"""
return attenuation_coefficient(self.pressure, self.temperature, self.reference_pressure, self.reference_temperature, self.relaxation_frequency_nitrogen, self.relaxation_frequency_oxygen, frequency)
def plot_attenuation_coefficient(self, frequency):
"""
Plot the attenuation coefficient :math:`\\alpha` as function of frequency and write the figure to ``filename``.
:param filename: Filename
:param frequency: Frequencies
.. note:: The attenuation coefficient is plotted in dB/km!
"""
fig = plt.figure()
ax0 = fig.add_subplot(111)
ax0.plot(frequency, self.attenuation_coefficient(frequency)*1000.0)
ax0.set_xscale('log')
ax0.set_yscale('log')
ax0.set_xlabel(r'$f$ in Hz')
ax0.set_ylabel(r'$\alpha$ in dB/km')
ax0.legend()
ax0.grid()
return fig
__all__ = ['Atmosphere', 'SOUNDSPEED', 'REFERENCE_TEMPERATURE',
'REFERENCE_TEMPERATURE', 'TRIPLE_TEMPERATURE',
'soundspeed', 'saturation_pressure',
'molar_concentration_water_vapour',
'relaxation_frequency_oxygen',
'relaxation_frequency_nitrogen',
'attenuation_coefficient'
]
|
{
"content_hash": "c2fad3c2059378f67b3044695a85249e",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 205,
"avg_line_length": 36.101010101010104,
"alnum_prop": 0.6225517627308338,
"repo_name": "giumas/python-acoustics",
"id": "06dcc59548cc327eefdfd4b372fc550f9dc396c9",
"size": "7148",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "acoustics/atmosphere.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "291963"
}
],
"symlink_target": ""
}
|
__version__='${RFR_VERSION_MAJOR}.${RFR_VERSION_MINOR}.${RFR_VERSION_RELEASE}'
__version_info__=(${RFR_VERSION_MAJOR}, ${RFR_VERSION_MINOR}, ${RFR_VERSION_RELEASE})
|
{
"content_hash": "1dcd21b493d00f533093400e44eb337b",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 85,
"avg_line_length": 82.5,
"alnum_prop": 0.6787878787878788,
"repo_name": "sfalkner/random_forest_run",
"id": "2b5894ec64c234665575543544d1e128e314d06d",
"size": "165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyrfr/pyrfr/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "713992"
},
{
"name": "CMake",
"bytes": "3733"
},
{
"name": "Python",
"bytes": "59590"
},
{
"name": "Roff",
"bytes": "1519"
},
{
"name": "Shell",
"bytes": "1448"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, division, absolute_import
from collections import defaultdict
from datetime import timedelta
import logging
import six
import socket
import struct
from time import time
import traceback
import uuid
from toolz import assoc, first
try:
import cPickle as pickle
except ImportError:
import pickle
import cloudpickle
from tornado import gen
from tornado.locks import Event
from tornado.tcpserver import TCPServer
from tornado.tcpclient import TCPClient
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream, StreamClosedError
from .compatibility import PY3, unicode, WINDOWS
from .utils import get_traceback, truncate_exception, ignoring
from . import protocol
pickle_types = [str, bytes]
with ignoring(ImportError):
import numpy as np
pickle_types.append(np.ndarray)
with ignoring(ImportError):
import pandas as pd
pickle_types.append(pd.core.generic.NDFrame)
pickle_types = tuple(pickle_types)
class RPCClosed(IOError):
pass
def dumps(x):
""" Manage between cloudpickle and pickle
1. Try pickle
2. If it is short then check if it contains __main__
3. If it is long, then first check type, then check __main__
"""
try:
result = pickle.dumps(x, protocol=pickle.HIGHEST_PROTOCOL)
if len(result) < 1000:
if b'__main__' in result:
return cloudpickle.dumps(x, protocol=pickle.HIGHEST_PROTOCOL)
else:
return result
else:
if isinstance(x, pickle_types) or b'__main__' not in result:
return result
else:
return cloudpickle.dumps(x, protocol=pickle.HIGHEST_PROTOCOL)
except:
try:
return cloudpickle.dumps(x, protocol=pickle.HIGHEST_PROTOCOL)
except Exception:
logger.info("Failed to serialize %s", x, exc_info=True)
raise
def loads(x):
try:
return pickle.loads(x)
except Exception:
logger.info("Failed to deserialize %s", x, exc_info=True)
raise
logger = logging.getLogger(__name__)
def get_total_physical_memory():
try:
import psutil
return psutil.virtual_memory().total / 2
except ImportError:
return 2e9
MAX_BUFFER_SIZE = get_total_physical_memory()
def handle_signal(sig, frame):
IOLoop.instance().add_callback(IOLoop.instance().stop)
class Server(TCPServer):
""" Distributed TCP Server
Superclass for both Worker and Scheduler objects.
Inherits from ``tornado.tcpserver.TCPServer``, adding a protocol for RPC.
**Handlers**
Servers define operations with a ``handlers`` dict mapping operation names
to functions. The first argument of a handler function must be a stream for
the connection to the client. Other arguments will receive inputs from the
keys of the incoming message which will always be a dictionary.
>>> def pingpong(stream):
... return b'pong'
>>> def add(stream, x, y):
... return x + y
>>> handlers = {'ping': pingpong, 'add': add}
>>> server = Server(handlers) # doctest: +SKIP
>>> server.listen(8000) # doctest: +SKIP
**Message Format**
The server expects messages to be dictionaries with a special key, `'op'`
that corresponds to the name of the operation, and other key-value pairs as
required by the function.
So in the example above the following would be good messages.
* ``{'op': 'ping'}``
* ``{'op': 'add': 'x': 10, 'y': 20}``
"""
default_port = 0
def __init__(self, handlers, max_buffer_size=MAX_BUFFER_SIZE, **kwargs):
self.handlers = assoc(handlers, 'identity', self.identity)
self.id = str(uuid.uuid1())
self._port = None
self.rpc = ConnectionPool()
super(Server, self).__init__(max_buffer_size=max_buffer_size, **kwargs)
@property
def port(self):
if not self._port:
try:
self._port = first(self._sockets.values()).getsockname()[1]
except StopIteration:
raise OSError("Server has no port. Please call .listen first")
return self._port
def identity(self, stream):
return {'type': type(self).__name__, 'id': self.id}
def listen(self, port=None):
if port is None:
port = self.default_port
while True:
try:
super(Server, self).listen(port)
break
except (socket.error, OSError):
if port:
raise
else:
logger.info('Randomly assigned port taken for %s. Retrying',
type(self).__name__)
@gen.coroutine
def handle_stream(self, stream, address):
""" Dispatch new connections to coroutine-handlers
Handlers is a dictionary mapping operation names to functions or
coroutines.
{'get_data': get_data,
'ping': pingpong}
Coroutines should expect a single IOStream object.
"""
stream.set_nodelay(True)
ip, port = address
logger.info("Connection from %s:%d to %s", ip, port,
type(self).__name__)
try:
while True:
try:
msg = yield read(stream)
logger.debug("Message from %s:%d: %s", ip, port, msg)
except StreamClosedError:
logger.info("Lost connection: %s", str(address))
break
except Exception as e:
yield write(stream, error_message(e, status='uncaught-error'))
continue
if not isinstance(msg, dict):
raise TypeError("Bad message type. Expected dict, got\n "
+ str(msg))
op = msg.pop('op')
close = msg.pop('close', False)
reply = msg.pop('reply', True)
if op == 'close':
if reply:
yield write(stream, 'OK')
break
try:
handler = self.handlers[op]
except KeyError:
result = "No handler found: %s" % op
logger.warn(result, exc_info=True)
else:
logger.debug("Calling into handler %s", handler.__name__)
try:
result = yield gen.maybe_future(handler(stream, **msg))
except StreamClosedError as e:
logger.info("%s", e)
result = error_message(e, status='uncaught-error')
except Exception as e:
logger.exception(e)
result = error_message(e, status='uncaught-error')
if reply:
try:
yield write(stream, result)
except StreamClosedError:
logger.info("Lost connection: %s" % str(address))
break
if close:
break
finally:
try:
stream.close()
except Exception as e:
logger.warn("Failed while closing writer", exc_info=True)
logger.info("Close connection from %s:%d to %s", address[0], address[1],
type(self).__name__)
@gen.coroutine
def read(stream):
""" Read a message from a stream """
if isinstance(stream, BatchedStream):
msg = yield stream.recv()
raise gen.Return(msg)
else:
n_frames = yield stream.read_bytes(8)
n_frames = struct.unpack('Q', n_frames)[0]
lengths = yield stream.read_bytes(8 * n_frames)
lengths = struct.unpack('Q' * n_frames, lengths)
frames = []
for length in lengths:
if length:
frame = yield stream.read_bytes(length)
else:
frame = b''
frames.append(frame)
msg = protocol.loads(frames)
raise gen.Return(msg)
@gen.coroutine
def write(stream, msg):
""" Write a message to a stream """
if isinstance(stream, BatchedStream):
stream.send(msg)
else:
try:
frames = protocol.dumps(msg)
except Exception as e:
logger.info("Unserializable Message: %s", msg)
logger.exception(e)
raise
futures = []
lengths = ([struct.pack('Q', len(frames))] +
[struct.pack('Q', len(frame)) for frame in frames])
futures.append(stream.write(b''.join(lengths)))
for frame in frames[:-1]:
futures.append(stream.write(frame))
futures.append(stream.write(frames[-1]))
if WINDOWS:
yield futures[-1]
else:
yield futures
def pingpong(stream):
return b'pong'
@gen.coroutine
def connect(ip, port, timeout=3):
client = TCPClient()
start = time()
while True:
future = client.connect(ip, port, max_buffer_size=MAX_BUFFER_SIZE)
try:
stream = yield gen.with_timeout(timedelta(seconds=timeout), future)
stream.set_nodelay(True)
raise gen.Return(stream)
except StreamClosedError:
if time() - start < timeout:
yield gen.sleep(0.01)
logger.debug("sleeping on connect")
else:
raise
except gen.TimeoutError:
raise IOError("Timed out while connecting to %s:%d" % (ip, port))
@gen.coroutine
def send_recv(stream=None, arg=None, ip=None, port=None, addr=None, reply=True, **kwargs):
""" Send and recv with a stream
Keyword arguments turn into the message
response = yield send_recv(stream, op='ping', reply=True)
"""
if arg:
if isinstance(arg, (unicode, bytes)):
addr = arg
if isinstance(arg, tuple):
ip, port = arg
if addr:
assert not ip and not port
if PY3 and isinstance(addr, bytes):
addr = addr.decode()
ip, port = addr.rsplit(':', 1)
port = int(port)
if PY3 and isinstance(ip, bytes):
ip = ip.decode()
if stream is None:
stream = yield connect(ip, port)
msg = kwargs
msg['reply'] = reply
yield write(stream, msg)
if reply:
response = yield read(stream)
if isinstance(response, dict) and response.get('status') == 'uncaught-error':
six.reraise(*clean_exception(**response))
else:
response = None
if kwargs.get('close'):
stream.close()
raise gen.Return(response)
def ip_port_from_args(arg=None, addr=None, ip=None, port=None):
if arg:
if isinstance(arg, (unicode, bytes)):
addr = arg
if isinstance(arg, tuple):
ip, port = arg
if addr:
if PY3 and isinstance(addr, bytes):
addr = addr.decode()
assert not ip and not port
ip, port = addr.rsplit(':', 1)
port = int(port)
if PY3 and isinstance(ip, bytes):
ip = ip.decode()
return ip, port
class rpc(object):
""" Conveniently interact with a remote server
Normally we construct messages as dictionaries and send them with read/write
>>> stream = yield connect(ip, port) # doctest: +SKIP
>>> msg = {'op': 'add', 'x': 10, 'y': 20} # doctest: +SKIP
>>> yield write(stream, msg) # doctest: +SKIP
>>> response = yield read(stream) # doctest: +SKIP
To reduce verbosity we use an ``rpc`` object.
>>> remote = rpc(ip=ip, port=port) # doctest: +SKIP
>>> response = yield remote.add(x=10, y=20) # doctest: +SKIP
One rpc object can be reused for several interactions.
Additionally, this object creates and destroys many streams as necessary
and so is safe to use in multiple overlapping communications.
When done, close streams explicitly.
>>> remote.close_streams() # doctest: +SKIP
"""
def __init__(self, arg=None, stream=None, ip=None, port=None, addr=None,
timeout=3):
ip, port = ip_port_from_args(arg=arg, addr=addr, ip=ip, port=port)
self.streams = dict()
self.ip = ip
self.port = port
self.timeout = timeout
self.status = 'running'
assert self.ip
assert self.port
@property
def address(self):
return '%s:%d' % (self.ip, self.port)
@gen.coroutine
def live_stream(self):
""" Get an open stream
Some streams to the ip/port target may be in current use by other
coroutines. We track this with the `streams` dict
:: {stream: True/False if open and ready for use}
This function produces an open stream, either by taking one that we've
already made or making a new one if they are all taken. This also
removes streams that have been closed.
When the caller is done with the stream they should set
self.streams[stream] = True
As is done in __getattr__ below.
"""
if self.status == 'closed':
raise RPCClosed("RPC Closed")
to_clear = set()
open = False
for stream, open in self.streams.items():
if stream.closed():
to_clear.add(stream)
if open:
break
if not open or stream.closed():
stream = yield connect(self.ip, self.port, timeout=self.timeout)
for s in to_clear:
del self.streams[s]
self.streams[stream] = False # mark as taken
raise gen.Return(stream)
def close_streams(self):
for stream in self.streams:
if stream and not stream.closed():
try:
stream.close()
except (OSError, IOError, StreamClosedError):
pass
def __getattr__(self, key):
@gen.coroutine
def send_recv_from_rpc(**kwargs):
stream = yield self.live_stream()
result = yield send_recv(stream=stream, op=key, **kwargs)
self.streams[stream] = True # mark as open
raise gen.Return(result)
return send_recv_from_rpc
def __del__(self):
self.close_streams()
def close_rpc(self):
self.status = 'closed'
self.close_streams()
class RPCCall(object):
""" The result of ConnectionPool()('host:port')
See Also:
ConnectionPool
"""
def __init__(self, ip, port, pool):
self.ip = ip
self.port = port
self.pool = pool
def __getattr__(self, key):
@gen.coroutine
def send_recv_from_rpc(**kwargs):
stream = yield self.pool.connect(self.ip, self.port)
try:
result = yield send_recv(stream=stream, op=key, **kwargs)
finally:
if not stream.closed():
self.pool.available[self.ip, self.port].add(stream)
self.pool.occupied[self.ip, self.port].remove(stream)
self.pool.active -= 1
raise gen.Return(result)
return send_recv_from_rpc
class ConnectionPool(object):
""" A maximum sized pool of Tornado IOStreams
This provides a connect method that mirrors the normal distributed.connect
method, but provides connection sharing and tracks connection limits.
This object provides an ``rpc`` like interface::
>>> rpc = ConnectionPool(limit=512)
>>> scheduler = rpc('127.0.0.1:8786')
>>> workers = [rpc(ip=ip, port=port) for ip, port in ...]
>>> info = yield scheduler.identity()
It creates enough streams to satisfy concurrent connections to any
particular address::
>>> a, b = yield [scheduler.who_has(), scheduler.has_what()]
It reuses existing streams so that we don't have to continuously reconnect.
It also maintains a stream limit to avoid "too many open file handle"
issues. Whenever this maximum is reached we clear out all idling streams.
If that doesn't do the trick then we wait until one of the occupied streams
closes.
"""
def __init__(self, limit=512):
self.open = 0
self.active = 0
self.limit = limit
self.available = defaultdict(set)
self.occupied = defaultdict(set)
self.event = Event()
def __str__(self):
return "<ConnectionPool: open=%d, active=%d>" % (self.open,
self.active)
__repr__ = __str__
def __call__(self, arg=None, ip=None, port=None, addr=None):
""" Cached rpc objects """
ip, port = ip_port_from_args(arg=arg, addr=addr, ip=ip, port=port)
return RPCCall(ip, port, self)
@gen.coroutine
def connect(self, ip, port, timeout=3):
if self.available.get((ip, port)):
stream = self.available[ip, port].pop()
self.active += 1
self.occupied[ip, port].add(stream)
raise gen.Return(stream)
while self.open >= self.limit:
self.event.clear()
self.collect()
yield self.event.wait()
self.open += 1
stream = yield connect(ip=ip, port=port, timeout=timeout)
stream.set_close_callback(lambda: self.on_close(ip, port, stream))
self.active += 1
self.occupied[ip, port].add(stream)
if self.open >= self.limit:
self.event.clear()
raise gen.Return(stream)
def on_close(self, ip, port, stream):
self.open -= 1
if stream in self.available[ip, port]:
self.available[ip, port].remove(stream)
if stream in self.occupied[ip, port]:
self.occupied[ip, port].remove(stream)
self.active -= 1
if self.open <= self.limit:
self.event.set()
def collect(self):
logger.info("Collecting unused streams. open: %d, active: %d",
self.open, self.active)
for k, streams in list(self.available.items()):
for stream in streams:
stream.close()
def coerce_to_address(o, out=str):
if PY3 and isinstance(o, bytes):
o = o.decode()
if isinstance(o, (unicode, str)):
ip, port = o.rsplit(':', 1)
port = int(port)
o = (ip, port)
if isinstance(o, list):
o = tuple(o)
if isinstance(o, tuple) and isinstance(o[0], bytes):
o = (o[0].decode(), o[1])
if out == str:
o = '%s:%s' % o
return o
def coerce_to_rpc(o, **kwargs):
if isinstance(o, (bytes, str, tuple, list)):
ip, port = coerce_to_address(o, out=tuple)
return rpc(ip=ip, port=int(port), **kwargs)
elif isinstance(o, IOStream):
return rpc(stream=o, **kwargs)
elif isinstance(o, rpc):
return o
else:
raise TypeError()
def error_message(e, status='error'):
""" Produce message to send back given an exception has occurred
This does the following:
1. Gets the traceback
2. Truncates the exception and the traceback
3. Serializes the exception and traceback or
4. If they can't be serialized send string versions
5. Format a message and return
See Also
--------
clean_exception: deserialize and unpack message into exception/traceback
six.reraise: raise exception/traceback
"""
tb = get_traceback()
e2 = truncate_exception(e, 1000)
try:
e3 = dumps(e2)
loads(e3)
except Exception:
e3 = Exception(str(e2))
e3 = dumps(e3)
try:
tb2 = dumps(tb)
except Exception:
tb2 = ''.join(traceback.format_tb(tb))
tb2 = dumps(tb2)
if len(tb2) > 10000:
tb2 = None
return {'status': status, 'exception': e3, 'traceback': tb2}
def clean_exception(exception, traceback, **kwargs):
""" Reraise exception and traceback. Deserialize if necessary
See Also
--------
error_message: create and serialize errors into message
"""
if isinstance(exception, bytes):
exception = loads(exception)
if isinstance(traceback, bytes):
traceback = loads(traceback)
if isinstance(traceback, str):
traceback = None
return type(exception), exception, traceback
from .batched import BatchedStream
|
{
"content_hash": "15ef27f30271d9bba988b1efb42ab85e",
"timestamp": "",
"source": "github",
"line_count": 670,
"max_line_length": 90,
"avg_line_length": 30.562686567164178,
"alnum_prop": 0.5708844068955413,
"repo_name": "amosonn/distributed",
"id": "d19074620850cc100b6a2e6a42372f2b61d6f6ff",
"size": "20477",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "distributed/core.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "3228"
},
{
"name": "Python",
"bytes": "737968"
},
{
"name": "Shell",
"bytes": "901"
}
],
"symlink_target": ""
}
|
class FormicaArgumentException(Exception):
pass
|
{
"content_hash": "109da4a0c19595fa5af11378987e66c8",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 42,
"avg_line_length": 26,
"alnum_prop": 0.8076923076923077,
"repo_name": "flomotlik/formica",
"id": "80171995c6a32b8ce139c8eafe30f6a3ba29c1d1",
"size": "52",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "formica/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "994"
},
{
"name": "Python",
"bytes": "135363"
}
],
"symlink_target": ""
}
|
"""cli.
Desc: Command-line tool for listing Python packages installed by setuptools,
package metadata, package dependencies, and querying The Cheese Shop
(PyPI) for Python package release information such as which installed
packages have updates available.
Author: Rob Cakebread <gentoodev a t gmail.com>
License : BSD (See COPYING)
"""
from __future__ import print_function
import argparse
import inspect
import os
import pkg_resources
import pprint
import re
import site
import struct
import subprocess
import sys
import webbrowser
if sys.version_info[0] == 2:
from httplib import HTTPException
from urllib import urlretrieve
from urlparse import urlparse
from xmlrpclib import Fault as XMLRPCFault
else:
from http.client import HTTPException
from urllib.request import urlretrieve
from urllib.parse import urlparse
from xmlrpc.client import Fault as XMLRPCFault
from distutils.sysconfig import get_python_lib
from yolk.metadata import get_metadata
from yolk import yolklib
from yolk.pypi import CheeseShop
from yolk.setuptools_support import get_download_uri, get_pkglist
from yolk.utils import run_command, command_successful
from yolk.__init__ import __version__ as VERSION
class YolkException(Exception):
"""Exception for communicating top-level error to user."""
class StdOut(object):
"""Filter stdout or stderr from specific modules So far this is just used
for pkg_resources."""
def __init__(self, stream, modulenames):
self.stdout = stream
# Modules to squelch
self.modulenames = modulenames
def __getattr__(self, attribute):
if attribute not in self.__dict__ or attribute == '__doc__':
return getattr(self.stdout, attribute)
return self.__dict__[attribute]
def flush(self):
"""Bug workaround for Python 3.2+: Exception AttributeError: 'flush'
in.
<yolk.cli.StdOut object...
"""
def write(self, inline):
"""Write a line to stdout if it isn't in a blacklist.
Try to get the name of the calling module to see if we want to
filter it. If there is no calling module, use current frame in
case there's a traceback before there is any calling module
"""
frame = inspect.currentframe().f_back
if frame:
mod = frame.f_globals.get('__name__')
else:
mod = sys._getframe(0).f_globals.get('__name__')
if mod not in self.modulenames:
self.stdout.write(inline)
def writelines(self, inline):
"""Write multiple lines."""
for line in inline:
self.write(line)
class Yolk(object):
"""Main class for yolk."""
def __init__(self):
# PyPI project name with proper case
self.project_name = ''
# PyPI project version
self.version = ''
# List of all versions not hidden on PyPI
self.all_versions = []
self.pkg_spec = None
self.options = None
# Squelch output from setuptools
# Add future offenders to this list.
shut_up = ['distutils.log']
sys.stdout = StdOut(sys.stdout, shut_up)
sys.stderr = StdOut(sys.stderr, shut_up)
self.pypi = None
def get_plugin(self, method):
"""Return plugin object if CLI option is activated and method exists.
@param method: name of plugin's method we're calling
@type method: string
@returns: list of plugins with `method`
"""
all_plugins = []
for entry_point in pkg_resources.iter_entry_points('yolk.plugins'):
plugin_obj = entry_point.load()
plugin = plugin_obj()
plugin.configure(self.options, None)
if plugin.enabled:
if not hasattr(plugin, method):
plugin = None
else:
all_plugins.append(plugin)
return all_plugins
def run(self):
"""Perform actions based on CLI options.
@returns: status code
"""
parser = setup_parser()
try:
import argcomplete
argcomplete.autocomplete(parser)
except ImportError:
pass
self.options = parser.parse_args()
pkg_spec = validate_pypi_opts(parser)
if not pkg_spec:
pkg_spec = self.options.pkg_spec
self.pkg_spec = pkg_spec
if self.options.fields:
self.options.fields = [s.strip().lower()
for s in self.options.fields.split(',')]
else:
self.options.fields = []
if (
not self.options.pypi_search and
len(sys.argv) == 1
):
parser.print_help()
return 2
# Options that depend on querying installed packages, not PyPI.
# We find the proper case for package names if they are installed,
# otherwise PyPI returns the correct case.
if (
self.options.show_deps or
self.options.show_all or
self.options.show_active or
self.options.show_non_active or
(self.options.show_updates and pkg_spec) or
self.options.upgrade
):
want_installed = True
else:
want_installed = False
# show_updates may or may not have a pkg_spec
if (
not want_installed or
self.options.show_updates or
self.options.upgrade
):
self.pypi = CheeseShop(self.options.debug)
# XXX: We should return 2 here if we couldn't create xmlrpc server
if pkg_spec:
(self.project_name,
self.version,
self.all_versions) = self.parse_pkg_ver(want_installed)
if want_installed and not self.project_name:
print(u'{} is not installed'.format(pkg_spec),
file=sys.stderr)
return 1
# I could prefix all these with 'cmd_' and the methods also
# and then iterate over the `options` dictionary keys...
commands = ['show_deps', 'query_metadata_pypi', 'fetch',
'versions_available', 'show_updates', 'upgrade',
'browse_website',
'show_download_links', 'pypi_search',
'show_pypi_changelog', 'show_pypi_releases',
'yolk_version', 'show_all',
'show_active', 'show_non_active', 'show_entry_map',
'show_entry_points']
# Run first command it finds, and only the first command, then return
# XXX: Check if more than one command was set in options and give
# error?
for action in commands:
if getattr(self.options, action):
return getattr(self, action)()
parser.print_help()
def show_active(self):
"""Show installed active packages."""
return self.show_distributions('active')
def show_non_active(self):
"""Show installed non-active packages."""
return self.show_distributions('nonactive')
def show_all(self):
"""Show all installed packages."""
return self.show_distributions('all')
def show_updates(self):
"""Check installed packages for available updates on PyPI.
@param project_name: optional package name to check; checks every
installed package if none specified
@type project_name: string
@returns: None
"""
if self.project_name:
pkg_list = [self.project_name]
else:
pkg_list = get_pkglist()
for (project_name, version, newest) in _updates(
pkg_list,
self.pypi,
user_installs_only=self.options.user):
print(u'{} {} ({})'.format(project_name,
version,
newest))
return 0
def upgrade(self):
"""Check installed packages for available updates on PyPI and upgrade.
@param project_name: optional package name to check; checks every
installed package if none specified
@type project_name: string
@returns: None
"""
if self.project_name:
pkg_list = [self.project_name]
else:
pkg_list = get_pkglist()
names = [values[0]
for values in _updates(pkg_list,
self.pypi,
user_installs_only=self.options.user)]
if names:
subprocess.call(
[sys.executable, '-m', 'pip', 'install', '--upgrade'] +
(['--user'] if self.options.user else []) +
names)
return 0
def show_distributions(self, show):
"""Show list of installed activated OR non-activated packages.
@param show: type of pkgs to show (all, active or nonactive)
@type show: string
@returns: None or 2 if error
"""
# Search for any plugins with active CLI options with add_column()
# method.
plugins = self.get_plugin('add_column')
# Some locations show false positive for 'development' packages:
ignores = ['/UNIONFS', '/KNOPPIX.IMG']
# See http://cheeseshop.python.org/pypi/workingenv.py for details.
workingenv = os.environ.get('WORKING_ENV')
if workingenv:
ignores.append(workingenv)
results = None
for (dist, active) in yolklib.get_distributions(show,
self.project_name,
self.version):
metadata = get_metadata(dist)
for prefix in ignores:
if dist.location.startswith(prefix):
dist.location = dist.location.replace(prefix, '')
# Case-insensitive search because of Windows.
if dist.location.lower().startswith(get_python_lib().lower()):
develop = ''
else:
develop = dist.location
if metadata:
add_column_text = ''
for my_plugin in plugins:
# See if package is 'owned' by a package manager such as
# portage, apt, rpm etc.
add_column_text += my_plugin.add_column(dist) + ' '
self.print_metadata(metadata, develop, active, add_column_text)
else:
print(str(dist) + ' has no metadata')
results = True
if not results and self.project_name:
if self.version:
pkg_spec = '{}=={}'.format(self.project_name, self.version)
else:
pkg_spec = self.project_name
if show == 'all':
print(
u'There are no versions of {} installed'.format(pkg_spec),
file=sys.stderr)
else:
print(
u'There are no {} versions of {} installed'.format(
show, pkg_spec),
file=sys.stderr)
return 2
elif show == 'all' and results and self.options.fields:
print("Versions with '*' are non-active.")
print("Versions with '!' are deployed in development mode.")
def print_metadata(self, metadata, develop, active, installed_by):
"""Print out formatted metadata.
@param metadata: package's metadata
@type metadata: pkg_resources Distribution obj
@param develop: path to pkg if its deployed in development mode
@type develop: string
@param active: show if package is activated or not
@type active: boolean
@param installed_by: Shows if pkg was installed by a package manager
other than setuptools
@type installed_by: string
@returns: None
"""
show_metadata = self.options.metadata
version = metadata['Version']
# When showing all packages, note which are not active:
if active:
if self.options.fields:
active_status = ''
else:
active_status = 'active'
else:
if self.options.fields:
active_status = '*'
else:
active_status = 'non-active'
if develop:
if self.options.fields:
development_status = '! ({})'.format(develop)
else:
development_status = 'development ({})'.format(develop)
else:
development_status = installed_by
status = '{} {}'.format(active_status, development_status)
if self.options.fields:
print(
'{} ({}){} {}'.format(metadata['Name'], version, active_status,
development_status))
else:
# Need intelligent justification.
print(metadata['Name'].ljust(15) + ' - ' + version.ljust(12) +
' - ' + status)
if self.options.fields:
for field in metadata.keys():
if field.lower() in self.options.fields:
print(u' {}: {}'.format(field, metadata[field]))
print()
elif show_metadata:
for field in metadata.keys():
if field != 'Name' and field != 'Summary':
print(u' {}: {}'.format(field, metadata[field]))
def show_deps(self):
"""Show dependencies for package(s)
@returns: 0 - success 1 - No dependency info supplied
"""
pkgs = pkg_resources.Environment()
for pkg in pkgs[self.project_name]:
if not self.version:
print(pkg.project_name, pkg.version)
i = len(list(pkg._dep_map.values())[0])
if i:
while i:
if (
not self.version or
self.version and
pkg.version == self.version
):
if self.version and i == len(list(
pkg._dep_map.values())[0]):
print(pkg.project_name, pkg.version)
print(u' ' + str(list(
pkg._dep_map.values())[0][i - 1]))
i -= 1
else:
return 1
return 0
def show_pypi_changelog(self):
"""Show detailed PyPI ChangeLog for the last `hours`
@returns: 0 = success or 1 if failed to retrieve from XML-RPC server
"""
hours = self.options.show_pypi_changelog
if not hours.isdigit():
print('You must supply an integer',
file=sys.stderr)
return 1
try:
changelog = self.pypi.changelog(int(hours))
except XMLRPCFault as err_msg:
print(err_msg, file=sys.stderr)
print("Couldn't retrieve changelog", file=sys.stderr)
return 1
last_pkg = ''
for entry in changelog:
pkg = entry[0]
if pkg != last_pkg:
print(u'{} {}\n\t{}'.format(entry[0], entry[1], entry[3]))
last_pkg = pkg
else:
print(u'\t{}'.format(entry[3]))
return 0
def show_pypi_releases(self):
"""Show PyPI releases for the last number of `hours`
@returns: 0 = success or 1 if failed to retrieve from XML-RPC server
"""
try:
hours = int(self.options.show_pypi_releases)
except ValueError:
print('You must supply an integer', file=sys.stderr)
return 1
try:
latest_releases = self.pypi.updated_releases(hours)
except XMLRPCFault as err_msg:
print(err_msg, file=sys.stderr)
print("Couldn't retrieve latest releases.", file=sys.stderr)
return 1
for release in latest_releases:
print(u'{} {}'.format(release[0], release[1]))
return 0
def show_download_links(self):
"""Query PyPI for pkg download URI for a packge.
@returns: 0
"""
# In case they specify version as 'dev' instead of using -T svn,
# don't show three svn URI's
if self.options.file_type == 'all' and self.version == 'dev':
self.options.file_type = 'svn'
if self.options.file_type == 'svn':
version = 'dev'
else:
if self.version:
version = self.version
else:
version = self.all_versions[0]
if self.options.file_type == 'all':
# Search for source, egg, and svn.
self.print_download_uri(version, True)
self.print_download_uri(version, False)
self.print_download_uri('dev', True)
else:
if self.options.file_type == 'source':
source = True
else:
source = False
self.print_download_uri(version, source)
return 0
def print_download_uri(self, version, source):
"""@param version: version number or 'dev' for svn.
@type version: string
@param source: download source or egg
@type source: boolean
@returns: None
"""
if version == 'dev':
source = True
# Use setuptools monkey-patch to grab url.
url = get_download_uri(self.project_name, version, source,
self.options.pypi_index)
if url:
print(u'{}'.format(url))
def fetch(self):
"""Download a package.
@returns: 0 = success or 1 if failed download
"""
source = True
directory = '.'
if self.options.file_type == 'svn':
svn_uri = get_download_uri(self.project_name,
'dev', True)
if svn_uri:
directory = self.project_name + '_svn'
return self.fetch_svn(svn_uri, directory)
else:
print(
'No subversion repository found for {}'.format(
self.project_name),
file=sys.stderr)
return 1
elif self.options.file_type == 'source':
source = True
elif self.options.file_type == 'egg':
source = False
uri = get_download_uri(self.project_name, self.version, source)
if uri:
return self.fetch_uri(directory, uri)
else:
print(u'No {} URI found for package: {}'.format(
self.options.file_type, self.project_name))
return 1
def fetch_uri(self, directory, uri):
"""Use ``urllib.urlretrieve`` to download package to file in sandbox
dir.
@param directory: directory to download to
@type directory: string
@param uri: uri to download
@type uri: string
@returns: 0 = success or 1 for failed download
"""
filename = os.path.basename(urlparse(uri)[2])
if os.path.exists(filename):
print(u'File exists: ' + filename, file=sys.stderr)
return 1
try:
downloaded_filename, headers = urlretrieve(uri, filename)
except IOError as err_msg:
print(
'Error downloading package {} from URL {}'.format(
filename, uri),
file=sys.stderr)
print(str(err_msg), file=sys.stderr)
return 1
if 'text/html' in headers:
dfile = open(downloaded_filename)
if re.search('404 Not Found', ''.join(dfile.readlines())):
dfile.close()
print("'404 Not Found' error", file=sys.stderr)
return 1
dfile.close()
return 0
def fetch_svn(self, svn_uri, directory):
"""Fetch subversion repository.
@param svn_uri: subversion repository uri to check out
@type svn_uri: string
@param directory: directory to download to
@type directory: string
"""
if not command_successful(['svn', '--version']):
raise YolkException('Do you have subversion installed?')
if os.path.exists(directory):
raise YolkException(
'Checkout directory exists - {}'.format(directory))
try:
os.mkdir(directory)
except OSError as err_msg:
raise YolkException('' + str(err_msg))
cwd = os.path.realpath(os.curdir)
os.chdir(directory)
status, _ = run_command(['svn', 'checkout', svn_uri])
os.chdir(cwd)
def browse_website(self, browser=None):
"""Launch web browser at project's homepage.
@param browser: name of web browser to use
@type browser: string
@returns: 0 if homepage found, 1 if no homepage found
"""
if len(self.all_versions):
metadata = self.pypi.release_data(self.project_name,
self.all_versions[0])
if 'home_page' in metadata:
if browser == 'konqueror':
browser = webbrowser.Konqueror()
else:
browser = webbrowser.get()
browser.open(metadata['home_page'], 2)
return 0
print('No homepage URL found', file=sys.stderr)
return 1
def query_metadata_pypi(self):
"""Show pkg metadata queried from PyPI.
@returns: 0
"""
if self.version and self.version in self.all_versions:
metadata = self.pypi.release_data(self.project_name, self.version)
else:
# Give highest version
metadata = self.pypi.release_data(self.project_name,
self.all_versions[0])
if metadata:
if len(self.options.fields) == 1:
try:
print(metadata[self.options.fields[0]])
except KeyError:
pass
else:
for key in metadata.keys():
if (
not self.options.fields or
(self.options.fields and
key.lower() in self.options.fields)
):
print(u'{}: {}'.format(key, metadata[key]))
return 0
def versions_available(self):
"""Query PyPI for a particular version or all versions of a package.
@returns: 0 if version(s) found or 1 if none found
"""
if self.all_versions and self.version in self.all_versions:
print_pkg_versions(self.project_name, [self.version])
elif not self.version and self.all_versions:
print_pkg_versions(self.project_name, self.all_versions)
else:
if self.version:
print(
'No package found for version {}'.format(self.version),
file=sys.stderr)
else:
print(
'No package found for {}'.format(self.project_name),
file=sys.stderr)
return 1
return 0
def parse_search_spec(self, spec):
"""Parse search args and return spec dict for PyPI.
* Owwww, my eyes!. Re-write this.
@param spec: Cheese Shop package search spec
e.g.
name=Cheetah
license=ZPL
license=ZPL AND name=Cheetah
@type spec: string
@returns: tuple with spec and operator
"""
usage = """You can search PyPI by the following:
name
version
author
author_email
maintainer
maintainer_email
home_page
license
summary
description
keywords
platform
download_url
e.g. yolk -S name=Cheetah
yolk -S name=yolk AND license=PSF
"""
if not spec:
print(usage, file=sys.stderr)
return (None, None)
try:
spec = (' ').join(spec)
operator = 'AND'
first = second = ''
if ' AND ' in spec:
(first, second) = spec.split('AND')
elif ' OR ' in spec:
(first, second) = spec.split('OR')
operator = 'OR'
else:
first = spec
(key1, term1) = first.split('=')
key1 = key1.strip()
if second:
(key2, term2) = second.split('=')
key2 = key2.strip()
spec = {}
spec[key1] = term1
if second:
spec[key2] = term2
except:
print(usage, file=sys.stderr)
spec = operator = None
return (spec, operator)
def pypi_search(self):
"""Search PyPI by metadata keyword e.g.
yolk -S name=yolk AND license=GPL
@param spec: Cheese Shop search spec
@type spec: list of strings
spec examples:
["name=yolk"]
["license=GPL"]
["name=yolk", "AND", "license=GPL"]
@returns: 0 on success or 1 if mal-formed search spec
"""
spec = self.pkg_spec
# Add remaining cli arguments to options.pypi_search.
search_arg = self.options.pypi_search
spec.insert(0, search_arg.strip())
(spec, operator) = self.parse_search_spec(spec)
if not spec:
return 1
for pkg in self.pypi.search(spec, operator):
if pkg['summary']:
summary = pkg['summary'].encode('utf-8')
else:
summary = ''
print("""{} ({}):
{}
""".format(pkg['name'].encode('utf-8'), pkg['version'],
summary))
return 0
def show_entry_map(self):
"""Show entry map for a package.
@param dist: package
@param type: string
@returns: 0 for success or 1 if error
"""
pprinter = pprint.PrettyPrinter()
try:
entry_map = pkg_resources.get_entry_map(
self.options.show_entry_map)
if entry_map:
pprinter.pprint(entry_map)
except pkg_resources.DistributionNotFound:
print(
'Distribution not found: {}'.format(
self.options.show_entry_map),
file=sys.stderr)
return 1
return 0
def show_entry_points(self):
"""Show entry points for a module.
@returns: 0 for success or 1 if error
"""
found = False
for entry_point in pkg_resources.iter_entry_points(
self.options.show_entry_points):
found = True
try:
plugin = entry_point.load()
print(plugin.__module__)
print(u' {}'.format(entry_point))
if plugin.__doc__:
print(plugin.__doc__)
print()
except ImportError:
pass
if not found:
print(
'No entry points found for {}'.format(
self.options.show_entry_points),
file=sys.stderr)
return 1
return 0
def yolk_version(self):
"""Show yolk's version."""
print(u'yolk {}'.format(VERSION))
def parse_pkg_ver(self, want_installed):
"""Return tuple with project_name and version from CLI args If the user
gave the wrong case for the project name, this corrects it.
@param want_installed: whether package we want is installed or not
@type want_installed: boolean
@returns: tuple(project_name, version, all_versions)
"""
all_versions = []
arg_str = self.pkg_spec
if '==' not in arg_str:
# No version specified.
project_name = arg_str
version = None
else:
(project_name, version) = arg_str.split('==')
project_name = project_name.strip()
version = version.strip()
# Find proper case for package name.
if want_installed:
project_name = yolklib.case_sensitive_name(project_name)
else:
(project_name, all_versions) = self.pypi.query_versions_pypi(
project_name)
if not len(all_versions):
msg = "I'm afraid we have no '{}' at ".format(project_name)
msg += 'The Cheese Shop. A little Red Leicester, perhaps?'
print(msg, file=sys.stderr)
sys.exit(2)
return (project_name, version, all_versions)
def setup_parser():
"""Setup the argparser.
@returns: parser.ArgumentParser
"""
parser = argparse.ArgumentParser()
parser.add_argument('--version', action='store_true', dest='yolk_version',
default=False,
help='show yolk version and exit')
parser.add_argument('--debug', action='store_true',
default=False, help='show debugging information')
parser.add_argument('-q', '--quiet', action='store_true',
default=False, help='show less output')
parser.add_argument('pkg_spec', nargs='?')
group_local = parser.add_argument_group(
'Query installed Python packages',
'The following options show information about installed Python '
'packages. Activated packages are normal packages on sys.path that '
'can be imported. Non-activated packages need '
"'pkg_resources.require()' before they can be imported, such as "
"packages installed with 'easy_install --multi-version'. PKG_SPEC can "
'be either a package name or package name and version e.g. Paste==0.9')
group_local.add_argument(
'-l', '--list', action='store_true', dest='show_all', default=False,
help='list all Python packages installed by distutils or setuptools. '
'Use PKG_SPEC to narrow results')
group_local.add_argument(
'-a', '--activated', action='store_true',
dest='show_active', default=False,
help='list activated packages installed by distutils or setuptools. '
'Use PKG_SPEC to narrow results')
group_local.add_argument(
'-n', '--non-activated', action='store_true',
dest='show_non_active', default=False,
help='list non-activated packages installed by distutils or '
'setuptools. Use PKG_SPEC to narrow results')
group_local.add_argument(
'-m', '--metadata', action='store_true',
default=False,
help='show all metadata for packages installed by '
'setuptools (use with -l -a or -n)')
group_local.add_argument(
'-f', '--fields', action='store', default=False,
help='show specific metadata (comma-separated) fields; '
'use with -m or -M')
group_local.add_argument(
'-d', '--depends', action='store', dest='show_deps',
metavar='PKG_SPEC',
help='show dependencies for a package installed by '
'setuptools if they are available')
group_local.add_argument(
'--entry-points', action='store',
dest='show_entry_points', default=False,
help='list entry points for a module. e.g. --entry-points '
'nose.plugins',
metavar='MODULE')
group_local.add_argument(
'--entry-map', action='store',
dest='show_entry_map', default=False,
help='list entry map for a package. e.g. --entry-map yolk',
metavar='PACKAGE_NAME')
group_pypi = parser.add_argument_group(
'PyPI (Cheese Shop) options',
'The following options query the Python Package Index:')
group_pypi.add_argument(
'-C', '--changelog', action='store',
dest='show_pypi_changelog', metavar='HOURS',
default=False,
help='show detailed ChangeLog for PyPI for last n hours')
group_pypi.add_argument(
'-D', '--download-links', action='store',
metavar='PKG_SPEC', dest='show_download_links',
default=False,
help="show download URL's for package listed on PyPI. Use with -T to "
'specify egg, source etc')
group_pypi.add_argument(
'-F', '--fetch-package', action='store',
metavar='PKG_SPEC', dest='fetch',
default=False,
help='download package source or egg; You can specify a file type '
'with -T')
group_pypi.add_argument(
'-H', '--browse-homepage', action='store',
metavar='PKG_SPEC', dest='browse_website',
default=False,
help='launch web browser at home page for package')
group_pypi.add_argument('-I', '--pypi-index', action='store',
default=False,
help='specify PyPI mirror for package index')
group_pypi.add_argument('-L', '--latest-releases', action='store',
dest='show_pypi_releases', metavar='HOURS',
default=False,
help='show PyPI releases for last n hours')
group_pypi.add_argument(
'-M', '--query-metadata', action='store',
dest='query_metadata_pypi', default=False,
metavar='PKG_SPEC',
help='show metadata for a package listed on PyPI. Use -f to show '
'particular fields')
group_pypi.add_argument(
'-S', action='store', dest='pypi_search',
default=False,
help='search PyPI by spec and optional AND/OR operator',
metavar='SEARCH_SPEC <AND/OR SEARCH_SPEC>')
group_pypi.add_argument(
'-T', '--file-type', action='store', default='all',
help="You may specify 'source', 'egg', 'svn' or 'all' when using -D.")
group_pypi.add_argument('-U', '--show-updates', action='store_true',
default=False,
help='check PyPI for updates on package(s)')
group_pypi.add_argument('--upgrade', '--pip', action='store_true',
help='run pip command to upgrade outdated '
'packages; may be used with --user')
group_pypi.add_argument('--user', action='store_true',
help='run pip with --user; for use with --upgrade')
group_pypi.add_argument('-V', '--versions-available', action='store',
default=False, metavar='PKG_SPEC',
help='show available versions for given package '
'listed on PyPI')
return parser
def print_pkg_versions(project_name, versions):
"""Print list of versions available for a package.
@returns: None
"""
for ver in versions:
print(u'{} {}'.format(project_name, ver))
def validate_pypi_opts(parser):
"""Check parse options that require pkg_spec.
@returns: pkg_spec
"""
options = parser.parse_args()
options_pkg_specs = [options.versions_available,
options.query_metadata_pypi,
options.show_download_links,
options.browse_website,
options.fetch,
options.show_deps,
]
for pkg_spec in options_pkg_specs:
if pkg_spec:
return pkg_spec
def _updates(names, pypi, user_installs_only):
"""Return updates."""
from multiprocessing.pool import ThreadPool
exception = None
def worker_function(pkg):
for (dist, active) in yolklib.get_distributions(
'all', pkg,
yolklib.get_highest_installed(pkg)):
if exception:
return
width = terminal_width()
if width:
print(u'\rChecking {}'.format(dist.project_name).ljust(width),
end='',
file=sys.stderr)
(project_name, versions) = pypi.query_versions_pypi(
dist.project_name)
return (pkg, dist, project_name, versions)
import multiprocessing
pool = ThreadPool(multiprocessing.cpu_count())
try:
results = pool.map(worker_function, names)
except IOError as _exception:
exception = _exception
print('\r', end='', file=sys.stderr)
if exception:
raise YolkException(exception)
for (pkg, dist, project_name, versions) in results:
try:
if (
user_installs_only and
not dist.location.startswith(site.getusersitepackages())
):
continue
except AttributeError:
# Probably inside a virtualenv.
pass
if versions:
# PyPI returns them in chronological order,
# but who knows if its guaranteed in the API?
# Make sure we grab the highest version:
newest = yolklib.get_highest_version(versions)
if newest != dist.version:
# We may have newer than what PyPI knows about.
if (
pkg_resources.parse_version(dist.version) <
pkg_resources.parse_version(newest)
):
yield (project_name, dist.version, newest)
def terminal_width():
try:
import fcntl
import termios
return struct.unpack(
'HHHH',
fcntl.ioctl(sys.stderr.fileno(),
termios.TIOCGWINSZ,
struct.pack('HHHH', 0, 0, 0, 0)))[1]
except (ImportError, OSError):
# ImportError for non-Unix.
# OSError for non-TTYs.
return None
def main():
"""Let's do it."""
try:
my_yolk = Yolk()
my_yolk.run()
except (HTTPException, IOError, YolkException) as exception:
print(exception, file=sys.stderr)
return 1
except KeyboardInterrupt:
return 1
|
{
"content_hash": "d0eb1c37535c80415a0b74d30ee8840a",
"timestamp": "",
"source": "github",
"line_count": 1174,
"max_line_length": 79,
"avg_line_length": 32.885860306643956,
"alnum_prop": 0.5347855366763365,
"repo_name": "myint/yolk",
"id": "3becfecacbf69c80a3c71d511f6615dd92902e3b",
"size": "38633",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yolk/cli.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "134"
},
{
"name": "Python",
"bytes": "57256"
},
{
"name": "Shell",
"bytes": "1179"
}
],
"symlink_target": ""
}
|
"""
lantz_core.unit
~~~~~~~~~~~~~~~
Unit handling is done using the Pint library. If absent the unit support is
simply disabled.
This module allows the user to specify the UnitRegistry to be used by Lantz
and exposes some useful Pint features.
:copyright: 2015 by Lantz Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import (division, unicode_literals, print_function,
absolute_import)
import logging
UNIT_SUPPORT = True
try:
from pint import UnitRegistry
except ImportError:
UNIT_SUPPORT = False
UNIT_REGISTRY = None
def set_unit_registry(unit_registry):
"""Set the UnitRegistry used by Lantz.
Given that conversion can only happen for units declared in the same
UnitRegistry an application should only use a single registry. This method
should be called before doing anything else in Lantz (even importing driver
) to avoid the creation of a default registry by Eapii.
Parameters
----------
unit_registry : UnitRegistry
UnitRegistry to use for Lantz.
Raises
------
ValueError:
If a unit registry has already been set.
"""
global UNIT_REGISTRY
if UNIT_REGISTRY:
mess = 'The unit registry used by Lantz cannot be changed once set.'
raise ValueError(mess)
UNIT_REGISTRY = unit_registry
def get_unit_registry():
"""Access the UnitRegistry currently in use by Lantz.
If no UnitRegistry has been previously declared using `set_unit_registry`,
a new UnitRegistry is created.
"""
global UNIT_REGISTRY
if not UNIT_REGISTRY:
logger = logging.getLogger(__name__)
logger.debug('Creating default UnitRegistry for Lantz')
UNIT_REGISTRY = UnitRegistry()
return UNIT_REGISTRY
def to_float(value):
"""Convert a value which could be a Quantity to a float.
"""
try:
return value.m if UNIT_SUPPORT else value
except AttributeError:
return value
def to_quantity(value, unit):
"""Turn a value into a Quantity with the given unit.
This is a no-op if unit support is not available.
Parameters
----------
value : float
Value to cast.
unit : unicode
Unit of the quantity to create.
"""
if UNIT_SUPPORT:
ureg = get_unit_registry()
value *= ureg.parse_expression(unit)
return value
|
{
"content_hash": "40b367bfe190e8ad1ed14a63e26254dd",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 79,
"avg_line_length": 24.04901960784314,
"alnum_prop": 0.6583774969425193,
"repo_name": "MatthieuDartiailh/lantz_core",
"id": "94c1b5e77a19f2c884b72e4967a580d70059e312",
"size": "2477",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lantz_core/unit.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "226997"
}
],
"symlink_target": ""
}
|
import flask
from www import server
from www import login
import common.rpc
import html
from collections import OrderedDict
SECTIONS = OrderedDict((
("info", "Stream information"),
("quotes", "Quotes"),
("text", "Simple text responses"),
("misc", "Miscellaneous"),
))
DEFAULT_SECTION = "misc"
def command_format(cmd):
cmd['raw-aliases'] = cmd["aliases"]
cmd["aliases"] = "<code>" + "</code> or <code>".join(map(html.escape, cmd["aliases"])) + "</code>"
cmd["description"] = cmd["description"].split("\n\n")
return cmd
@server.app.route('/help')
@login.with_session
async def help(session):
commandlist = sorted(map(command_format, await common.rpc.bot.get_commands()), key=lambda c: c["raw-aliases"])
commands = {}
for command in commandlist:
section = command['section']
if section not in SECTIONS:
section = DEFAULT_SECTION
commands.setdefault(section, {'list': []})['list'].append(command)
for section in commands:
commands[section]['mod-only'] = all(command['mod-only'] for command in commands[section]['list'])
return flask.render_template('help.html', commands=commands, sections=SECTIONS, session=session)
|
{
"content_hash": "57e60b72902a86647675e43031d41fd3",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 111,
"avg_line_length": 33.529411764705884,
"alnum_prop": 0.7026315789473684,
"repo_name": "andreasots/lrrbot",
"id": "925ec5587b848c97fd6f5ab78ef46b589aeb2f02",
"size": "1140",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "www/help.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "15924"
},
{
"name": "HTML",
"bytes": "65230"
},
{
"name": "JavaScript",
"bytes": "39616"
},
{
"name": "Mako",
"bytes": "318"
},
{
"name": "Python",
"bytes": "381399"
}
],
"symlink_target": ""
}
|
from bluffinmuffin.protocol.enums import BluffinMessageIdEnum
from bluffinmuffin.protocol.interfaces import AbstractResponse
from bluffinmuffin.protocol.data_types import TupleTable
from .list_table_command import ListTableCommand
class ListTableResponse(AbstractResponse):
def __init__(self, success, message_id, message, jsonCommand, tables):
super().__init__(success, message_id, message, ListTableCommand.decode(jsonCommand))
self.tables = tables
def __str__(self):
return '{0} => ({1})'.format(
super().__str__(),
', '.join([x.__str__() for x in self.tables])
)
def _encode_specific(self, d):
super()._encode_specific(d)
d['Tables'] = [x.encode() for x in self.tables]
@classmethod
def decode(cls, obj):
return cls(
obj['Success'],
BluffinMessageIdEnum.parse(obj['MessageId']),
obj['Message'],
obj['Command'],
[TupleTable.decode(x) for x in obj['Tables']]
)
|
{
"content_hash": "3f963284ce09e2007574bb90a09e9d1d",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 92,
"avg_line_length": 34.53333333333333,
"alnum_prop": 0.6148648648648649,
"repo_name": "Ericmas001/BluffinMuffin.Protocol",
"id": "be95ebb9047ea6397fb6c4c9f301a7d1134f65f8",
"size": "1036",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Python/bluffinmuffin/protocol/lobby/list_table_response.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "158977"
},
{
"name": "PowerShell",
"bytes": "3633"
},
{
"name": "Python",
"bytes": "94998"
}
],
"symlink_target": ""
}
|
from twisted.internet import interfaces, reactor
from zope.interface import implementer
import logging
log = logging.getLogger(__name__)
|
{
"content_hash": "b74923db94a1bbf0f18b41e2e8d50ed1",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 48,
"avg_line_length": 17.625,
"alnum_prop": 0.7943262411347518,
"repo_name": "ebu/ebu-tt-live-toolkit",
"id": "7f2d80c3041c0b9a22981827a11dd47c4bcc53d5",
"size": "142",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ebu_tt_live/twisted/node.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "827"
},
{
"name": "CSS",
"bytes": "1835"
},
{
"name": "Gherkin",
"bytes": "158680"
},
{
"name": "HTML",
"bytes": "17041"
},
{
"name": "JavaScript",
"bytes": "156550"
},
{
"name": "Makefile",
"bytes": "1288"
},
{
"name": "Python",
"bytes": "590538"
}
],
"symlink_target": ""
}
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import unittest, time, re
class ModifyDetails(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
self.driver.implicitly_wait(30)
self.base_url = "http://localhost:8000/"
self.verificationErrors = []
self.accept_next_alert = True
def test_modify_details(self):
driver = self.driver
driver.get(self.base_url + "/")
driver.find_element_by_id("inputUser").clear()
driver.find_element_by_id("inputUser").send_keys("bob@concordia.ca")
driver.find_element_by_id("inputPassword").clear()
driver.find_element_by_id("inputPassword").send_keys("student")
driver.find_element_by_xpath("//button[@type='submit']").click()
driver.find_element_by_link_text("My Profile").click()
driver.find_element_by_link_text("Change Password").click()
driver.find_element_by_id("inputNewPassword").clear()
driver.find_element_by_id("inputNewPassword").send_keys("student")
driver.find_element_by_id("verifyNewPassword").clear()
driver.find_element_by_id("verifyNewPassword").send_keys("student")
driver.find_element_by_xpath("//button[@type='submit']").click()
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
except NoSuchElementException, e: return False
return True
def is_alert_present(self):
try: self.driver.switch_to_alert()
except NoAlertPresentException, e: return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally: self.accept_next_alert = True
def tearDown(self):
self.driver.quit()
self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "c70700c079b25c2fba3842e4da717918",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 76,
"avg_line_length": 39.96610169491525,
"alnum_prop": 0.6484308736217134,
"repo_name": "foxtrot94/EchelonPlanner",
"id": "d1c80b56376067c706c3c3e32fce02f0543765a5",
"size": "2382",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testing/automation/modify_details.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "7334"
},
{
"name": "CSS",
"bytes": "294499"
},
{
"name": "HTML",
"bytes": "139939"
},
{
"name": "JavaScript",
"bytes": "33582"
},
{
"name": "Python",
"bytes": "273409"
},
{
"name": "Shell",
"bytes": "7334"
}
],
"symlink_target": ""
}
|
from os.path import dirname, join
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from rest_framework_rules import VERSION
def get_version(version):
return '.'.join(map(str, version))
with open(join(dirname(__file__), 'README.rst')) as f:
long_description = f.read()
github_url = 'https://github.com/escodebar/django-rest-framework-rules'
setup(
name='django-rest-framework-rules',
description='Django REST framework integration for django-rules',
version=get_version(VERSION),
long_description=long_description,
url=github_url,
download_url=('{github_url}/archive/v{version}.tar.gz'
.format(github_url=github_url,
version=get_version(VERSION))),
author='Pablo Escodebar',
author_email='escodebar@gmail.com',
maintainer='Pablo Escodebar',
maintainer_email='escodebar@gmail.com',
license='MIT',
packages=['rest_framework_rules'],
install_requires=['django', 'rules'],
python_requires='>=3.5.*, <4',
py_modules=['six'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
)
|
{
"content_hash": "af628a856051a91edf6305b22054b64c",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 71,
"avg_line_length": 30.4,
"alnum_prop": 0.6394736842105263,
"repo_name": "escodebar/django-rest-framework-rules",
"id": "bd3c0c51156f9b98dfb2e970d77c30bcff6db656",
"size": "1542",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46208"
},
{
"name": "Shell",
"bytes": "149"
}
],
"symlink_target": ""
}
|
import os
import re
from . import util
class IvwPaths:
"""
Given a path to a file try and find all related files.
"""
def __init__(self, filepath):
(path, file) = os.path.split(filepath)
abspath = os.path.abspath(path).split(os.sep)
self.class_name = file
self.file_name = file.lower()
m = re.compile(r".*/include/inviwo/(?P<kind>core|qt)/((?P<qt>editor|widgets))?.*").match("/".join(abspath))
if m: # Core/Qt path
inviwo_pos = next(i for (i,x) in enumerate(abspath) if x=="inviwo" and abspath[i-1]=="include")
self.module_name = m.group("kind")
if self.module_name == "qt": self.module_name += m.group("qt")
self.api_def = "IVW_" + self.module_name.upper() + "_API"
if m.group("kind") == "qt":
self.module_define = "<inviwo/qt/"+ m.group("qt") +"/inviwo" + self.module_name.lower() + "define.h>"
else:
self.module_define = "<inviwo/core/common/inviwo" + self.module_name.lower() + "define.h>"
self.include_define = "<" + "/".join(abspath[inviwo_pos:] + [self.file_name + ".h"]) + ">"
self.header_file = os.sep.join(abspath + [self.file_name + ".h"])
self.source = os.sep.join(abspath[:inviwo_pos-1] + ["src"] + abspath[inviwo_pos+1:] + [self.file_name])
self.cmake_file = self.find_cmake_file(self.source)
self.cmake_header_file = "/".join(["${IVW_INCLUDE_DIR}"] + abspath[inviwo_pos:] + [file.lower() +".h"])
self.cmake_source = util.useForwardSlash(self.make_path_relative(self.source, self.cmake_file))
self.glsl_file_name = self.source
self.module_register_file = None
elif re.compile(r".*/modules/.*").match("/".join(abspath)): # Module path
module_pos = abspath.index("modules")
self.module_name = abspath[module_pos+1]
self.api_def = "IVW_MODULE_" + self.module_name.upper() + "_API"
self.module_define = "<modules/" + self.module_name + "/" + self.module_name + "moduledefine.h>"
self.header_file = os.sep.join(abspath + [self.file_name + ".h"])
self.source = os.sep.join(abspath + [self.file_name])
self.include_define = "<" + util.useForwardSlash(os.path.relpath(self.header_file, os.sep.join(abspath[:module_pos]))) +">"
self.cmake_file = self.find_cmake_file(self.source)
self.cmake_header_file = "${CMAKE_CURRENT_SOURCE_DIR}/" + util.useForwardSlash(self.make_path_relative(self.header_file, self.cmake_file))
self.cmake_source = "${CMAKE_CURRENT_SOURCE_DIR}/" + util.useForwardSlash(self.make_path_relative(self.source, self.cmake_file))
self.glsl_file_name = os.sep.join(abspath[:module_pos + 2] + ["glsl", self.file_name])
self.cmake_glsl_source = "${CMAKE_CURRENT_SOURCE_DIR}/" + util.useForwardSlash(self.make_path_relative(self.glsl_file_name, self.cmake_file))
self.module_register_file = os.sep.join(abspath[:module_pos + 2] + [self.module_name + "module"])
def get_source_file(self, ext = ".cpp"):
return self.source + ext
def get_glsl_file(self, ext = ".glsl"):
return self.glsl_file_name + ext
def get_cmake_source(self,ext = ".cpp"):
return self.cmake_source + ext
def get_cmake_glsl(self,ext = ".glsl"):
return self.cmake_glsl_source + ext
def make_path_relative(self, path, base):
(basedir, filename) = os.path.split(base)
return os.path.relpath(path, basedir)
def find_cmake_file(self, path):
pathlist = path.split(os.sep)
for i in range(len(pathlist),0,-1):
if os.path.exists(os.sep.join(pathlist[:i] + ["CMakeLists.txt"])):
return os.sep.join(pathlist[:i] + ["CMakeLists.txt"])
return []
def info(self):
print("Class name: " + self.class_name)
print("... File name: " + self.file_name)
print("... Module name: " + self.module_name)
print("... API: " + self.api_def)
print("... Module def: " + self.module_define)
print("... Include def: " + self.include_define)
print("... Header file: " + self.header_file)
print("... Source file: " + self.get_source_file())
print("... CMake file: " + self.cmake_file)
print("... CMake header: " + self.cmake_header_file)
print("... CMake source: " + self.get_cmake_source())
def test_for_inviwo(path):
return (os.path.exists(os.sep.join([path] + ['modules', 'base']))
and os.path.exists(os.sep.join([path] + ['include', 'inviwo']))
and os.path.exists(os.sep.join([path] + ['tools', 'templates'])))
def find_inv_path():
path = util.getScriptFolder()
folders=[]
while 1:
path, folder = os.path.split(path)
if folder != "":
folders.append(folder)
else:
if path != "":
folders.append(path)
break
folders.reverse()
basepath = ""
for i in range(len(folders), 0 ,-1):
if test_for_inviwo(os.sep.join(folders[:i])):
basepath = os.sep.join(folders[:i])
break
return basepath
|
{
"content_hash": "c665103ed78cb63d241c50f16603167e",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 146,
"avg_line_length": 41.23684210526316,
"alnum_prop": 0.6426292278238672,
"repo_name": "cgloger/inviwo",
"id": "ad29b2dab940d47dc7bfd3422ed7572c60e34c3f",
"size": "6260",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/ivwpy/ivwpaths.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "87832"
},
{
"name": "Batchfile",
"bytes": "11554"
},
{
"name": "C",
"bytes": "15805810"
},
{
"name": "C#",
"bytes": "713601"
},
{
"name": "C++",
"bytes": "25473057"
},
{
"name": "CMake",
"bytes": "1277310"
},
{
"name": "COBOL",
"bytes": "2921725"
},
{
"name": "CSS",
"bytes": "26526"
},
{
"name": "D",
"bytes": "175403"
},
{
"name": "GLSL",
"bytes": "261754"
},
{
"name": "Groff",
"bytes": "6855"
},
{
"name": "HTML",
"bytes": "2691735"
},
{
"name": "Inno Setup",
"bytes": "8416"
},
{
"name": "Java",
"bytes": "287161"
},
{
"name": "JavaScript",
"bytes": "3140"
},
{
"name": "Logos",
"bytes": "2952312"
},
{
"name": "M",
"bytes": "10146"
},
{
"name": "M4",
"bytes": "16806"
},
{
"name": "Makefile",
"bytes": "5057156"
},
{
"name": "Matlab",
"bytes": "1691"
},
{
"name": "Objective-C",
"bytes": "129135"
},
{
"name": "Objective-C++",
"bytes": "29141"
},
{
"name": "Pascal",
"bytes": "13054"
},
{
"name": "Python",
"bytes": "184506"
},
{
"name": "QMake",
"bytes": "1381"
},
{
"name": "Shell",
"bytes": "258309"
},
{
"name": "Smalltalk",
"bytes": "1501"
},
{
"name": "Smarty",
"bytes": "169"
},
{
"name": "Tcl",
"bytes": "1811"
},
{
"name": "UnrealScript",
"bytes": "1273"
},
{
"name": "XSLT",
"bytes": "3925"
}
],
"symlink_target": ""
}
|
"""
This module contains very useful functions you can use while unittesting
student's code.
.. note::
In order to use the :mod:`unittest` module, you need to make sure that you
have SWIG installed, and that you have *Python development headers*
installed, both of which are probably available through your distribution's
package manager (``apt-get`` or ``yum`` for example).
"""
import interact._utils as _utils
import os
import imp
import inspect
import atexit
import shutil
import tempfile
import subprocess
import os.path
import distutils.core
import capture
#: The absolute path to the swig executable. When this module is imported, the
#: environmental variable ``PATH`` is searched for a file named ``swig``, this
#: variable will be set to the first one that is found. This variable will equal
#: ``None`` if no such file could be found.
swig_path = _utils.which("swig")
class CouldNotCompile(RuntimeError):
"""
Exception raised when a student's code could not be compiled into a single
library file.
:ivar message: A short message describing the exception.
:ivar stderr: The output that was received through standard error. This is
output by ``distutils.core.setup``.
"""
def __init__(self, message, stderr):
self.message = message
self.stderr = stderr
RuntimeError.__init__(self)
def __str__(self):
output = [
self.message,
"---BEGIN STDERR---",
self.stderr,
"---END STDERR---"
]
return "\n".join(output)
def _build_extension(module, mod_ext, working_directory):
os.chdir(working_directory)
distutils.core.setup(
name = module,
ext_modules = [mod_ext],
py_modules = [module],
script_name = "setup.py",
script_args = ["build_ext", "--inplace"]
)
def _generate_shared_libraries(modules, wrapper_directory):
"""
Compiles modules and wrappers to shared libraries using distutils.
:raises: :class:`CouldNotCompile` if the extension could not be compiled.
"""
wrapper_directory = _utils.resolve_path(wrapper_directory)
for module in modules:
so_name = "_%s" % (module, )
wrapper_file = os.path.join(wrapper_directory, module + "_wrap.cxx")
mod_ext = distutils.core.Extension(
str(so_name), sources = [str(wrapper_file)]
)
try:
captured = capture.capture_function(
_build_extension, str(module), mod_ext, str(wrapper_directory)
)
captured.wait()
except SystemExit:
# Setup will call exit which can make the running script exit rather
# suddenly. At least give the user an error with a traceback.
raise CouldNotCompile(
"Could not compile extension module.",
stderr = captured.stderr.read()
)
def _generate_swig_wrappers(interface_files, output_directory):
"""
Generates SWIG Wrapper files (.cxx) and python modules that can be
compiled into a shared library by distutils.
:raises: ``EnvironmentError`` if swig is not installed.
"""
if swig_path is None:
raise EnvironmentError("No swig executable found.")
output_directory = _utils.resolve_path(output_directory)
for current_file in interface_files:
module_name = _utils.file_name(current_file)
output_file = os.path.join(
output_directory, "%s_wrap.cxx" % (module_name, )
)
# Let swig generate the wrapper files.
subprocess.check_call(
[swig_path, "-c++", "-python", "-o", output_file, current_file],
cwd = output_directory,
stdout = _utils.DEVNULL,
stderr = subprocess.STDOUT
)
# These are necessary to allow STL types in python
STD_INTERFACES = [
"std_deque.i", "std_list.i", "std_map.i", "std_pair.i", "std_set.i",
"std_string.i", "std_vector.i", "std_sstream.i"
]
# C++ Directives that expose extra functionality in the underlying C++ code.
EXPOSURE_DIRECTIVES = [
"#define private public", # Expose private member variables to module
"#define protected public",
"#define class struct" # Expose unmarked private member variables
]
def _generate_swig_interface(file_path, output_directory):
"""
Generates a SWIG Interface file (.i) that can be compiled with SWIG to
a shared library file that can be imported into python for testing.
"""
file_path = _utils.resolve_path(file_path)
output_directory = _utils.resolve_path(output_directory)
# Figure out what this module will be named by getting just the filename
# (minus extension) of the code file.
module_name = _utils.file_name(file_path)
# -MM flag returns all dependencies needed to compile file.
gpp_process = subprocess.Popen(
["g++", "-MM", file_path],
stdout = subprocess.PIPE,
stderr = subprocess.STDOUT
)
gpp_output = gpp_process.communicate()[0]
# Get dependencies, minus the .o file and the white space
gpp_output = gpp_output.split(":")[1].strip()
dependencies = [i.strip() for i in gpp_output.split(" ") if i.strip() != "\\"]
necessary_includes = []
for include in dependencies:
necessary_includes.append("#include \"%s\"" % (include))
# TODO: Add comment describing what's going on here.
if ".h" in include:
include = include.replace(".hpp", ".h")
include = include.replace(".h", ".cpp")
if file_path not in include and os.path.isfile(include):
necessary_includes.append("#include \"%s\"" % (include))
with open(os.path.join(output_directory, module_name + ".i"), "w") as f:
f.write("%%module %s\n\n" % (module_name, ))
# Ensure we include all of the special swig interface files that allow
# us to interop with the C++ Standard Library.
for interface in STD_INTERFACES:
f.write("%%include \"%s\"\n" % (interface, ))
# Write directives inside and out of wrapper for consistency in wrapped
# file.
f.write("\n".join(EXPOSURE_DIRECTIVES) + "\n")
f.write("using namespace std;\n\n")
f.write("%{\n")
f.write("\n".join(EXPOSURE_DIRECTIVES) + "\n")
for include in necessary_includes:
f.write("%s\n" % include)
f.write("%}\n\n")
# SWIG cannot import global include like iostream, but it does need
# all local includes
local_includes = \
(include for include in necessary_includes if '<' not in include)
for include in local_includes:
f.write("%s\n" % include.replace("#", "%"))
return module_name
to_delete = []
def _cleanup():
for i in to_delete:
shutil.rmtree(i)
atexit.register(_cleanup)
def load_files(files):
"""
Compiles and loads functions and classes in code files and makes them
callable from within Python.
:param files: A list of file paths. All of the files will be compiled and
loaded together. These must be absolute paths, see
:meth:`Harness.student_files <interact.core.Harness.student_files>`.
:returns: A ``dict`` where every file that was passed in is a key in the
dictionary (without its file extension) and the value is another
``dict`` where each key is the name of a function or class in the
file and the value is a callable you can use to actually execute
or create an instance of that function or class.
:raises: ``EnvironmentError`` if swig is not properly installed.
:raises: :class:`CouldNotCompile` if the student's code could not be
compiled into a library file.
.. warning::
During testing, oftentimes the execution of loaded code's ``main()``
function failed. We haven't determined what the problem is yet so for
now don't use this function to test ``main()`` functions (the
:mod:`interact.execute` module should work well instead).
.. code-block:: python
>>> print open("main.cpp").read()
#include <iostream>
using namespace std;
class Foo {
int a_;
public:
Foo(int a);
int get_a() const;
};
Foo::Foo(int a) : a_(a) {
// Do nothing
}
int Foo::get_a() const {
return a_;
}
int bar() {
Foo foo(3);
cout << "foo.get_a() = " << foo.get_a() << endl;
return 2;
}
int main() {
return 0;
}
>>> students_code = interact.unittest.load_files(["main.cpp"])
>>> Foo = students_code["main"]["Foo"]
>>> bar = students_code["main"]["bar"]
>>> b = Foo(3)
>>> b.get_a()
3
>>> rvalue = b.bar()
foo.get_a() = 3
>>> print rvalue
2
If you want to test a function that prints things to stdout or reads from
stdin (like the ``bar()`` function in the above example) you can use the
:mod:`interact.capture` module.
"""
module_dict = {}
# Get a directory we can work within.
temp_dir = tempfile.mkdtemp()
modules = []
for f in files:
modules.append(_generate_swig_interface(f, temp_dir))
interface_files = ((module + ".i") for module in modules)
_generate_swig_wrappers(interface_files, temp_dir)
_generate_shared_libraries(modules, temp_dir)
for module in modules:
module_dict[module] = {}
# Load up the python module we created whose function will let us access
# the C++ ones.
created_module = os.path.join(temp_dir, module + ".py")
mod = imp.load_source(module, created_module)
# Get all functions and classes in this module
filter_func = lambda a: inspect.isbuiltin(a) or inspect.isclass(a)
for name, impl in inspect.getmembers(mod, filter_func):
module_dict[module][name] = impl
to_delete.append(temp_dir)
return module_dict
|
{
"content_hash": "803493d24c6732f78539a2d1c5fe1cd6",
"timestamp": "",
"source": "github",
"line_count": 312,
"max_line_length": 82,
"avg_line_length": 32.61217948717949,
"alnum_prop": 0.6102211302211302,
"repo_name": "galah-group/galah-interact-python",
"id": "4261bfbd26697fc4cb1013a8de89e26041dcfd24",
"size": "10881",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "interact/unittest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "418"
},
{
"name": "Python",
"bytes": "107116"
},
{
"name": "Shell",
"bytes": "2596"
}
],
"symlink_target": ""
}
|
<<<<<<< HEAD
<<<<<<< HEAD
r"""Fixer for unicode.
* Changes unicode to str and unichr to chr.
* If "...\u..." is not unicode literal change it into "...\\u...".
* Change u"..." into "...".
"""
from ..pgen2 import token
from .. import fixer_base
_mapping = {"unichr" : "chr", "unicode" : "str"}
class FixUnicode(fixer_base.BaseFix):
BM_compatible = True
PATTERN = "STRING | 'unicode' | 'unichr'"
def start_tree(self, tree, filename):
super(FixUnicode, self).start_tree(tree, filename)
self.unicode_literals = 'unicode_literals' in tree.future_features
def transform(self, node, results):
if node.type == token.NAME:
new = node.clone()
new.value = _mapping[node.value]
return new
elif node.type == token.STRING:
val = node.value
if not self.unicode_literals and val[0] in '\'"' and '\\' in val:
val = r'\\'.join([
v.replace('\\u', r'\\u').replace('\\U', r'\\U')
for v in val.split(r'\\')
])
if val[0] in 'uU':
val = val[1:]
if val == node.value:
return node
new = node.clone()
new.value = val
return new
=======
r"""Fixer for unicode.
* Changes unicode to str and unichr to chr.
* If "...\u..." is not unicode literal change it into "...\\u...".
* Change u"..." into "...".
"""
from ..pgen2 import token
from .. import fixer_base
_mapping = {"unichr" : "chr", "unicode" : "str"}
class FixUnicode(fixer_base.BaseFix):
BM_compatible = True
PATTERN = "STRING | 'unicode' | 'unichr'"
def start_tree(self, tree, filename):
super(FixUnicode, self).start_tree(tree, filename)
self.unicode_literals = 'unicode_literals' in tree.future_features
def transform(self, node, results):
if node.type == token.NAME:
new = node.clone()
new.value = _mapping[node.value]
return new
elif node.type == token.STRING:
val = node.value
if not self.unicode_literals and val[0] in '\'"' and '\\' in val:
val = r'\\'.join([
v.replace('\\u', r'\\u').replace('\\U', r'\\U')
for v in val.split(r'\\')
])
if val[0] in 'uU':
val = val[1:]
if val == node.value:
return node
new = node.clone()
new.value = val
return new
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
=======
r"""Fixer for unicode.
* Changes unicode to str and unichr to chr.
* If "...\u..." is not unicode literal change it into "...\\u...".
* Change u"..." into "...".
"""
from ..pgen2 import token
from .. import fixer_base
_mapping = {"unichr" : "chr", "unicode" : "str"}
class FixUnicode(fixer_base.BaseFix):
BM_compatible = True
PATTERN = "STRING | 'unicode' | 'unichr'"
def start_tree(self, tree, filename):
super(FixUnicode, self).start_tree(tree, filename)
self.unicode_literals = 'unicode_literals' in tree.future_features
def transform(self, node, results):
if node.type == token.NAME:
new = node.clone()
new.value = _mapping[node.value]
return new
elif node.type == token.STRING:
val = node.value
if not self.unicode_literals and val[0] in '\'"' and '\\' in val:
val = r'\\'.join([
v.replace('\\u', r'\\u').replace('\\U', r'\\U')
for v in val.split(r'\\')
])
if val[0] in 'uU':
val = val[1:]
if val == node.value:
return node
new = node.clone()
new.value = val
return new
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
|
{
"content_hash": "4376da7a15ced909bad8ac9c5023f734",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 77,
"avg_line_length": 29.606060606060606,
"alnum_prop": 0.513050153531218,
"repo_name": "ArcherSys/ArcherSys",
"id": "86dea3bc94ad2d1ccb8f871720010f9563536d56",
"size": "3908",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lib/lib2to3/fixes/fix_unicode.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
'''
Created on Apr 11, 2014
@author: igor
'''
class AbstractAction():
pass
class CreateNewModelInstanceAction(AbstractAction):
pass
class CreateModelAttributeAction(AbstractAction):
pass
class CreateReferenceInstanceAction(AbstractAction):
pass
class CreateAttributeValuesAction(AbstractAction):
pass
class DeleteElementAction(AbstractAction):
pass
class LoadAction(AbstractAction):
pass
class SaveAction(AbstractAction):
pass
|
{
"content_hash": "4ddb726fabcee196ea8616d81b97ea48",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 52,
"avg_line_length": 12.552631578947368,
"alnum_prop": 0.7568134171907757,
"repo_name": "igordejanovic/MetaMoRP",
"id": "0caf7348f5d0618504947493d6c50e5dc10a6d37",
"size": "477",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "metamorp/core/actions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Objective-C++",
"bytes": "5554"
},
{
"name": "Python",
"bytes": "15737"
}
],
"symlink_target": ""
}
|
import matplotlib
matplotlib.use('Agg')
from matplotlib.mlab import PCA
import matplotlib.pyplot as plt
import argparse
import os
import shutil
import time
import sys
import copy
import torch
from torchvision import datasets, transforms
from torch.autograd import Variable
import numpy as np
from random import shuffle
import utils
# networks
import model_net
# triplet and loss
import losses
from cub_loader import CUBImages
# Training settings
parser = argparse.ArgumentParser(description='Metric Learning With Triplet Loss and Unknown Classes')
parser.add_argument('--network', type=str, default='Simple',
help='network architecture to use (default: Simple)')
parser.add_argument('--load', type=str, default='',
help='path to checkpoint (default: none)')
parser.add_argument('--output', type=str, default='output',
help='output directory (default: output)')
parser.add_argument('--feature-size', type=int, default=64,
help='size for embeddings/features to learn')
parser.add_argument('--num-train', type=int, default=8,
help='Number of train classes')
parser.add_argument('--num-val', type=int, default=4,
help='Number of validation classes')
parser.add_argument('--num-test', type=int, default=4,
help='Number of test classes')
parser.add_argument('--batch-size', type=int, default=8,
help='input batch size for training (default: 8)')
parser.add_argument('--normalize-features', action='store_true', default=False,
help='normalize features')
# parameters
feature_size = 0
im_size = 64
use_cmd_split=True # if false, set the following values to something meaningful
num_train=0
num_val=0
num_test=0
train_classes=None # triplets_per_class*train_classes should be a multiple of batch size (64 by default)
val_classes=None
test_classes=None
output_dir = ''
# main
def main():
global args, feature_size, im_size
global num_train, num_val, num_test
global train_classes, val_classes, test_classes
global output_dir
args = parser.parse_args()
output_dir = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
args.output)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# train/val/test split
if use_cmd_split:
num_train=args.num_train
num_val=args.num_val
num_test=args.num_test
train_classes=range(num_train) # triplets_per_class*train_classes should be a multiple of batch size (64 by default)
val_classes=range(num_train-num_val,num_train)
test_classes=range(num_train,num_train+num_test)
# feature size
feature_size = args.feature_size
# network
Net = None
model = None
if args.network == 'Simple':
print('Using simple net')
Net = model_net.SimpleNet
elif args.network == 'Inception':
print('Using inception net')
Net = model_net.InceptionBased
# force image size to be 299
im_size = 299
elif args.network == 'Squeeze':
print('Using squeezenet')
Net = model_net.SqueezeNetBased
# force image size to be 224
im_size = 224
elif args.network == 'Shallow':
print('Using shallownet')
Net = model_net.ShallowNet
# force image size to be 96
im_size = 96
elif args.network == 'ResNet':
print('Using resnet')
Net = model_net.ResNetBased
# force image size to be 224
im_size = 224
else:
assert(False)
model = Net(feature_size=feature_size, im_size=im_size, normalize=args.normalize_features)
# data
dir_path = os.path.dirname(os.path.realpath(__file__))
DLoader = CUBImages
data_path = os.path.join(dir_path, 'datasets/cub-2011')
train_data_set = DLoader(data_path,
transform=transforms.Compose([
transforms.ToTensor(),
]),
classes=train_classes, im_size=im_size)
train_loader = torch.utils.data.DataLoader(
train_data_set, batch_size=args.batch_size, shuffle=False,
sampler=torch.utils.data.sampler.SequentialSampler(train_data_set))
val_data_set = DLoader(data_path,
transform=transforms.Compose([
transforms.ToTensor(),
]),
classes=val_classes, im_size=im_size)
val_loader = torch.utils.data.DataLoader(
val_data_set, batch_size=args.batch_size/2, shuffle=False,
sampler=torch.utils.data.sampler.SequentialSampler(val_data_set))
test_data_set = DLoader(data_path,
transform=transforms.Compose([
transforms.ToTensor(),
]),
classes=test_classes, im_size=im_size)
test_loader = torch.utils.data.DataLoader(
test_data_set, batch_size=args.batch_size/3, shuffle=False,
sampler=torch.utils.data.sampler.SequentialSampler(test_data_set))
# resume from a checkpoint
print("=> loading checkpoint '{}'".format(args.load))
checkpoint = torch.load(args.load)
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.load, checkpoint['epoch']))
model.eval()
# select some random classes for visualization
train_sel = [[0,6,10,20,49,53]]
val_sel = [[64,74,80,86,95,98]]
test_sel = [[105,129,144,156,183,192]]
markers = ['o', '^', 'v', 'p', 's', 'D']
#print('Generating training set embeddings')
#train_embeddings, train_labels_true = ComputeEmbeddings(train_loader, model)
print('Generating validation set embeddings')
val_embeddings, val_labels_true = ComputeEmbeddings(val_loader, model)
print('Generating PCA for validation set')
SavePCA(val_embeddings, val_labels_true, val_sel, markers,
os.path.join(output_dir, 'val'))
print('Generating test set embeddings')
Test_embeddings, test_labels_true = ComputeEmbeddings(test_loader, model)
print('Generating PCA for test set')
SavePCA(test_embeddings, test_labels_true, test_sel, markers,
os.path.join(output_dir, 'test'))
def ComputeEmbeddings(loader, enet):
global feature_size
enet.eval()
embeddings = np.zeros(shape=(len(loader.dataset), feature_size),
dtype=float)
labels_true = np.zeros(shape=(len(loader.dataset)), dtype=int)
for batch_idx, (data, classes, ids) in enumerate(loader):
data = Variable(data)
# compute embeddings
f = enet(data)
embeddings[ids.numpy(),:] = f.cpu().data.numpy()
labels_true[ids.numpy()] = classes.cpu().numpy()
return embeddings, labels_true
def SavePCA(features, labels, classes, markers, prefix):
for n, cc in enumerate(classes):
ids = list()
cc_ids = list()
for c in cc:
c_ids = list(np.where(labels == c)[0])
#c_ids = np.random.choice(c_ids, 20) # select random 20
ids = ids + list(c_ids)
cc_ids.append(list(c_ids))
ids = np.array(ids)
samples = features[ids,:]
pca = PCA(samples)
num_plotted = 0
for i in range(len(cc)):
num = len(cc_ids[i])
plt.plot(pca.Y[num_plotted:num_plotted+num,0],
pca.Y[num_plotted:num_plotted+num,1],
markers[i], markersize=5, alpha=0.5)
num_plotted += num
plt.axis('off')
plt.savefig(prefix + ('_%d.png' % n))
if __name__ == '__main__':
main()
|
{
"content_hash": "faf8c7cfb13ddb56d59916a6233138ad",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 125,
"avg_line_length": 33.557522123893804,
"alnum_prop": 0.6293512658227848,
"repo_name": "schinmayee/metric-learning",
"id": "d6f61cd6ebaa3d07a31690b2dac8f3790ecbbb6a",
"size": "7607",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pca_vis.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "102609"
},
{
"name": "Shell",
"bytes": "5458"
}
],
"symlink_target": ""
}
|
import os
import csv
import shutil
import subprocess
from datetime import datetime
from django.db import connection
from django.conf import settings
from django.core.management.base import BaseCommand
def write(path, header, cursor):
with open(path, 'w') as fout:
writer = csv.writer(fout)
writer.writerow(header)
for row in cursor.fetchall():
row = [str(s).encode('utf-8') for s in row]
writer.writerow(row)
class Command(BaseCommand):
help = 'Export this servo database in a portable format'
def handle(self, *args, **options):
# creates a folder, then dumps different portable tables as separate
# files to it, then compresses the folder and deletes it
if not os.path.exists(settings.BACKUP_DIR):
os.mkdir(settings.BACKUP_DIR)
dirname = datetime.now().strftime('%Y%m%d-%H%M')
backupdir = os.path.join(settings.BACKUP_DIR, dirname)
if not os.path.exists(backupdir):
os.mkdir(backupdir)
path = os.path.join(backupdir, 'notes.csv')
cursor = connection.cursor()
cursor.execute("""SELECT id, order_id, created_by_id, created_at, body
FROM servo_note""")
header = ['ID', 'ORDER_ID', 'USER_ID', 'CREATED_AT', 'NOTE']
write(path, header, cursor)
path = os.path.join(backupdir, 'users.csv')
header = ['ID', 'USERNAME', 'FIRST_NAME', 'LAST_NAME', 'EMAIL']
cursor.execute("""SELECT id, username, first_name, last_name, email
FROM servo_user WHERE is_visible = TRUE""")
write(path, header, cursor)
path = os.path.join(backupdir, 'orders.csv')
header = ['ID', 'CODE', 'CREATED_AT',
'CLOSED_AT', 'CUSTOMER_ID', 'USER_ID', 'QUEUE_ID']
cursor.execute("""SELECT id, code, created_at, closed_at,
customer_id, user_id, queue_id
FROM servo_order""")
write(path, header, cursor)
path = os.path.join(backupdir, 'queues.csv')
header = ['ID', 'NAME', 'DESCRIPTION',
'CLOSED_AT', 'CUSTOMER_ID', 'USER_ID', 'QUEUE_ID']
cursor.execute("""SELECT id, title, description FROM servo_queue""")
write(path, header, cursor)
path = os.path.join(backupdir, 'devices.csv')
header = ['ID', 'SERIAL_NUMBER', 'IMEI',
'CONFIGURATION', 'WARRANTY_STATUS', 'PURCHASE_DATE', 'NOTES']
cursor.execute("""SELECT id, sn, imei, configuration, warranty_status,
purchased_on, notes FROM servo_device""")
write(path, header, cursor)
path = os.path.join(backupdir, 'repairs.csv')
header = ['ID', 'ORDER_ID', 'DEVICE_ID', 'USER_ID',
'SUBMITTED_AT', 'COMPLETED_AT', 'REQUEST_REVIEW',
'TECH_ID', 'UNIT_RECEIVED', 'CONFIRMATION',
'REFERENCE', 'SYMPTOM', 'DIAGNOSIS', 'NOTES']
cursor.execute("""SELECT id, order_id, device_id,
created_by_id, submitted_at, completed_at,
request_review, tech_id, unit_received_at, confirmation, reference,
symptom, diagnosis, notes
FROM servo_repair
WHERE submitted_at IS NOT NULL""")
write(path, header, cursor)
header = ['ID', 'CODE', 'TITLE', 'DESCRIPTION',
'PRICE_PURCHASE_EXCHANGE', 'PRICE_PURCHASE_STOCK',
'PRICE_SALES_EXCHANGE', 'PRICE_SALES_STOCK', 'COMPONENT_CODE',
'PART_TYPE', 'EEE_CODE']
cursor.execute("""SELECT id, code, title, description,
price_purchase_exchange, price_purchase_stock,
price_sales_exchange, price_sales_stock,
component_code, part_type, eee_code
FROM servo_product""")
path = os.path.join(backupdir, 'products.csv')
write(path, header, cursor)
header = ['ID', 'PARENT_ID', 'NAME', 'PHONE', 'EMAIL',
'STREET_ADDRESS', 'POSTAL_CODE', 'CITY'
'COUNTRY', 'NOTES']
cursor.execute("""SELECT id, parent_id, name, phone,
email, street_address, zip_code, city, country, notes
FROM servo_customer""")
path = os.path.join(backupdir, 'customers.csv')
write(path, header, cursor)
path = os.path.join(backupdir, 'order_products.csv')
header = ['ID', 'PRODUCT_ID', 'ORDER_ID', 'CODE', 'TITLE',
'DESCRIPTION', 'AMOUNT', 'SERIAL_NUMBER', 'KBB_SN',
'IMEI', 'REPORTED', 'PRICE_CATEGORY', 'PRICE'
'COMPTIA_CODE', 'COMPTIA_MODIFIER']
cursor.execute("""SELECT id, product_id, order_id, code,
title, description, amount, sn, price, kbb_sn,
imei, should_report, price_category, price,
comptia_code, comptia_modifier
FROM servo_serviceorderitem""")
write(path, header, cursor)
path = os.path.join(backupdir, 'parts.csv')
header = ['ID', 'REPAIR_ID', 'ORDER_ITEM_ID',
'NUMBER', 'TITLE', 'COMPTIA_CODE', 'COMPTIA_MODIFIER',
'RETURN_ORDER', 'RETURN_STATUS', 'RETURN_CODE',
'ORDER_STATUS', 'COVERAGE', 'SHIP_TO', 'RETURNED_AT']
cursor.execute("""SELECT id, repair_id, order_item_id,
part_number, part_title, comptia_code, comptia_modifier,
return_order, return_status, return_code,
order_status, coverage_description, ship_to, returned_at
FROM servo_servicepart""")
write(path, header, cursor)
path = os.path.join(backupdir, 'order_devices.csv')
header = ['ID', 'ORDER_ID', 'DEVICE_ID', 'REPORTED']
cursor.execute("""SELECT id, order_id, device_id, should_report
FROM servo_orderdevice""")
write(path, header, cursor)
subprocess.call(['tar', '-C', backupdir, '-zcf', '%s.tar.gz' % backupdir, '.'])
shutil.rmtree(backupdir, ignore_errors=True)
|
{
"content_hash": "879a54c35912ef02387bca760b61c5ec",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 87,
"avg_line_length": 43.27536231884058,
"alnum_prop": 0.5780308104487609,
"repo_name": "fpsw/Servo",
"id": "253c4fae09929b1b494b2990b739295146b557fa",
"size": "5997",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "servo/management/commands/backup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "114750"
},
{
"name": "HTML",
"bytes": "500795"
},
{
"name": "JavaScript",
"bytes": "393764"
},
{
"name": "Makefile",
"bytes": "297"
},
{
"name": "Python",
"bytes": "1121876"
},
{
"name": "Shell",
"bytes": "680"
}
],
"symlink_target": ""
}
|
SECRET_KEY = 'dog'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
}
CHANNEL_LAYERS = {
'default': {
'BACKEND': 'asgiref.inmemory.ChannelLayer',
'ROUTING': "tests.routing.channel_routing",
},
}
MIDDLEWARE_CLASSES = []
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'channels',
'tests'
)
|
{
"content_hash": "3dc2919f5d72d0e01e1a8e1dbd9e1723",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 51,
"avg_line_length": 17.708333333333332,
"alnum_prop": 0.5882352941176471,
"repo_name": "linuxlewis/channels-api",
"id": "866f283bfe367ffdd5fa05d27cecb272633a9afd",
"size": "425",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "694"
},
{
"name": "Python",
"bytes": "29661"
}
],
"symlink_target": ""
}
|
import os
import sys
#sys.path.append('/Users/carlomazzaferro/Documents/Code/variantannotation-master')
from variantannotation import annotate_batch
from variantannotation import myvariant_parsing_utils
from variantannotation import mongo_DB_export
from variantannotation import create_output_files
from variantannotation import utilities
from variantannotation import MongoDB_querying
#set paths
collection_name = 'Test_Normal_Targeted'
db_name = 'My_Variant_Database'
#set paths
filepath = "/Volumes/Seagate Backup Plus Drive/vcf_files/"
csv_file = "normal_targeted_seq.hg19_multianno.csv"
vcf_file = "normal_targeted_seq.vcf"
os.chdir(filepath)
#ANNOVAR_PATH = '/database/annovar/'
#IN_PATH = '/data/Nof1/file.vcf'
#OUT_PATH = '/data/ccbb_internal/interns/Carlo/annovar_results'
#1. Get csv file: run annovar
#utilities.run_annovar(ANNOVAR_PATH, IN_PATH, OUT_PATH)
#METHOD 1: by chunks, iteratively.
chunksize = 10000
step = 0
collection_name = 'ANNOVAR_MyVariant_chunks'
db_name = 'My_Variant_Database'
#Get variant list. Should always be the first step after running ANNOVAR
open_file = myvariant_parsing_utils.VariantParsing()
list_file = open_file.get_variants_from_vcf(vcf_file)
#Run process, export to MongoDB in-built
as_batch = annotate_batch.AnnotationMethods()
as_batch.by_chunks(list_file, chunksize, step, csv_file, collection_name, db_name)
#Apply filter(s).
filter_collection = MongoDB_querying.Filters(db_name, collection_name)
rare_cancer_variants = filter_collection.rare_cancer_variant()
rare_disease_variants = filter_collection.rare_disease_variant()
cadd_phred_high_impact_variants = filter_collection.rare_high_impact_variants()
#Create 4 output files: annotated vcf, annotated csv, filtered vcf, filtered csv
#Annotated vcf and csv, unfiltered. Will contain all info coming from annovar and myvariant
out_unfiltered_vcf_file = filepath + "/normal_targ_unfilterd_vcf_annotated.vcf"
out_unfiltered_csv_file = filepath + "/normal_targ_unfiltered_csv_annotated.csv"
rare_cancer_variants_csv = filepath + "/normal_targ_rare_cancer_vars.csv"
rare_cancer_variants_vcf = filepath + "/normal_targ_rare_cancer_vars.vcf"
rare_disease_variants_csv = filepath + "/normal_targ_rare_disease_vars.csv"
rare_diseasw_variants_vcf = filepath + "/normal_targ_rare_disease_vars.vcf"
cadd_phred_high_impact_variants_csv = filepath + "/normal_targ_cadd_phred_high_impact_variants.csv"
cadd_phred_high_impact_variants_vcf = filepath + "/normal_targ_cadd_phred_high_impact_variants.vcf"
in_vcf_file = filepath + "/normal_targeted_seq.vcf.gz"
#Create writer object
my_writer_1 = create_output_files.FileWriter(db_name, collection_name)
#Write collection to csv and vcf
my_writer_1.generate_unfiltered_annotated_csv(out_unfiltered_csv_file)
my_writer_1.generate_unfiltered_annotated_vcf(in_vcf_file, out_unfiltered_vcf_file)
#Crete writer object for filtered lists:
my_writer_2 = create_output_files.FileWriter(db_name, collection_name)
#cancer variants filtered files
my_writer_2.generate_annotated_csv(rare_cancer_variants, rare_cancer_variants_csv)
my_writer_2.generate_annotated_vcf(rare_cancer_variants, in_vcf_file, rare_cancer_variants_vcf)
#disease variants filtered files
my_writer_2.generate_annotated_csv(rare_disease_variants, rare_disease_variants)
my_writer_2.generate_annotated_vcf(rare_disease_variants, rare_disease_variants)
#high impact cadd_phredd filtered files
my_writer_2.generate_annotated_csv(cadd_phred_high_impact_variants, cadd_phred_high_impact_variants_csv)
my_writer_2.generate_annotated_vcf(cadd_phred_high_impact_variants, cadd_phred_high_impact_variants_vcf)
#---------------#--------------#---------------#--------------#---------------#--------------#---------------#
#METHOD 2: usign full file, and holding it in memory (OK for smaller files) ##TEST THIS##
#get variant list. Should always be the first step after running ANNOVAR
open_file = myvariant_parsing_utils.VariantParsing()
list_file = open_file.get_variants_from_vcf(vcf_file)
#Run process, data saved to joint_list
as_one_file = annotate_batch.AnnotationMethods()
joint_list = as_one_file.full_file(list_file, csv_file)
#Name Collection & DB
collection_name = 'ANNOVAR_MyVariant_full'
db_name = 'My_Variant_Database'
#Export
exporting_function = mongo_DB_export.export
exporting_function(joint_list, collection_name, db_name)
#Generate output files
out_vcf_file = filepath + "/Tumor_RNAseq_rare_variants_ANNOTATED_FULL.vcf"
out_csv_file = filepath + "/Tumor_RNAseq_rare_variants_ANNOTATED_FULL.csv"
in_vcf_file = filepath + "/Tumor_RNAseq_rare_variants_VCF.vcf"
create_output_files.generate_annotated_vcf(joint_list, in_vcf_file, out_vcf_file)
create_output_files.generate_annotated_csv(joint_list, out_csv_file)
#Filtering
#---------------#--------------#---------------#--------------#---------------#--------------#---------------#
#METHOD 3: ignore annovar, get data solely from myvariant (much faster, requires nothing but a VCF file.
#will however be incomplete (some variants will have no information).
#Get variant list form vcf file
open_file = myvariant_parsing_utils.VariantParsing()
list_file = open_file.get_variants_from_vcf(vcf_file)
#Run process
my_variants = annotate_batch.AnnotationMethods()
myvariant_data = my_variants.my_variant_at_once(list_file)
#Name Collection & DB
collection_name = 'My_Variant_Info_Collection_Full'
db_name = 'My_Variant_Database'
#Export
exporting_function = mongo_DB_export.export
exporting_function(myvariant_data, collection_name, db_name)
#---------------#--------------#---------------#--------------#---------------#--------------#---------------#
#METHOD 4: ignore annovar, Get data solely from myvariant (much faster, requires nothing but a VCF file.
#will however be incomplete (some variants will have no information).
#Do so BY CHUNKS. Export function is built in the methods myvariant_chunks
import myvariant
filepath = "/Users/carlomazzaferro/Desktop/"
vcf_file = "ShortSample.vcf"
os.chdir(filepath)
chunksize = 1000
step = 0
#Get variant list from vcf file
open_file = myvariant_parsing_utils.VariantParsing()
list_ids = list(myvariant.get_hgvs_from_vcf(vcf_file))
list_file = open_file.get_variants_from_vcf(vcf_file)
#Name Collection & DB
collection_name = 'My_Variant_Info_Collection_Chunks'
db_name = 'My_Variant_Database'
#Run process, export to MongoDB in-built
my_variants = annotate_batch.AnnotationMethods()
myvariant_data = my_variants.myvariant_chunks(list_file, chunksize, step, collection_name, db_name)
out_vcf_file = filepath + "/Tumor_RNAseq_rare_variants_ANNOTATED_MYV_FULL.vcf"
out_csv_file = filepath + "/Tumor_RNAseq_rare_variants_ANNOTATED_MyV_FULL.csv"
in_vcf_file = filepath + "/Tumor_RNAseq_rare_variants_VCF.vcf"
create_output_files.generate_annotated_vcf(myvariant_data, in_vcf_file, out_vcf_file)
create_output_files.generate_annotated_csv(myvariant_data, out_csv_file)
########DEBUG#########
import os
collection_name = 'Test_Normal_Targeted'
db_name = 'My_Variant_Database'
#set paths
filepath = "/Volumes/Seagate Backup Plus Drive/vcf_files"
csv_file = "normal_targeted_seq.hg19_multianno.csv"
vcf_file = "normal_targeted_seq.vcf"
os.chdir(filepath)
from variantannotation import myvariant_parsing_utils
from variantannotation import csv_to_df
from variantannotation import annovar_processing
from variantannotation import utilities
open_file = myvariant_parsing_utils.VariantParsing()
list_file = open_file.get_variants_from_vcf(vcf_file)
df = csv_to_df.parse_to_df(csv_to_df.open_and_parse(csv_file))
list1 = annovar_processing.get_list_from_annovar_csv(df, list_file[0:5000])
open_file = myvariant_parsing_utils.VariantParsing()
from_myvariant = open_file.get_dict_myvariant(list_file[0:5000])
utilities.final_joint(list1, from_myvariant)
joined_list = list1
from pymongo import MongoClient
client = MongoClient()
db = client.My_Variant_Database
collection = db.Test_Normal_Targeted
all_my_data = list(collection.find({}))
chr_vars = []
location_vars_ant = []
location_vars_pos = []
for i in range(0, len(all_my_data)):
if all_my_data[i]['Chr'] == 'chrMT':
chr_vars.append('chrM')
else:
chr_vars.append(all_my_data[i]['Chr'].encode('ascii','ignore'))
location_vars_ant.append(all_my_data[i]['Start'] + 1)
location_vars_pos.append(all_my_data[i]['Start'] - 1)
import vcf
in_vcf_file = filepath + "/somatic_mutect_old.vcf"
vcf_output_path = "/Users/carlomazzaferro/Desktop/test.vcf"
vcf_reader = vcf.Reader(filename=in_vcf_file)
vcf_writer = vcf.Writer(open(vcf_output_path, 'w'), vcf_reader)
for i in range(0, len(chr_vars)):
for record in vcf_reader.fetch(chr_vars[i], location_vars_pos[i], location_vars_ant[i]):
record.INFO.update(joined_list[i])
vcf_writer.write_record(record)
|
{
"content_hash": "817eed5fb0f004dd2863b8cb35fb7c69",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 110,
"avg_line_length": 36.205761316872426,
"alnum_prop": 0.7377813139349853,
"repo_name": "ucsd-ccbb/jupyter-genomics",
"id": "c2659dfd144eacab6cef4bc2cd38823362cb4dd2",
"size": "8798",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/dnaSeq/VAPr/variantannotation/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "254329"
},
{
"name": "Java",
"bytes": "278021"
},
{
"name": "Jupyter Notebook",
"bytes": "19771596"
},
{
"name": "Perl",
"bytes": "14052"
},
{
"name": "Python",
"bytes": "428899"
},
{
"name": "R",
"bytes": "6817"
},
{
"name": "Shell",
"bytes": "37476"
}
],
"symlink_target": ""
}
|
"""Public ops that allow FFmpeg encoding and decoding operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.ffmpeg.ops import gen_decode_audio_op_py
from tensorflow.contrib.ffmpeg.ops import gen_encode_audio_op_py
from tensorflow.python.framework import load_library
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import resource_loader
@ops.RegisterShape('DecodeAudio')
def _decode_audio_shape(op):
"""Computes the shape of a DecodeAudio operation.
Args:
op: A DecodeAudio operation.
Returns:
A list of output shapes. There's exactly one output, the sampled audio.
This is a rank 2 tensor with an unknown number of samples and a
known number of channels.
"""
try:
channels = op.get_attr('channel_count')
except ValueError:
channels = None
return [tensor_shape.TensorShape([None, channels])]
def decode_audio(contents, file_format=None, samples_per_second=None,
channel_count=None):
"""Create an op that decodes the contents of an audio file.
Args:
contents: The binary contents of the audio file to decode. This is a
scalar.
file_format: A string specifying which format the contents will conform
to. This can be mp3, ogg, or wav.
samples_per_second: The number of samples per second that is assumed.
In some cases, resampling will occur to generate the correct sample
rate.
channel_count: The number of channels that should be created from the
audio contents. If the contents have more than this number, then
some channels will be merged or dropped. If contents has fewer than
this, then additional channels will be created from the existing ones.
Returns:
A rank 2 tensor that has time along dimension 0 and channels along
dimension 1. Dimension 0 will be `samples_per_second * length` wide, and
dimension 1 will be `channel_count` wide.
"""
return gen_decode_audio_op_py.decode_audio(
contents, file_format=file_format, samples_per_second=samples_per_second,
channel_count=channel_count)
ops.NoGradient('DecodeAudio')
@ops.RegisterShape('EncodeAudio')
def _encode_audio_shape(unused_op):
"""Computes the shape of an EncodeAudio operation.
Returns:
A list of output shapes. There's exactly one output, the formatted audio
file. This is a rank 0 tensor.
"""
return [tensor_shape.TensorShape([])]
def encode_audio(audio, file_format=None, samples_per_second=None):
"""Creates an op that encodes an audio file using sampled audio from a tensor.
Args:
audio: A rank 2 tensor that has time along dimension 0 and channels along
dimension 1. Dimension 0 is `samples_per_second * length` long in
seconds.
file_format: The type of file to encode. "wav" is the only supported format.
samples_per_second: The number of samples in the audio tensor per second of
audio.
Returns:
A scalar tensor that contains the encoded audio in the specified file
format.
"""
return gen_encode_audio_op_py.encode_audio(
audio, file_format=file_format, samples_per_second=samples_per_second)
ops.NoGradient('EncodeAudio')
def _load_library(name, op_list=None):
"""Loads a .so file containing the specified operators.
Args:
name: The name of the .so file to load.
op_list: A list of names of operators that the library should have. If None
then the .so file's contents will not be verified.
Raises:
NameError if one of the required ops is missing.
"""
filename = resource_loader.get_path_to_datafile(name)
library = load_library.load_op_library(filename)
for expected_op in (op_list or []):
for lib_op in library.OP_LIST.op:
if lib_op.name == expected_op:
break
else:
raise NameError('Could not find operator %s in dynamic library %s' %
(expected_op, name))
_load_library('ffmpeg.so', ['DecodeAudio', 'EncodeAudio'])
|
{
"content_hash": "a0942b20d150fb361d8ea9946259bf25",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 80,
"avg_line_length": 34.57142857142857,
"alnum_prop": 0.7119591638308216,
"repo_name": "shishaochen/TensorFlow-0.8-Win",
"id": "cdb8006e6b90c1942fdf79b4ccf710376ae18c65",
"size": "4791",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/ffmpeg/ffmpeg_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "33878"
},
{
"name": "C",
"bytes": "1390259"
},
{
"name": "C#",
"bytes": "1900628"
},
{
"name": "C++",
"bytes": "28129535"
},
{
"name": "CMake",
"bytes": "417657"
},
{
"name": "CSS",
"bytes": "1297"
},
{
"name": "Emacs Lisp",
"bytes": "7809"
},
{
"name": "GCC Machine Description",
"bytes": "1"
},
{
"name": "Go",
"bytes": "8549"
},
{
"name": "Groff",
"bytes": "1272396"
},
{
"name": "HTML",
"bytes": "849000"
},
{
"name": "Java",
"bytes": "3139664"
},
{
"name": "JavaScript",
"bytes": "417956"
},
{
"name": "Jupyter Notebook",
"bytes": "1772913"
},
{
"name": "M4",
"bytes": "78386"
},
{
"name": "Makefile",
"bytes": "1177180"
},
{
"name": "Objective-C",
"bytes": "2580186"
},
{
"name": "Objective-C++",
"bytes": "2897"
},
{
"name": "PHP",
"bytes": "342"
},
{
"name": "Protocol Buffer",
"bytes": "924786"
},
{
"name": "Python",
"bytes": "8241830"
},
{
"name": "Ruby",
"bytes": "82233"
},
{
"name": "Shell",
"bytes": "1875702"
},
{
"name": "Swift",
"bytes": "20550"
},
{
"name": "TypeScript",
"bytes": "395532"
},
{
"name": "VimL",
"bytes": "3759"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from chaco.tools.toolbars.toolbar_buttons import *
|
{
"content_hash": "622418139b2c9d4b3741723b46f37f4e",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 50,
"avg_line_length": 45,
"alnum_prop": 0.8,
"repo_name": "enthought/etsproxy",
"id": "a5799c05904469daa6e9a3c1a7af9adbbaaf6cc9",
"size": "105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enthought/chaco/tools/toolbars/toolbar_buttons.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "363714"
}
],
"symlink_target": ""
}
|
"""Color-mod."""
import re
from mdpopups.coloraide import Color as ColorCSS
from mdpopups.coloraide import ColorMatch
from mdpopups.coloraide.spaces import _parse
from mdpopups.coloraide import util
import functools
import math
WHITE = [1.0] * 3
BLACK = [0.0] * 3
TOKENS = {
"units": re.compile(
r"""(?xi)
# Some number of units separated by valid separators
(?:
{float} |
{angle} |
{percent} |
\#(?:{hex}{{6}}(?:{hex}{{2}})?|{hex}{{3}}(?:{hex})?) |
[\w][\w\d]*
)
""".format(**_parse.COLOR_PARTS)
),
"functions": re.compile(r'(?i)[\w][\w\d]*\('),
"separators": re.compile(r'(?:{comma}|{space}|{slash})'.format(**_parse.COLOR_PARTS))
}
RE_ADJUSTERS = {
"alpha": re.compile(
r'(?i)\s+a(?:lpha)?\(\s*(?:(\+\s+|\-\s+)?({percent}|{float})|(\*)?\s*({percent}|{float}))\s*\)'.format(
**_parse.COLOR_PARTS
)
),
"saturation": re.compile(
r'(?i)\s+s(?:aturation)?\((\+\s|\-\s|\*)?\s*({percent})\s*\)'.format(**_parse.COLOR_PARTS)
),
"lightness": re.compile(r'(?i)\s+l(?:ightness)?\((\+\s|\-\s|\*)?\s*({percent})\s*\)'.format(**_parse.COLOR_PARTS)),
"min-contrast_start": re.compile(r'(?i)\s+min-contrast\(\s*'),
"blend_start": re.compile(r'(?i)\s+blenda?\(\s*'),
"end": re.compile(r'(?i)\s*\)')
}
RE_HUE = re.compile(r'(?i){angle}'.format(**_parse.COLOR_PARTS))
RE_COLOR_START = re.compile(r'(?i)color\(\s*')
RE_BLEND_END = re.compile(r'(?i)\s+({percent})(?:\s+(rgb|hsl|hwb))?\s*\)'.format(**_parse.COLOR_PARTS))
RE_BRACKETS = re.compile(r'(?:(\()|(\))|[^()]+)')
RE_MIN_CONTRAST_END = re.compile(r'(?i)\s+({float})\s*\)'.format(**_parse.COLOR_PARTS))
RE_VARS = re.compile(r'(?i)(?:(?<=^)|(?<=[\s\t\(,/]))(var\(\s*([-\w][-\w\d]*)\s*\))(?!\()(?=[\s\t\),/]|$)')
def bracket_match(match, string, start, fullmatch):
"""
Make sure we can acquire a complete `func()` before we replace variables.
We mainly do this so we can judge the real size before we alter the string with variables.
"""
end = None
if match.match(string, start):
brackets = 1
for m in RE_BRACKETS.finditer(string, start + 6):
if m.group(2):
brackets -= 1
elif m.group(1):
brackets += 1
if brackets == 0:
end = m.end(2)
break
return end if (not fullmatch or end == len(string)) else None
def validate_vars(var, good_vars):
"""
Validate variables.
We will blindly replace values, but if we are fairly confident they follow
the pattern of a valid, complete unit, if you replace them in a bad place,
it will break the color (as it should) and if not, it is likely to parse fine,
unless it breaks the syntax of the color being evaluated.
"""
for k, v in var.items():
v = v.strip()
start = 0
need_sep = False
length = len(v)
while True:
if start == length:
good_vars[k] = v
break
try:
# Each item should be separated by some valid separator
if need_sep:
m = TOKENS["separators"].match(v, start)
if m:
start = m.end(0)
need_sep = False
continue
else:
break
# Validate things like `rgb()`, `contrast()` etc.
m = TOKENS["functions"].match(v, start)
if m:
end = None
brackets = 1
for m in RE_BRACKETS.finditer(v, start + 6):
if m.group(2):
brackets -= 1
elif m.group(1):
brackets += 1
if brackets == 0:
end = m.end(0)
break
if end is None:
break
start = end
need_sep = True
continue
# Validate that units such as percents, floats, hex colors, etc.
m = TOKENS["units"].match(v, start)
if m:
start = m.end(0)
need_sep = True
continue
break
except Exception:
break
def _var_replace(m, var=None, parents=None):
"""Replace variables but try to prevent infinite recursion."""
name = m.group(2)
replacement = var.get(m.group(2))
string = replacement if replacement and name not in parents is not None else ""
parents.add(name)
return RE_VARS.sub(functools.partial(_var_replace, var=var, parents=parents), string)
def handle_vars(string, variables, parents=None):
"""Handle CSS variables."""
temp_vars = {}
validate_vars(variables, temp_vars)
parent_vars = set() if parents is None else parents
return RE_VARS.sub(functools.partial(_var_replace, var=temp_vars, parents=parent_vars), string)
class ColorMod:
"""Color utilities."""
def __init__(self, fullmatch=True):
"""Associate with parent."""
self.OP_MAP = {
"": self._op_null,
"*": self._op_mult,
"+": self._op_add,
"-": self._op_sub
}
self.adjusting = False
self._color = None
self.fullmatch = fullmatch
@staticmethod
def _op_mult(a, b):
"""Multiply."""
return a * b
@staticmethod
def _op_add(a, b):
"""Multiply."""
return a + b
@staticmethod
def _op_sub(a, b):
"""Multiply."""
return a - b
@staticmethod
def _op_null(a, b):
"""Multiply."""
return b
def _adjust(self, string, start=0):
"""Adjust."""
nested = self.adjusting
self.adjusting = True
color = None
done = False
old_parent = self._color
hue = None
try:
m = RE_COLOR_START.match(string, start)
if m:
start = m.end(0)
m = RE_HUE.match(string, start)
if m:
hue = _parse.norm_angle(m.group(0))
color = Color("hsl", [hue, 1, 0.5]).convert("srgb")
start = m.end(0)
if color is None:
m = RE_COLOR_START.match(string, start)
if m:
color2, start = self._adjust(string, start=start)
if color2 is None:
raise ValueError("Found unterminated or invalid 'color('")
color = color2.convert("srgb")
if not color.is_nan("hsl.hue"):
hue = color.get("hsl.hue")
if color is None:
obj = Color.match(string, start=start, fullmatch=False)
if obj is not None:
color = obj.color
if color.space != "srgb":
color = color.convert("srgb")
if not color.is_nan("hsl.hue"):
hue = color.get("hsl.hue")
start = obj.end
if color is not None:
self._color = color
self._color.fit(method="clip", in_place=True)
while not done:
m = None
name = None
for key, pattern in RE_ADJUSTERS.items():
name = key
m = pattern.match(string, start)
if m:
start = m.end(0)
break
if m is None:
break
if name == "alpha":
start, hue = self.process_alpha(m, hue)
elif name in ("saturation", "lightness"):
start, hue = self.process_hwb_hsl_channels(name, m, hue)
elif name == "min-contrast_start":
start, hue = self.process_min_contrast(m, string, hue)
elif name == "blend_start":
start, hue = self.process_blend(m, string, hue)
elif name == "end":
done = True
start = m.end(0)
else:
break
self._color.fit(method="clip", in_place=True)
else:
raise ValueError('Could not calculate base color')
except Exception:
pass
if not done or (self.fullmatch and start != len(string)):
result = None
else:
result = self._color
self._color = old_parent
if not nested:
self.adjusting = False
return result, start
def adjust_base(self, base, string):
"""Adjust base."""
self._color = base
pattern = "color({} {})".format(self._color.fit(method="clip").to_string(precision=-1), string)
color, start = self._adjust(pattern)
if color is not None:
self._color.update(color)
else:
raise ValueError(
"'{}' doesn't appear to be a valid and/or supported CSS color or color-mod instruction".format(string)
)
def adjust(self, string, start=0):
"""Adjust."""
color, end = self._adjust(string, start=start)
return color, end
def process_alpha(self, m, hue):
"""Process alpha."""
if m.group(2):
value = m.group(2)
else:
value = m.group(4)
if value.endswith('%'):
value = float(value.strip('%')) * _parse.SCALE_PERCENT
else:
value = float(value)
op = ""
if m.group(1):
op = m.group(1).strip()
elif m.group(3):
op = m.group(3).strip()
self.alpha(value, op=op)
return m.end(0), hue
def process_hwb_hsl_channels(self, name, m, hue):
"""Process HWB and HSL channels (except hue)."""
value = m.group(2)
value = float(value.strip('%'))
op = m.group(1).strip() if m.group(1) else ""
getattr(self, name)(value, op=op, hue=hue)
if not self._color.is_nan("hsl.hue"):
hue = self._color.get("hsl.hue")
return m.end(0), hue
def process_blend(self, m, string, hue):
"""Process blend."""
start = m.end(0)
alpha = m.group(0).strip().startswith('blenda')
m = RE_COLOR_START.match(string, start)
if m:
color2, start = self._adjust(string, start=start)
if color2 is None:
raise ValueError("Found unterminated or invalid 'color('")
else:
color2 = None
obj = Color.match(string, start=start, fullmatch=False)
if obj is not None:
color2 = obj.color
start = obj.end
if color2 is None:
raise ValueError("Could not find a valid color for 'blend'")
m = RE_BLEND_END.match(string, start)
if m:
value = float(m.group(1).strip('%')) * _parse.SCALE_PERCENT
space = "srgb"
if m.group(2):
space = m.group(2).lower()
if space == "rgb":
space = "srgb"
start = m.end(0)
else:
raise ValueError("Found unterminated or invalid 'blend('")
value = util.clamp(value, 0.0, 1.0)
self.blend(color2, 1.0 - value, alpha, space=space)
if not self._color.is_nan("hsl.hue"):
hue = self._color.get("hsl.hue")
return start, hue
def process_min_contrast(self, m, string, hue):
"""Process blend."""
# Gather the min-contrast parameters
start = m.end(0)
m = RE_COLOR_START.match(string, start)
if m:
color2, start = self._adjust(string, start=start)
if color2 is None:
raise ValueError("Found unterminated or invalid 'color('")
else:
color2 = None
obj = Color.match(string, start=start, fullmatch=False)
if obj is not None:
color2 = obj.color
start = obj.end
m = RE_MIN_CONTRAST_END.match(string, start)
if m:
value = float(m.group(1))
start = m.end(0)
else:
raise ValueError("Found unterminated or invalid 'min-contrast('")
this = self._color.convert("srgb")
color2 = color2.convert("srgb")
color2.alpha = 1.0
self.min_contrast(this, color2, value)
self._color.update(this)
if not self._color.is_nan("hsl.hue"):
hue = self._color.get("hsl.hue")
return start, hue
def min_contrast(self, color1, color2, target):
"""
Get the color with the best contrast.
This mimics Sublime Text's custom `min-contrast` for `color-mod` (now defunct - the CSS version).
It ensure the color has at least the specified contrast ratio.
While there seems to be slight differences with ours and Sublime, maybe due to some rounding,
this essentially fulfills the intention of their min-contrast.
"""
ratio = color1.contrast(color2)
# Already meet the minimum contrast or the request is impossible
if ratio > target or target < 1:
return
lum2 = color2.luminance()
is_dark = lum2 < 0.5
orig = color1.convert("hwb")
if is_dark:
primary = "whiteness"
secondary = "blackness"
min_mix = orig.whiteness
max_mix = 100.0
else:
primary = "blackness"
secondary = "whiteness"
min_mix = orig.blackness
max_mix = 100.0
orig_ratio = ratio
last_ratio = 0
last_mix = 0
last_other = 0
temp = orig.clone()
while abs(min_mix - max_mix) > 0.2:
mid_mix = round((max_mix + min_mix) / 2, 1)
mid_other = (
orig.get(secondary) -
((mid_mix - orig.get(primary)) / (100.0 - orig.get(primary))) * orig.get(secondary)
)
temp.set(primary, mid_mix)
temp.set(secondary, mid_other)
ratio = temp.contrast(color2)
if ratio < target:
min_mix = mid_mix
else:
max_mix = mid_mix
if (
(last_ratio < target and ratio > last_ratio) or
(ratio > target and ratio < last_ratio)
):
last_ratio = ratio
last_mix = mid_mix
last_other = mid_other
# Can't find a better color
if last_ratio < ratio and orig_ratio > last_ratio:
return
# Use the best, last values
final = orig.new("hwb", [orig.hue, last_mix, last_other] if is_dark else [orig.hue, last_other, last_mix])
final = final.convert('srgb')
# If we are lightening the color, then we'd like to round up to ensure we are over the luminance threshold
# as sRGB will clip off decimals. If we are darkening, then we want to just floor the values as the algorithm
# leans more to the light side.
rnd = util.round_half_up if is_dark else math.floor
final = Color("srgb", [rnd(c * 255.0) / 255.0 for c in final.coords()], final.alpha)
color1.update(final)
def blend(self, color, percent, alpha=False, space="srgb"):
"""Blend color."""
space = space.lower()
if space not in ("srgb", "hsl", "hwb"):
raise ValueError(
"ColorMod's does not support the '{}' colorspace, only 'srgb', 'hsl', and 'hwb' are supported"
).format(space)
this = self._color.convert(space) if self._color.space() != space else self._color
if color.space() != space:
color.convert(space, in_place=True)
new_color = this.mix(color, percent, space=space)
if not alpha:
new_color.alpha = color.alpha
self._color.update(new_color)
def alpha(self, value, op=""):
"""Alpha."""
this = self._color
op = self.OP_MAP.get(op, self._op_null)
this.alpha = op(this.alpha, value)
self._color.update(this)
def lightness(self, value, op="", hue=None):
"""Lightness."""
this = self._color.convert("hsl") if self._color.space() != "hsl" else self._color
if this.is_nan('hue') and hue is not None:
this.hue = hue
op = self.OP_MAP.get(op, self._op_null)
this.lightness = op(this.lightness, value)
self._color.update(this)
def saturation(self, value, op="", hue=None):
"""Saturation."""
this = self._color.convert("hsl") if self._color.space() != "hsl" else self._color
if this.is_nan("hue") and hue is not None:
this.hue = hue
op = self.OP_MAP.get(op, self._op_null)
this.saturation = op(this.saturation, value)
self._color.update(this)
class Color(ColorCSS):
"""Color modify class."""
def __init__(self, color, data=None, alpha=util.DEF_ALPHA, *, filters=None, variables=None, **kwargs):
"""Initialize."""
super().__init__(color, data, alpha, filters=None, variables=variables, **kwargs)
def _parse(self, color, data=None, alpha=util.DEF_ALPHA, filters=None, variables=None, **kwargs):
"""Parse the color."""
obj = None
if data is not None:
filters = set(filters) if filters is not None else set()
for space, space_class in self.CS_MAP.items():
s = color.lower()
if space == s and (not filters or s in filters):
obj = space_class(data[:space_class.NUM_COLOR_CHANNELS], alpha)
return obj
elif isinstance(color, ColorCSS):
if not filters or color.space() in filters:
obj = self.CS_MAP[color.space()](color._space)
else:
m = self._match(color, fullmatch=True, filters=filters, variables=variables)
if m is None:
raise ValueError("'{}' is not a valid color".format(color))
obj = m.color
if obj is None:
raise ValueError("Could not process the provided color")
return obj
@classmethod
def _match(cls, string, start=0, fullmatch=False, filters=None, variables=None):
"""
Match a color in a buffer and return a color object.
This must return the color space, not the Color object.
"""
# Handle variable
end = None
is_mod = False
if variables:
m = RE_VARS.match(string, start)
if m and (not fullmatch or len(string) == m.end(0)):
end = m.end(0)
start = 0
string = string[start:end]
string = handle_vars(string, variables)
variables = None
temp = bracket_match(RE_COLOR_START, string, start, fullmatch)
if end is None and temp:
end = temp
is_mod = True
elif end is not None and temp is not None:
is_mod = True
if is_mod:
if variables:
string = handle_vars(string, variables)
obj, match_end = ColorMod(fullmatch).adjust(string, start)
if obj is not None:
return ColorMatch(obj._space, start, end if end is not None else match_end)
else:
filters = set(filters) if filters is not None else set()
obj = None
for space, space_class in cls.CS_MAP.items():
if filters and space not in filters:
continue
value, match_end = space_class.match(string, start, fullmatch)
if value is not None:
color = space_class(*value)
obj = ColorMatch(color, start, match_end)
if obj is not None and end:
obj.end = end
return obj
@classmethod
def match(cls, string, start=0, fullmatch=False, *, filters=None, variables=None):
"""Match color."""
obj = cls._match(string, start, fullmatch, filters=filters, variables=variables)
if obj is not None:
obj.color = cls(obj.color.space(), obj.color.coords(), obj.color.alpha)
return obj
def new(self, color, data=None, alpha=util.DEF_ALPHA, *, filters=None, variables=None, **kwargs):
"""Create new color object."""
return type(self)(color, data, alpha, filters=filters, variables=variables, **kwargs)
def update(self, color, data=None, alpha=util.DEF_ALPHA, *, filters=None, variables=None, **kwargs):
"""Update the existing color space with the provided color."""
clone = self.clone()
obj = self._parse(color, data, alpha, filters=filters, variables=variables, **kwargs)
clone._attach(obj)
if clone.space() != self.space():
clone.convert(self.space(), in_place=True)
self._attach(clone._space)
return self
def mutate(self, color, data=None, alpha=util.DEF_ALPHA, *, filters=None, variables=None, **kwargs):
"""Mutate the current color to a new color."""
self._attach(self._parse(color, data, alpha, filters=filters, variables=variables, **kwargs))
return self
|
{
"content_hash": "0e605eadb71372a698886161743bb2e8",
"timestamp": "",
"source": "github",
"line_count": 632,
"max_line_length": 119,
"avg_line_length": 34.6376582278481,
"alnum_prop": 0.5092503768672058,
"repo_name": "facelessuser/ThemeTweaker",
"id": "1018d51cb70008ff96fa51e9a926363f9337cc2d",
"size": "21891",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/tmtheme.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "133381"
}
],
"symlink_target": ""
}
|
import glob
import time
import stat
import math
import linecache
import os
import sys
import subprocess
from fabric.operations import run, put
from fabric.api import env,run,hide,settings
from fabric.context_managers import shell_env
from fabric.operations import put
import shutil
import datetime
#==========================
def s3_to_ebs(IP,keypair,bucketname,dironebs,rclonepath,keyid,secretid,region,numfilesAtATime):
#Copy rclone onto instance
cmd='scp -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i %s %s ubuntu@%s:~/'%(keypair,rclonepath,IP)
subprocess.Popen(cmd,shell=True).wait()
#Write rclone config file
homedir='/home/ubuntu/'
rclonename='ebss3'
if os.path.exists('.rclone.conf'):
os.remove('.rclone.conf')
r1=open('.rclone.conf','w')
r1.write('[rclonename]\n')
r1.write('type = s3\n')
r1.write('env_auth = false\n')
r1.write('access_key_id = %s\n' %(keyid))
r1.write('secret_access_key = %s\n' %(secretid))
r1.write('region = %s\n' %(region))
r1.write('endpoint = \n')
if region == 'us-east-1':
r1.write('location_constraint = \n')
else:
r1.write('location_constraint = %s\n' %(region))
r1.write('acl = authenticated-read\n')
r1.write('server_side_encryption = \n')
r1.write('storage_class = STANDARD\n')
r1.close()
cmd='scp -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i %s .rclone.conf ubuntu@%s:~/.rclone.conf' %(keypair,IP)
subprocess.Popen(cmd,shell=True).wait()
#Copy data down
env.host_string='ubuntu@%s' %(IP)
env.key_filename = '%s' %(keypair)
rcloneexe='rclone'
exec_remote_cmd('%s/%s copy rclonename:%s %s --max-size 1G --quiet --transfers %i' %(homedir,rcloneexe,bucketname.split('s3://')[-1],dironebs,numfilesAtATime))
fileonly=dironebs.split('/')[-1]
if dironebs.split('.')[-1] == 'mrcs' or dironebs.split('.')[-1] == 'spi':
exec_remote_cmd('mv %s/%s tmp.mrcs' %(dironebs,fileonly))
exec_remote_cmd('rm -rf /%s/' %(dironebs))
#exec_remote_cmd('mv tmp.mrcs /data/%s' %(fileonly))
exec_remote_cmd('mv tmp.mrcs /%s' %(fileonly))
#=========================
def rclone_to_s3(indir,numfiles,region,keyid,secretid,rclonename,bucketname,awspath,project,rclonelist,outdir):
if subprocess.Popen('uname',shell=True, stdout=subprocess.PIPE).stdout.read().strip() == 'Linux':
rclonepath='%s/rclone' %(awspath)
if subprocess.Popen('uname',shell=True, stdout=subprocess.PIPE).stdout.read().strip() == 'Darwin':
rclonepath='%s/rclone_mac'%(awspath)
#Write .rclone.conf
if os.path.exists('%s/.rclone.conf' %(outdir)):
os.remove('%s/.rclone.conf' %(outdir))
print ('Region = %s\n'%(region))
print ('[rclonename]\n')
print ('type = s3\n')
print ('env_auth = false\n')
print ('access_key_id = %s\n' %(keyid))
print ('secret_access_key = %s\n' %(secretid[:5]+'****************************************'))
print ('region = %s\n' %(region))
r1=open('%s/.rclone.conf' %(outdir),'w')
r1.write('[rclonename]\n')
r1.write('type = s3\n')
r1.write('env_auth = false\n')
r1.write('access_key_id = %s\n' %(keyid))
r1.write('secret_access_key = %s\n' %(secretid))
r1.write('region = %s\n' %(region))
r1.write('endpoint = \n')
if region == 'us-east-1':
r1.write('location_constraint = \n')
print('location_constraint = \n')
else:
r1.write('location_constraint = %s\n' %(region))
print('location_constraint = %s\n' %(region))
r1.write('acl = authenticated-read\n')
r1.write('server_side_encryption = \n')
r1.write('storage_class = STANDARD\n')
r1.close()
#Create bucket on aws:
#if len(project) == 0:
# cmd='aws s3 mb s3://%s --region %s > s3.log' %(bucketname,region)
# subprocess.Popen(cmd,shell=True).wait()
# os.remove('s3.log')
if len(rclonelist) == 0:
cmd='%s copy %s rclonename:%s --quiet --config %s/.rclone.conf --transfers %i > rclone.log' %(rclonepath,indir,bucketname,outdir,math.ceil(numfiles))
subprocess.Popen(cmd,shell=True).wait()
if len(rclonelist) > 0:
cmd='%s copy %s rclonename:%s --quiet --config %s/.rclone.conf --transfers %i --include-from %s > rclone.log' %(rclonepath,indir,bucketname,outdir,math.ceil(numfiles),rclonelist)
subprocess.Popen(cmd,shell=True).wait()
os.remove('rclone.log')
return 's3://%s' %(bucketname)
#====================
def exec_remote_cmd(cmd):
from fabric.operations import run, put
from fabric.api import hide,settings
with hide('output','running','warnings'):
with settings(warn_only=True):
return run(cmd)
#==============================
def writeToLog(msg,outfile):
cmd='echo '' >> %s' %(outfile)
subprocess.Popen(cmd,shell=True).wait()
cmd='echo "%s" >> %s' %(msg,outfile)
subprocess.Popen(cmd,shell=True).wait()
#==============================
def getCMDrefine(rlncmd):
#o1=open(f1,'r')
#for line in o1:
# if len(line.split('=')) > 0:
# if line.split('=')[0] == 'relioncmd':
# rlncmd=line.split('=')[1]
#o1.close()
#Get particle input directory and if there is a reference model
outbasenanme=''
continuecounter=-1
indircounter=-1
refcounter=-1
outcounter=-1
autoref=-1
counter=1
itercounter=0
numiters=0
mask=''
maskcounter=-1
ref='None'
stack=False
partstarname=''
partdir=''
contLocation=''
particlediameter=-999
partdiamcounter=-1
for l in rlncmd.split():
if l == '--i':
indircounter=counter
if l == '--ref':
refcounter=counter
if l == '--o':
outcounter=counter
if l == '--auto_refine':
autoref=counter
if l == '--iter':
itercounter=counter
if l == '--solvent_mask':
maskcounter=counter
if l == '--continue':
continuecounter=counter
if l == '--particle_diameter':
partdiamcounter=counter
counter=counter+1
if indircounter > 0:
partstarname=rlncmd.split()[indircounter].split('/')[-1]
if '.star' not in partstarname:
stack=True
partdir=rlncmd.split()[indircounter].split('/')
del partdir[-1]
partdir='/'.join(partdir)
if partdiamcounter > 0:
diamlength=len(repr(rlncmd.split()[partdiamcounter].strip()))
if diamlength>10:
particlediameter=float(repr(rlncmd.split()[partdiamcounter].strip())[1:-9])
if diamlength<=10:
particlediameter=float(rlncmd.split()[partdiamcounter].strip())
#Since appions command comes with this formatting: 234.0\xc2\xa0
outbasename=rlncmd.split()[outcounter]
outdir=rlncmd.split()[outcounter].split('/')
del outdir[-1]
outdir='/'.join(outdir)
if itercounter > 0:
numiterslength=len(repr(rlncmd.split()[itercounter].strip()))
if numiterslength>8:
numiters=int(repr(rlncmd.split()[itercounter].strip())[1:-9])
if numiterslength<=8:
numiters=int(rlncmd.split()[itercounter].strip())
if refcounter > 0:
ref=rlncmd.split()[refcounter]
if maskcounter > 0:
mask=rlncmd.split()[maskcounter]
if continuecounter > 0:
contLocation=rlncmd.split()[continuecounter]
return rlncmd,partdir,ref,outdir,autoref,numiters,partstarname,mask,stack,contLocation,outbasename,particlediameter
#==============================
def checkPartLocation(instarfile,indir):
otherPartDir=''
otherPartRclone=[]
error=''
o44=open(instarfile,'r')
for line in o44:
if len(line) > 0:
if 'data' not in line:
if '_rln' in line:
if line.split()[0] == '_rlnImageName':
print(line)
if len(line.split('#')) > 1:
imagecolnum=int(line.split('#')[-1])
else:
imagecolnum=1
if line.split()[0] == '_rlnMicrographName':
if len(line.split('#')) > 1:
microcolnum=int(line.split('#')[-1])
else:
microcolnum=2
o44.close()
if microcolnum == 0:
error='Could not find _rlnImageName in starfile %s' %(instarfile)
if microcolnum != 0:
o44=open(instarfile,'r')
for line in o44:
if not line.startswith("#"):
if len(line.split()) > 0:
if 'data' not in line:
if '_rln' not in line:
if 'loop_' not in line:
part=line.split()[imagecolnum-1].split('@')[-1]
starfile=''
if not os.path.exists(part):
error='Error: particle stack %s does not exist.' %(part)
if os.path.exists('%s_extract.star' %(part[:-5])):
starfile='%s_extract.star' %(part[:-5])
InIt=False
if indir in part:
InIt=True
if InIt is False:
if len(part.split('/')) == 5:
otherPartDir=part.split('/')[0]+'/'+part.split('/')[1]+'/'
tmpline=part.split('/')[2]+'/'+part.split('/')[3]+'/'+part.split('/')[4]
tmpline=tmpline.replace('//','/')
otherPartRclone.append(tmpline)
if len(part.split('/')) == 4:
otherPartDir=part.split('/')[0]+'/'+part.split('/')[1]+'/'
otherPartRclone.append(part.split('/')[2]+'/'+part.split('/')[3])
if len(part.split('/')) == 3:
otherPartDir=part.split('/')[0]+'/'+part.split('/')[1]+'/'
otherPartRclone.append(part.split('/')[2])
'''
checkdir=part.split(micro)[0]
if checkdir[-1] == '/':
checkdir=checkdir[:-1]
if checkdir != indir:
otherPartDir=checkdir
if micro not in otherPartRclone:
partfilename=part.split('/')[-1]
micdironly=micro.split('/')
del micdironly[-1]
micdironly='/'.join(micdironly)
otherPartRclone.append('%s/%s' %(micdironly,partfilename))
if len(starfile) > 0:
instarfile='%s/%s_extract.star' %(micdironly,partfilename[:-5])
if instarfile not in otherPartRclone:
otherPartRclone.append(instarfile)
'''
o44.close()
return otherPartDir,otherPartRclone,error
#==============================
def parseCMDrefine(relioncmd):
l=relioncmd.split()
newcmd=[]
tot=len(l)
counter=0
selectflag=''
while counter < tot:
if 'mpirun' in l[counter]:
counter=counter+1
continue
if l[counter] == '-np':
counter=counter+2
continue
if l[counter] == '--preread_images':
counter=counter+1
continue
if l[counter] == '--pool':
counter=counter+2
continue
if l[counter] == '`which':
counter=counter+1
continue
if l[counter] == 'relion_refine_mpi`':
counter=counter+1
continue
if l[counter] == 'relion_refine`':
counter=counter+1
continue
if l[counter] == 'relion_refine':
counter=counter+1
continue
if l[counter] == 'relion_refine_mpi':
counter=counter+1
continue
if l[counter] == '--gpu':
if counter+1 < tot:
if l[counter+1][0] == '-':
counter=counter+1
else:
counter=counter+2
else:
counter=counter+1
continue
if l[counter] == '--j':
counter=counter+2
continue
if l[counter] == '--i':
if l[counter+1].split('/')[0] == 'Select':
selectflag=l[counter+1]
newcmd.append(l[counter])
counter=counter+1
return ' '.join(newcmd),selectflag
#==============================
def getSelectParticleDir(selectdir):
r1=open(selectdir,'r')
imagenumcol=3
for line in r1:
if len(line) < 40:
if len(line.split()) > 0:
if line.split()[0] == '_rlnImageName':
imagenumcol=int(line.split()[1].split('#')[-1])-1
r1.close()
r1=open(selectdir,'r')
skip=0
for line in r1:
if len(line) < 40:
continue
if len(line.split()) > 3:
if skip == 0:
micname=line.split()[imagenumcol]
skype=1
r1.close()
jobname=micname.split('@')[-1].split('/')[1]
return 'Extract/%s' %(jobname)
#==============================
def relion_refine_mpi(in_cmd,instancetype='',symlinks=False,spotprice=0):
assert type(instancetype) == str
print("Instance type is: ",instancetype)
#Set entry
otherPartDir=''
otherPartRclone=''
error=''
#Get relion command and input options
relioncmd,particledir,initmodel,outdir,autoref,numiters,partstarname,mask,stack,continueRun,outbasename,diameter=getCMDrefine(in_cmd)
#Make output directory
if not os.path.exists(outdir):
os.makedirs(outdir)
cmd='touch %s/note.txt' %(outdir)
subprocess.Popen(cmd,shell=True).wait()
if os.path.exists('%s/run.err' %(outdir)):
os.remove('%s/run.err' %(outdir))
cmd='touch %s/run.err' %(outdir)
subprocess.Popen(cmd,shell=True).wait()
if len(continueRun) > 0:
particledir=continueRun.split('/')
del particledir[-1]
particledir='/'.join(particledir)
partstarname='%s_data.star' %(continueRun.split('/')[-1][:-15])
for line in open(continueRun,'r'):
if len(line) > 4:
if line.split()[0] == '_rlnCurrentIteration':
iterationNumOpt=int(line.split()[1].strip())
writeToLog('Error: Number of iterations requested %i is less than / equal to current iteration of data (%i). Exiting' %(numiters,iterationNumOpt),'%s/run.err' %(outdir))
sys.exit()
if len(particledir) == 0:
particledir=partstarname
#Get number of particles to decide how big of a machine to spin up
if stack is False:
if len(particledir) == 0:
starfilename=particledir
numParticles=len(open(particledir,'r').readlines())
if len(particledir) > 0:
starfilename='%s/%s' %(particledir,partstarname)
numParticles=len(open('%s/%s' %(particledir,partstarname),'r').readlines())
magcheck=False
pixcheck=False
ctfcheck=False
partcolnum=-1
detectorcolnum=-1
magcolnum=-1
exampleline=''
for line in open(starfilename,'r'):
if len(line.split()) > 0:
if line.split()[0] == '_rlnMagnification':
magcheck=True
magcolnum=int(line.split()[1].split('#')[-1])
if line.split()[0] == '_rlnDetectorPixelSize':
pixcheck=True
detectorcolnum=int(line.split()[1].split('#')[-1])
if line.split()[0] == '_rlnDefocusU':
ctfcheck=True
if line.split()[0] == '_rlnImageName':
print("LINE IS",line)
if len(line.split()) >1:
partcolnum=int(line.split()[1])
elif len(line.split())>0:
partcolnum=1
exampleline=line
ctfin=False
apixin=False
rlncounter=1
while rlncounter <= len(relioncmd.split()):
if relioncmd.split()[rlncounter-1] == '--ctf':
ctfin=True
if relioncmd.split()[rlncounter-1] == '--angpix':
apixin=True
apixVal=float(relioncmd.split()[rlncounter])
rlncounter=rlncounter+1
if partcolnum < 0:
writeToLog('Error: could not find _rlnImageName in .star file. Exiting','%s/run.err' %(outdir))
sys.exit()
if apixin is False:
if magcheck is False:
writeToLog('Error: No magnification information found in .star file. Exiting', '%s/run.err' %(outdir))
sys.exit()
if apixin is False:
if pixcheck is False:
writeToLog('Error: No detector pixel size information found in .star file. Exiting', '%s/run.err' %(outdir))
sys.exit()
if ctfin is True:
if ctfcheck is False:
writeToLog('Error: no defocus information found in .star file. Exiting', '%s/run.err' %(outdir))
sys.exit()
#Get xdims
if len(exampleline) == 0:
writeToLog('Error: no inputline found','%s/run.err' %(outdir))
sys.exit()
examplePart=exampleline.split()[partcolnum-1]
if os.path.exists('%s/handler.txt' %(outdir)):
os.remove('%s/handler.txt' %(outdir))
cmd='relion_image_handler --i %s --stats > %s/handler.txt' %(examplePart,outdir)
subprocess.Popen(cmd,shell=True).wait()
partxdim=int(linecache.getline('%s/handler.txt' %(outdir),1).split('=')[1].split('x')[0].strip())
if apixin is False:
if len(exampleline.split()) < detectorcolnum-1:
writeToLog('Error: particle line is missing columns: %s' %(exampleline), '%s/run.err' %(outdir))
sys.exit()
example_detector=float(exampleline.split()[detectorcolnum-1])
example_mag=float(exampleline.split()[magcolnum-1])
apixVal=(example_detector/example_mag)*10000
if stack is True:
# Appion uses .hed files, not mrcs
#if partstarname.split('.')[-1] != 'mrcs':
# writeToLog('Error: input stack must have .mrcs extension. Exiting','%s/run.err' %(outdir))
# sys.exit()
if os.path.exists('%s/handler.txt' %(outdir)):
os.remove('%s/handler.txt' %(outdir))
cmd='relion_image_handler --i %s --stats > %s/handler.txt' %(partstarname,outdir)
subprocess.Popen(cmd,shell=True).wait()
numParticles=int(linecache.getline('%s/handler.txt' %(outdir),1).split('=')[1].split('x')[3].split(';')[0])
partxdim=int(linecache.getline('%s/handler.txt' %(outdir),1).split('=')[1].split('x')[0].strip())
print("numParticles is",numParticles)
print("partxdim calculated is",partxdim)
ctf=False
angpix=False
rlncounter=1
while rlncounter <= len(relioncmd.split()):
if relioncmd.split()[rlncounter-1] == '--ctf':
ctf=True
if relioncmd.split()[rlncounter-1] == '--angpix':
angpix=True
apixVal=float(relioncmd.split()[rlncounter])
rlncounter=rlncounter+1
if ctf is True:
writeToLog('Error: CTF correction was selected for a particle stack without a star file (which means that Relion cannot do CTF correction). Exiting','%s/run.err' %(outdir))
sys.exit()
if angpix is False:
writeToLog('Error: Pixel size required. Please include --angpix into "Additional arguments" and resubmit','%s/run.err' %(outdir))
sys.exit()
if initmodel != 'None':
if os.path.exists('handler2.txt'):
os.remove('handler2.txt')
time.sleep(2)
cmd='relion_image_handler --i %s --stats > handler2.txt' %(initmodel)
subprocess.Popen(cmd,shell=True).wait()
time.sleep(2)
modxdim=int(linecache.getline('handler2.txt',1).split('=')[1].split('x')[0].strip())
os.remove('handler2.txt')
if modxdim != partxdim:
writeToLog('Error: 3D model and particles do not have the same dimensions. Exiting','%s/run.err' %(outdir))
sys.exit()
#Check that diameter specified fits within box
if float(diameter) >= float(apixVal)*float(partxdim)-1:
writeToLog('Error: Diameter specified (%.0f Angstroms) is greater than box size (%.0f Angstroms). Exiting' %(diameter,apixVal*partxdim),'%s/run.err' %(outdir))
writeToLog('Error: Diameter specified (%.0f Angstroms) is greater than box size (%.0f Angstroms). Exiting' %(diameter,apixVal*partxdim),'%s/run.out' %(outdir))
sys.exit()
#Parse relion command to only include input options, removing any mention of 'gpu' or j threads in command
relioncmd,select=parseCMDrefine(relioncmd)
#Check where input particles are located
if stack is False:
otherPartDir,otherPartRclone,error=checkPartLocation(starfilename,particledir)
if len(error) > 0:
writeToLog(error,'%s/run.err' %(outdir))
sys.exit()
if len(otherPartRclone) > 0:
if os.path.exists('rclonetmplist1298.txt'):
os.remove('rclonetmplist1298.txt')
o89=open('rclonetmplist1298.txt','w')
for entry in otherPartRclone:
o89.write('%s\n' %(entry.strip()))
o89.close()
otherPartRclone='rclonetmplist1298.txt'
#Choose instance type
print("Selecting instance type...")
print("instancetype is",instancetype)
if instancetype == '':
print("No instance type specified. Selecting instance based on number of particles")
if initmodel == 'None': #2D classification
if numParticles < 20000:
instance='p2.xlarge'
if numParticles >= 20000 and numParticles <= 100000:
instance='p2.8xlarge'
if numParticles > 100000:
instance='p2.16xlarge'
if initmodel != 'None': #3D classification or refinement
if autoref == -1: #3D classification
if numParticles <25000:
instance='p2.xlarge'
if numParticles >=25000:
instance='p2.8xlarge'
if autoref != -1: #3D refinement
instance='p2.8xlarge'
#instance='p2.xlarge'
elif instancetype not in ['p2.xlarge','p2.8xlarge','p2.16xlarge','g3.8xlarge','g3.16xlarge','p3.2xlarge','p3.8xlarge','p3.16xlarge']:
writeToLog("Error, invalid instance type. Must be p2.xlarge, p2.8xlarge, p2.16xlarge, g3.8xlarge, g3.16xlarge, p3.2xlarge, p3.8xlarge, or p3.16xlarge.",'%s/run.out' %(outdir))
sys.exit()
else:
instance = instancetype
print("Using %s instance type."%instancetype)
#Get AWS region from aws_init.sh environment variable
awsregion=subprocess.Popen('echo $AWS_DEFAULT_REGION', shell=True, stdout=subprocess.PIPE).stdout.read().split()[0]
if len(awsregion) == 0:
writeToLog('Error: Could not find default region specified as $AWS_DEFAULT_REGION. Please set this environmental variable and try again.','%s/run.err' %(outdir))
sys.exit()
writeToLog('Booting up virtual machine %s on AWS in availability zone %sa' %(instance,awsregion), '%s/run.out' %(outdir))
#Get AWS ID
AWS_ID=subprocess.Popen('echo $AWS_ACCOUNT_ID',shell=True, stdout=subprocess.PIPE).stdout.read().strip()
key_ID=subprocess.Popen('echo $AWS_ACCESS_KEY_ID',shell=True, stdout=subprocess.PIPE).stdout.read().strip()
secret_ID=subprocess.Popen('echo $AWS_SECRET_ACCESS_KEY',shell=True, stdout=subprocess.PIPE).stdout.read().strip()
teamname=subprocess.Popen('echo $RESEARCH_GROUP_NAME',shell=True, stdout=subprocess.PIPE).stdout.read().strip()
keypair=subprocess.Popen('echo $KEYPAIR_PATH',shell=True, stdout=subprocess.PIPE).stdout.read().strip()
#Get AWS CLI directory location
awsdir=subprocess.Popen('echo $AWS_CLI_DIR', shell=True, stdout=subprocess.PIPE).stdout.read().split()[0]
if len(awsdir) == 0:
print 'Error: Could not find AWS scripts directory specified as $AWS_CLI_DIR. Please set this environmental variable and try again.'
sys.exit()
#.aws_relion will Have: [particledir] [s3 bucket name] [ebs volume]
ebs_exist=False
s3_exist=False
bucketname=''
if os.path.exists('.aws_relion'):
for line in open('.aws_relion','r'):
if line.split()[0] == particledir:
bucketname=line.split()[1]
ebsvolname=''
#Check if it exists:
if os.path.exists('%s/ebsout.log' %(outdir)):
os.remove('%s/ebsout.log' %(outdir))
cmd='aws ec2 describe-volumes | grep VolumeId > %s/ebsout.log' %(outdir)
subprocess.Popen(cmd,shell=True).wait()
for line in open('%s/ebsout.log' %(outdir),'r'):
if line.strip().split()[-1].split('"')[1] == ebsvolname:
ebs_exist=True
volID=ebsvolname
os.remove('%s/ebsout.log' %(outdir))
if os.path.exists('%s/s3out.log' %(outdir)):
os.remove('%s/s3out.log' %(outdir))
cmd='aws s3 ls %s > %s/s3out.log' %(bucketname.split('s3://')[-1],outdir)
subprocess.Popen(cmd,shell=True).wait()
if len(open('%s/s3out.log' %(outdir),'r').readlines()) > 0:
s3_exist=True
keyname=keypair.split('/')[-1].split('.pem')[0]
keyname=keyname.split('_')
keyname='-'.join(keyname)
outdirname=outdir.split('/')
if len(outdirname[-1]) == 0:
del outdirname[-1]
outdirname='-'.join(outdirname)
outdirname=outdirname.lower().strip()
keyname=keyname.lower().strip()
project=''
project=project.strip()
if s3_exist is False:
if ebs_exist is True:
ebs_exist=False
cmd='aws ec2 delete-volume --volume-id %s' %(ebsvolname)
subprocess.Popen(cmd,shell=True).wait()
if len(otherPartDir) == 0:
if symlinks:
inputfilesize=subprocess.Popen('du -L %s' %(particledir), shell=True, stdout=subprocess.PIPE).stdout.read().split()[-2]
else:
inputfilesize=subprocess.Popen('du %s' %(particledir), shell=True, stdout=subprocess.PIPE).stdout.read().split()[-2]
if len(otherPartDir) > 0:
if symlinks:
inputfilesize=subprocess.Popen('du -L %s' %(otherPartDir), shell=True, stdout=subprocess.PIPE).stdout.read().split()[-2]
else:
inputfilesize=subprocess.Popen('du %s' %(otherPartDir), shell=True, stdout=subprocess.PIPE).stdout.read().split()[-2]
sizeneeded='%.0f' %(math.ceil((float(inputfilesize)*4)/1000000))
actualsize='%.0f' %(math.ceil((float(inputfilesize)/1000000)))
#Upload data to S3
if s3_exist is False:
writeToLog('Started uploading %sGB to AWS on %s' %(actualsize,time.asctime(time.localtime(time.time()))),'%s/run.out' %(outdir))
if len(project) == 0:
bucketname='rln-aws-tmp-%s/%s/%0.f' %(teamname,keyname,time.time())
if len(project) > 0:
bucketname='rln-aws-%s-%s/%s/%s' %(teamname,keyname,project,outdirname)
if subprocess.Popen('uname',shell=True, stdout=subprocess.PIPE).stdout.read().strip() == 'Linux':
numCPUs=int(subprocess.Popen('grep -c ^processor /proc/cpuinfo',shell=True, stdout=subprocess.PIPE).stdout.read().strip())
if subprocess.Popen('uname',shell=True, stdout=subprocess.PIPE).stdout.read().strip() == 'Darwin':
numCPUs=int(subprocess.Popen('sysctl -n hw.ncpu',shell=True, stdout=subprocess.PIPE).stdout.read().strip())
if len(otherPartRclone) == 0:
bucketname=rclone_to_s3(particledir,numCPUs*2.4,awsregion,key_ID,secret_ID,bucketname,bucketname,awsdir,project,otherPartRclone,outdir)
if len(otherPartRclone) > 0:
bucketname=rclone_to_s3(otherPartDir,numCPUs*2.4,awsregion,key_ID,secret_ID,bucketname,bucketname,awsdir,project,otherPartRclone,outdir)
writeToLog('Finished at %s' %(time.asctime(time.localtime(time.time()))),'%s/run.out' %(outdir))
if ebs_exist is False:
writeToLog('Creating data storage drive ...','%s/run.out' %(outdir))
#Create EBS volume
if os.path.exists('%s/awsebs.log' %(outdir)) :
os.remove('%s/awsebs.log' %(outdir))
if spotprice >0:
most_stable_region = get_stable_instance_region(awsregion,instance,spotprice)
writeToLog('Most stable region is %s.'%(most_stable_region),'%s/awslog.log'%(outdir))
print("Most stable region is %s."%(most_stable_region))
cmd='%s/create_volume.py %i %s "rln-aws-tmp-%s-%s"'%(awsdir,int(sizeneeded),most_stable_region,teamname,particledir)+'> %s/awsebs.log' %(outdir)
else:
cmd='%s/create_volume.py %i %sa "rln-aws-tmp-%s-%s"'%(awsdir,int(sizeneeded),awsregion,teamname,particledir)+'> %s/awsebs.log' %(outdir)
print("Create volume with command %s"%(cmd))
subprocess.Popen(cmd,shell=True).wait()
#Get volID from logfile
volID=linecache.getline('%s/awsebs.log' %(outdir),5).split('ID: ')[-1].split()[0]
#Restore volume, returning with it volID for later steps
writeToLog('Launching virtual machine %s...' %(instance),'%s/run.out' %(outdir))
now=datetime.datetime.now()
startday=now.day
starthr=now.hour
startmin=now.minute
#Create directories on AWS
if instance == 'p2.xlarge':
gpu='--gpu '
j='--j 2 '
mpi=2
numfiles=8
cost=0.9
if instance == 'p2.8xlarge':
gpu='--gpu '
j='--j 3 '
mpi=9
numfiles=50
cost=7.20
if instance == 'p2.16xlarge':
gpu='--gpu '
j='--j 3 '
mpi=17
numfiles=90
cost=14.40
if instance == 'g3.4xlarge':
gpu='--gpu '
j='--j 2 '
mpi=2
numfiles=8
cost=1.14
if instance == 'g3.8xlarge':
gpu='--gpu '
j='--j 2 '
mpi=3
numfiles=50
cost=2.28
if instance == 'g3.16xlarge':
gpu='--gpu '
j='--j 3 '
mpi=5
numfiles=90
cost=4.56
if instance == 'p3.2xlarge':
gpu='--gpu '
j='--j 2 '
mpi=2
numfiles=90
cost=3.06
if instance == 'p3.8xlarge':
gpu='--gpu '
j='--j 3 '
mpi=5
numfiles=90
cost=12.24
if instance == 'p3.16xlarge':
gpu='--gpu '
j='--j 3 '
mpi=9
numfiles=90
cost=24.48
#Launch instance
if os.path.exists('%s/awslog.log' %(outdir)):
os.remove('%s/awslog.log' %(outdir))
dirlocation = subprocess.Popen('echo $AWS_DATA_DIRECTORY', shell=True, stdout=subprocess.PIPE).stdout.read().split()[0]
if spotprice >0:
cmd='%s/launch_AWS_instance.py --spotPrice=%s --instance=%s --availZone=%s --volume=%s --dirname=%s --tag=%s -d | tee %s/awslog.log' %(awsdir,str(spotprice),instance,most_stable_region,volID,dirlocation,outdir,outdir)
else:
cmd='%s/launch_AWS_instance.py --instance=%s --availZone=%sa --volume=%s --dirname=%s --tag=%s -d | tee %s/awslog.log' %(awsdir,instance,awsregion,volID,dirlocation,outdir,outdir)
print("Launching AWS instance with command ",cmd)
proc = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE)
LaunchOut,LaunchErr = proc.communicate()
#Get instance ID, keypair, and username:IP
instanceID=subprocess.Popen('cat %s/awslog.log | grep ID' %(outdir), shell=True, stdout=subprocess.PIPE).stdout.read().split('ID:')[-1].strip()
print("instanceID is",instanceID)
print("KEYPAIR IS",subprocess.Popen('cat %s/awslog.log | grep ssh' %(outdir), shell=True, stdout=subprocess.PIPE).stdout.read())
keypair=subprocess.Popen('cat %s/awslog.log | grep ssh' %(outdir), shell=True, stdout=subprocess.PIPE).stdout.read().split()[3].strip()
userIP=subprocess.Popen('cat %s/awslog.log | grep ssh' %(outdir), shell=True, stdout=subprocess.PIPE).stdout.read().split('@')[-1].strip()
print("instance is",instance)
print("gpu is",gpu)
print("j is ",j)
print("mpi is",mpi)
print("numfiles is",numfiles)
print("cost is",cost)
env.host_string='ubuntu@%s' %(userIP)
env.key_filename = '%s' %(keypair)
if ebs_exist is False:
writeToLog('Started transferring %sGB at %s' %(actualsize,time.asctime(time.localtime(time.time()))),'%s/run.out' %(outdir))
dirlocation= subprocess.Popen('echo $AWS_DATA_DIRECTORY', shell=True, stdout=subprocess.PIPE).stdout.read().split()[0]
#if stack is False:
# for entry in particledir.split('/'):
# if len(entry.split('.star')) == 1:
# exec_remote_cmd('mkdir /%s/%s' %(dirlocation,entry))
# dirlocation=dirlocation+'/'+entry
exec_remote_cmd('mkdir -p %s '%(particledir))
if len(otherPartDir) == 0:
if stack is False:
#s3_to_ebs(userIP,keypair,bucketname,'/data/%s/' %(particledir),'%s/rclone' %(awsdir),key_ID,secret_ID,awsregion,numfiles)
s3_to_ebs(userIP,keypair,bucketname,'/%s/' %(particledir),'%s/rclone' %(awsdir),key_ID,secret_ID,awsregion,numfiles)
if stack is True:
#s3_to_ebs(userIP,keypair,bucketname,'/data/%s' %(particledir),'%s/rclone' %(awsdir),key_ID,secret_ID,awsregion,numfiles)
s3_to_ebs(userIP,keypair,bucketname,'/%s' %(particledir),'%s/rclone' %(awsdir),key_ID,secret_ID,awsregion,numfiles)
if len(otherPartDir) > 0:
#s3_to_ebs(userIP,keypair,bucketname,'/data/%s/' %(otherPartDir),'%s/rclone' %(awsdir),key_ID,secret_ID,awsregion,numfiles)
s3_to_ebs(userIP,keypair,bucketname,'/%s' %(otherPartDir),'%s/rclone' %(awsdir),key_ID,secret_ID,awsregion,numfiles)
writeToLog('Finished transfer at %s' %(time.asctime( time.localtime(time.time()) )),'%s/run.out' %(outdir))
#Make output directories
dirlocation = subprocess.Popen('echo $AWS_DATA_DIRECTORY', shell=True, stdout=subprocess.PIPE).stdout.read().split()[0]
outdirlist=outdir.split('/')
#exec_remote_cmd('mkdir %s'%particledir)
#exec_remote_cmd('echo'+particledir+' > /home/ubuntu/check.log')
del outdirlist[-1]
#for entry in outdirlist:
# exec_remote_cmd('mkdir /%s/%s' %(dirlocation,entry))
# dirlocation=dirlocation+'/'+entry
dirlocation = outdir
cmd='rsync -avuL --rsync-path="rsync" --log-file="%s/rsync.log" -e "ssh -q -o StrictHostKeyChecking=no -i %s" %s/ ubuntu@%s:%s > %s/rsync.log' %(outdir,keypair,outdir,userIP,outdir,outdir)
writeToLog(cmd,"%s/rsync.log" %(outdir))
subprocess.Popen(cmd,shell=True).wait()
if len(otherPartDir) > 0:
dirlocation= subprocess.Popen('echo $AWS_DATA_DIRECTORY', shell=True, stdout=subprocess.PIPE).stdout.read().split()[0]
partdirlist=particledir.split('/')
del partdirlist[-1]
#for entry in partdirlist:
# exec_remote_cmd('mkdir /%s/%s' %(dirlocation,entry))
# dirlocation=dirlocation+'/'+entry
exec_remote_cmd('mkdir -p %s' %(particledir))
writeToLog('mkdir -p %s' %(particledir),'%s/run.out' %(outdir))
# Sync particle directory with instance
cmd='rsync --rsync-path="rsync" --log-file="%s/rsync.log" -avuL -e "ssh -q -o StrictHostKeyChecking=no -i %s" %s/ ubuntu@%s:%s > %s/rsync.log' %(outdir,keypair,particledir,userIP,dirlocation,outdir)
writeToLog(cmd,"%s/rsync.log" %(outdir))
subprocess.Popen(cmd,shell=True).wait()
# if initmodel != 'None':
# #cmd='rsync --rsync-path="rsync" --log-file="%s/rsync.log" -avu -R -e "ssh -q -o StrictHostKeyChecking=no -i %s" %s ubuntu@%s:/data/ > %s/rsync.log' %(outdir,keypair,initmodel,userIP,outdir)
# cmd='rsync --rsync-path="rsync" --log-file="%s/rsync.log" -avu -R -e "ssh -q -o StrictHostKeyChecking=no -i %s" %s ubuntu@%s:/ > %s/rsync.log' %(outdir,keypair,initmodel,userIP,outdir)
# subprocess.Popen(cmd,shell=True).wait()
# if len(mask) > 0:
# #cmd='rsync --rsync-path="rsync" --log-file="%s/rsync.log" -avu -R -e "ssh -q -o StrictHostKeyChecking=no -i %s" %s ubuntu@%s:/data/ > %s/rsync.log' %(outdir,keypair,mask,userIP,outdir)
# cmd='rsync --rsync-path="rsync" --log-file="%s/rsync.log" -avu -R -e "ssh -q -o StrictHostKeyChecking=no -i %s" %s ubuntu@%s:/ > %s/rsync.log' %(outdir,keypair,mask,userIP,outdir)
# subprocess.Popen(cmd,shell=True).wait()
relion_remote_cmd='mpirun -np %i /home/EM_Packages/relion2.0/build/bin/relion_refine_mpi %s %s %s' %(mpi,relioncmd,j,gpu)
o2=open('run_aws.job','w')
o2.write('#!/bin/bash\n')
#o2.write('cd /data\n')
o2.write('cd %s \n' %(outdir))
o2.write('%s\n' %(relion_remote_cmd))
o2.close()
st = os.stat('run_aws.job')
os.chmod('run_aws.job', st.st_mode | stat.S_IEXEC)
# Sync run_aws.job to instance
cmd='rsync --rsync-path="rsync" --log-file="%s/rsync.log" -avu -e "ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i %s" run_aws.job ubuntu@%s:~/ > %s/rsync.log' %(outdir,keypair,userIP,outdir)
subprocess.Popen(cmd,shell=True).wait()
# configure LD_LIBRARY_PATH
cmd='ssh -q -n -f -i %s ubuntu@%s "export LD_LIBRARY_PATH=/home/EM_Packages/relion2.0/build/lib:$LD_LIBRARY_PATH && nohup ./run_aws.job > /%s/run.out 2> /%s/run.err < /dev/null &"' %(keypair,userIP,outdir,outdir)
subprocess.Popen(cmd,shell=True)
writeToLog('Job submitted to the cloud...','%s/run.out' %(outdir))
cmd='scp -q -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i %s %s/run.out ubuntu@%s:/%s > %s/rsync.log' %(keypair,outdir,userIP,outdir,outdir)
subprocess.Popen(cmd,shell=True)
isdone=0
# Sync instance with local storage while job is still running
while isdone == 0:
cmd='mkdir -p /%s; rsync --rsync-path="rsync" --log-file="%s/rsync.log" -avuL -e "ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i %s" ubuntu@%s:/%s/ %s > %s/rsync.log' %(outbasename,outdir,keypair,userIP,outdir,outdir,outdir)
writeToLog(cmd,"%s/rsync.log" %(outdir))
subprocess.Popen(cmd,shell=True).wait()
time.sleep(2)
if autoref == -1:
if os.path.exists('%s_it%03i_data.star' %(outbasename,int(numiters))):
isdone=1
if autoref != -1:
if os.path.exists('%s_class001.mrc' %(outbasename)):
isdone=1
#Check if job was specified to be killed
if isdone ==0:
isdone=check_and_kill_job('%s/note.txt' %(outdir),userIP,keypair)
#Check if there are any errors
if isdone == 0:
if os.path.exists('%s/run.err' %(outdir)):
if float(subprocess.Popen('cat %s/run.err | wc -l' %(outdir),shell=True, stdout=subprocess.PIPE).stdout.read().strip()) > 0:
writeToLog('\nError detected in run.err. Shutting down instance.','%s/run.out' %(outdir))
isdone=1
time.sleep(10)
time.sleep(30)
writeToLog('Job finished!','%s/run.out' %(outdir))
writeToLog('Shutting everything down ...','%s/run.out' %(outdir))
cmd=subprocess.Popen('aws ec2 terminate-instances --instance-ids %s > %s/tmp4949585940.txt' %(instanceID,outdir),shell=True, stdout=subprocess.PIPE).stdout.read().strip()
isdone=0
time.sleep(10)
while isdone == 0:
status=subprocess.Popen('aws ec2 describe-instances --instance-ids %s --query "Reservations[*].Instances[*].{State:State}" | grep Name'%(instanceID),shell=True, stdout=subprocess.PIPE).stdout.read().strip()
if 'terminated' in status:
isdone=1
time.sleep(10)
volID=subprocess.Popen('aws ec2 delete-volume --volume-id %s' %(volID),shell=True, stdout=subprocess.PIPE).stdout.read().strip()
now=datetime.datetime.now()
finday=now.day
finhr=now.hour
finmin=now.minute
if finday != startday:
finhr=finhr+24
deltaHr=finhr-starthr
if finmin > startmin:
deltaHr=deltaHr+1
if not os.path.exists('aws_relion_costs.txt'):
cmd="echo 'Input Output Cost ($)' >> aws_relion_costs.txt"
subprocess.Popen(cmd,shell=True).wait()
cmd="echo '-----------------------------------------------------------' >> aws_relion_costs.txt"
subprocess.Popen(cmd,shell=True).wait()
cmd='echo "%s %s %.02f " >> aws_relion_costs.txt' %(particledir,outdir,float(deltaHr)*float(cost))
subprocess.Popen(cmd,shell=True).wait()
#Update .aws_relion
if os.path.exists('.aws_relion_tmp'):
os.remove('.aws_relion_tmp')
if os.path.exists('.aws_relion'):
shutil.move('.aws_relion','.aws_relion_tmp')
tmpout=open('.aws_relion','w')
for line in open('.aws_relion_tmp','r'):
if line.split()[0] == particledir:
continue
tmpout.write(line)
tmpout.close()
os.remove('.aws_relion_tmp')
cmd='echo "%s %s %s" >> .aws_relion' %(particledir,bucketname,'')
subprocess.Popen(cmd,shell=True).wait()
#Cleanup
#if os.path.exists('%s/awslog.log' %(outdir)):
# os.remove('%s/awslog.log' %(outdir))
#if os.path.exists('%s/awsebs.log' %(outdir)):
# os.remove('%s/awsebs.log' %(outdir))
#if os.path.exists('run_aws.job'):
# os.remove('run_aws.job')
if os.path.exists('rclonetmplist1298.txt'):
os.remove('rclonetmplist1298.txt')
if os.path.exists('%s/.rclone.conf' %(outdir)):
os.remove('%s/.rclone.conf' %(outdir))
if os.path.exists('runningProcs.txt'):
os.remove('runningProcs.txt')
#==============================
def check_and_kill_job(note,IP,keypair):
o9=open(note,'r')
kill=0
for line in o9:
if len(line.split()) > 0:
if line.split()[0] == 'Kill':
kill=1
if line.split()[0] == 'kill':
kill=1
if kill == 1:
kill_job(keypair,IP)
o9.close()
return kill
#====================
def kill_job(keypair,IP):
env.host_string='ubuntu@%s' %(IP)
env.key_filename = '%s' %(keypair)
exec_remote_cmd('ps aux | grep mpi > runningProcs.txt')
cmd='scp -q -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i %s ubuntu@%s:~/runningProcs.txt .' %(keypair,IP)
subprocess.Popen(cmd,shell=True).wait()
pidlist=[]
for proc in open('runningProcs.txt','r'):
if 'refine_mpi' in proc:
pidlist.append(proc.split()[1])
for pid in pidlist:
exec_remote_cmd('kill -9 %s' %(pid))
#====================
def get_stable_instance_region(region,instance,spotprice):
cmd = "get_spot_duration.py --region=%s --product-description='Linux/UNIX' --bids=%s:%s "%(region,instance,spotprice)
proc=subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE)
out,err = proc.communicate()
most_stable_value = out.split("\n")[1].split("\t")[0]
most_stable_region = out.split("\n")[1].split("\t")[2]
for line in out.split("\n")[1:]:
if line is not '':
if float(line.split("\t")[0]) > float(most_stable_value):
most_stable_value = line.split("\t")[0]
most_stable_region = line.split("\t")[2]
return most_stable_region
#==============================
if __name__ == "__main__":
#Read relion command from appion
in_cmd=sys.argv[1]
#checkConflicts()
relion_refine_mpi(in_cmd)
|
{
"content_hash": "2b8f456e3c3d9b7239fc53da4ad4acaf",
"timestamp": "",
"source": "github",
"line_count": 1014,
"max_line_length": 249,
"avg_line_length": 39.87179487179487,
"alnum_prop": 0.6259213455354935,
"repo_name": "leschzinerlab/AWS",
"id": "e3c28899a8eb2707b82a596b2662250b5c842c0d",
"size": "40452",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "relion/apAWS.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "336104"
},
{
"name": "Shell",
"bytes": "1734"
}
],
"symlink_target": ""
}
|
"""
test_parse_tg
~~~~~~~~~~~~~
Test glass transition parser.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import unittest
from lxml import etree
from chemdataextractor.doc.text import Sentence
from chemdataextractor.parse.tg import tg_phrase
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
class TestParseTg(unittest.TestCase):
maxDiff = None
def do_parse(self, input, expected):
s = Sentence(input)
log.debug(s)
log.debug(s.tagged_tokens)
result = next(tg_phrase.scan(s.tagged_tokens))[0]
log.debug(etree.tostring(result, pretty_print=True, encoding='unicode'))
self.assertEqual(expected, etree.tostring(result, encoding='unicode'))
# Test: "glass transition temperature of"
def test_tg1(self):
s = 'The poly(azide) shows a glass transition temperature of 282.6 °C.'
expected = '<tg_phrase><tg><value>282.6</value><units>°C</units></tg></tg_phrase>'
self.do_parse(s, expected)
# Test: "glass transition temp. of with temperature withing '()'"
def test_tg2(self):
s = 'Differential scanning calorimetry revealed a glass transition temp. of (-19) ° for the homopolymer and 20° for the copolymer.'
expected = '<tg_phrase><tg><value>-19</value><units>°</units></tg></tg_phrase>'
self.do_parse(s, expected)
# Test: "Tg of "
def test_tg3(self):
s = 'Polymandelide is a glassy amorphous polymer with a Tg of 100 °C, with rheol.'
expected = '<tg_phrase><tg><value>100</value><units>°C</units></tg></tg_phrase>'
self.do_parse(s, expected)
# Test: "Tg of ca. (or Tg of about)"
def test_tg4(self):
s = 'It has been found that PSHQ4 has a Tg of ca. 130°'
expected = '<tg_phrase><tg><value>130</value><units>°</units></tg></tg_phrase>'
self.do_parse(s, expected)
# Test: (Tg) of
def test_tg5(self):
s = 'The resulting poly(AdS) had predicted mol. wts., narrow mol. wt. distributions, and high glass transition temp. (Tg) around 232 °C.'
expected = '<tg_phrase><tg><NN>Tg</NN><value>232</value><units>\xb0C</units></tg></tg_phrase>'
self.do_parse(s, expected)
# Test ommitting "transition"
def test_tg6(self):
s = 'One phase had a glass temp. of ∼-30°, corresponding to the amorphous ethylene segments.'
expected = '<tg_phrase><tg><value>∼-30</value><units>°</units></tg></tg_phrase>'
self.do_parse(s, expected)
# Test Tg: (or Tg >)
def test_tg7(self):
s = 'The four-armed compd. (ANTH-OXA6t-OC12) with the dodecyloxy surface group is a high glass transition temp. (Tg: 211°) material and exhibits good soly.'
#s = 'The four-armed compd. (ANTH-OXA6t-OC12) with the dodecyloxy surface group is a high glass transition temp. (Tg > 211°) material and exhibits good soly.'
expected = '<tg_phrase><tg><value>211</value><units>°</units></tg></tg_phrase>'
#expected = '<tg_phrase><tg><value>>211</value><units>°</units></tg></tg_phrase>'
self.do_parse(s, expected)
def test_tg8(self):
s= 'DSC experiments revealed that PGFDTDPP has a high glass-transition temperature at 150 °C compared with 90 °C for PGFDTDPP.'
expected = '<tg_phrase><tg><value>150</value><units>\xb0C</units></tg></tg_phrase>'
self.do_parse(s, expected)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "8316e034e532906b43d9b8ddb70df0e4",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 166,
"avg_line_length": 39.95505617977528,
"alnum_prop": 0.640607424071991,
"repo_name": "mcs07/ChemDataExtractor",
"id": "433620fe74edde0f4c9817efb732bedea9e85c42",
"size": "3602",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_parse_tg.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "540374"
},
{
"name": "Python",
"bytes": "833941"
},
{
"name": "Shell",
"bytes": "6106"
}
],
"symlink_target": ""
}
|
import sys
from SentenceExampleWriter import SentenceExampleWriter
import Utils.InteractionXML.IDUtils as IDUtils
try:
import xml.etree.cElementTree as ET
except ImportError:
import cElementTree as ET
import Utils.Range as Range
class PhraseTriggerExampleWriter(SentenceExampleWriter):
def __init__(self):
self.xType = "phrase"
SentenceExampleWriter.__init__(self)
def writeXMLSentence(self, examples, predictionsByExample, sentenceObject, classSet, classIds, goldSentence=None, exampleStyle=None, structureAnalyzer=None):
self.assertSameSentence(examples)
sentenceElement = sentenceObject.sentence
sentenceId = sentenceElement.get("id")
sentenceText = sentenceElement.get("text")
# detach analyses-element
sentenceAnalysesElement = None
sentenceAnalysesElement = sentenceElement.find("sentenceanalyses")
if sentenceAnalysesElement == None:
sentenceAnalysesElement = sentenceElement.find("analyses")
if sentenceAnalysesElement != None:
sentenceElement.remove(sentenceAnalysesElement)
# remove pairs and interactions
interactions = self.removeChildren(sentenceElement, ["pair", "interaction"])
# remove entities
newEntityIdCount = IDUtils.getNextFreeId(sentenceElement.findall("entity"))
nonNameEntities = self.removeNonNameEntities(sentenceElement)
# add new pairs
for example in examples:
prediction = predictionsByExample[example[0]]
entityElement = ET.Element("entity")
#entityElement.attrib["given"] = "False"
headToken = example[3]["t"]
for token in sentenceObject.tokens:
if token.get("id") == headToken:
headToken = token
break
entityElement.set("charOffset", example[3]["charOffset"])
entityElement.set("headOffset", headToken.get("charOffset"))
entityElement.set("phraseType", example[3]["ptype"])
entOffset = Range.charOffsetToSingleTuple(example[3]["charOffset"])
entityElement.set("text", sentenceText[entOffset[0]:entOffset[1]])
entityElement.set("id", sentenceId + ".e" + str(newEntityIdCount))
self.setElementType(entityElement, prediction, classSet, classIds)
newEntityIdCount += 1
sentenceElement.append(entityElement)
# re-attach the analyses-element
if sentenceAnalysesElement != None:
sentenceElement.append(sentenceAnalysesElement)
|
{
"content_hash": "b575964e81fce026c9545f3401e2d136",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 169,
"avg_line_length": 46.892857142857146,
"alnum_prop": 0.6675552170601675,
"repo_name": "ashishbaghudana/mthesis-ashish",
"id": "e3228b4c951e4659888e41eb7d1b248e5324290a",
"size": "2626",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "resources/tees/ExampleWriters/PhraseTriggerExampleWriter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "29153223"
},
{
"name": "HTML",
"bytes": "4208378"
},
{
"name": "Java",
"bytes": "64057"
},
{
"name": "Perl",
"bytes": "3148"
},
{
"name": "Python",
"bytes": "1699785"
},
{
"name": "Shell",
"bytes": "1733"
}
],
"symlink_target": ""
}
|
import _TetGen
# This file is compatible with both classic and new-style classes.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "this"):
if isinstance(value, class_type):
self.__dict__[name] = value.this
if hasattr(value,"thisown"): self.__dict__["thisown"] = value.thisown
del value.thisown
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static) or hasattr(self,name) or (name == "thisown"):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError,name
import types
try:
_object = types.ObjectType
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
del types
class tetgenio(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, tetgenio, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, tetgenio, name)
def __repr__(self):
return "<%s.%s; proxy of C++ tetgenio instance at %s>" % (self.__class__.__module__, self.__class__.__name__, self.this,)
FILENAMESIZE = _TetGen.tetgenio_FILENAMESIZE
INPUTLINESIZE = _TetGen.tetgenio_INPUTLINESIZE
__swig_getmethods__["init"] = lambda x: _TetGen.tetgenio_init
if _newclass:init = staticmethod(_TetGen.tetgenio_init)
__swig_setmethods__["firstnumber"] = _TetGen.tetgenio_firstnumber_set
__swig_getmethods__["firstnumber"] = _TetGen.tetgenio_firstnumber_get
if _newclass:firstnumber = property(_TetGen.tetgenio_firstnumber_get, _TetGen.tetgenio_firstnumber_set)
__swig_setmethods__["mesh_dim"] = _TetGen.tetgenio_mesh_dim_set
__swig_getmethods__["mesh_dim"] = _TetGen.tetgenio_mesh_dim_get
if _newclass:mesh_dim = property(_TetGen.tetgenio_mesh_dim_get, _TetGen.tetgenio_mesh_dim_set)
__swig_setmethods__["pointlist"] = _TetGen.tetgenio_pointlist_set
__swig_getmethods__["pointlist"] = _TetGen.tetgenio_pointlist_get
if _newclass:pointlist = property(_TetGen.tetgenio_pointlist_get, _TetGen.tetgenio_pointlist_set)
__swig_setmethods__["pointattributelist"] = _TetGen.tetgenio_pointattributelist_set
__swig_getmethods__["pointattributelist"] = _TetGen.tetgenio_pointattributelist_get
if _newclass:pointattributelist = property(_TetGen.tetgenio_pointattributelist_get, _TetGen.tetgenio_pointattributelist_set)
__swig_setmethods__["addpointlist"] = _TetGen.tetgenio_addpointlist_set
__swig_getmethods__["addpointlist"] = _TetGen.tetgenio_addpointlist_get
if _newclass:addpointlist = property(_TetGen.tetgenio_addpointlist_get, _TetGen.tetgenio_addpointlist_set)
__swig_setmethods__["addpointattributelist"] = _TetGen.tetgenio_addpointattributelist_set
__swig_getmethods__["addpointattributelist"] = _TetGen.tetgenio_addpointattributelist_get
if _newclass:addpointattributelist = property(_TetGen.tetgenio_addpointattributelist_get, _TetGen.tetgenio_addpointattributelist_set)
__swig_setmethods__["pointmarkerlist"] = _TetGen.tetgenio_pointmarkerlist_set
__swig_getmethods__["pointmarkerlist"] = _TetGen.tetgenio_pointmarkerlist_get
if _newclass:pointmarkerlist = property(_TetGen.tetgenio_pointmarkerlist_get, _TetGen.tetgenio_pointmarkerlist_set)
__swig_setmethods__["numberofpoints"] = _TetGen.tetgenio_numberofpoints_set
__swig_getmethods__["numberofpoints"] = _TetGen.tetgenio_numberofpoints_get
if _newclass:numberofpoints = property(_TetGen.tetgenio_numberofpoints_get, _TetGen.tetgenio_numberofpoints_set)
__swig_setmethods__["numberofpointattributes"] = _TetGen.tetgenio_numberofpointattributes_set
__swig_getmethods__["numberofpointattributes"] = _TetGen.tetgenio_numberofpointattributes_get
if _newclass:numberofpointattributes = property(_TetGen.tetgenio_numberofpointattributes_get, _TetGen.tetgenio_numberofpointattributes_set)
__swig_setmethods__["numberofaddpoints"] = _TetGen.tetgenio_numberofaddpoints_set
__swig_getmethods__["numberofaddpoints"] = _TetGen.tetgenio_numberofaddpoints_get
if _newclass:numberofaddpoints = property(_TetGen.tetgenio_numberofaddpoints_get, _TetGen.tetgenio_numberofaddpoints_set)
__swig_setmethods__["tetrahedronlist"] = _TetGen.tetgenio_tetrahedronlist_set
__swig_getmethods__["tetrahedronlist"] = _TetGen.tetgenio_tetrahedronlist_get
if _newclass:tetrahedronlist = property(_TetGen.tetgenio_tetrahedronlist_get, _TetGen.tetgenio_tetrahedronlist_set)
__swig_setmethods__["tetrahedronattributelist"] = _TetGen.tetgenio_tetrahedronattributelist_set
__swig_getmethods__["tetrahedronattributelist"] = _TetGen.tetgenio_tetrahedronattributelist_get
if _newclass:tetrahedronattributelist = property(_TetGen.tetgenio_tetrahedronattributelist_get, _TetGen.tetgenio_tetrahedronattributelist_set)
__swig_setmethods__["tetrahedronvolumelist"] = _TetGen.tetgenio_tetrahedronvolumelist_set
__swig_getmethods__["tetrahedronvolumelist"] = _TetGen.tetgenio_tetrahedronvolumelist_get
if _newclass:tetrahedronvolumelist = property(_TetGen.tetgenio_tetrahedronvolumelist_get, _TetGen.tetgenio_tetrahedronvolumelist_set)
__swig_setmethods__["neighborlist"] = _TetGen.tetgenio_neighborlist_set
__swig_getmethods__["neighborlist"] = _TetGen.tetgenio_neighborlist_get
if _newclass:neighborlist = property(_TetGen.tetgenio_neighborlist_get, _TetGen.tetgenio_neighborlist_set)
__swig_setmethods__["numberoftetrahedra"] = _TetGen.tetgenio_numberoftetrahedra_set
__swig_getmethods__["numberoftetrahedra"] = _TetGen.tetgenio_numberoftetrahedra_get
if _newclass:numberoftetrahedra = property(_TetGen.tetgenio_numberoftetrahedra_get, _TetGen.tetgenio_numberoftetrahedra_set)
__swig_setmethods__["numberofcorners"] = _TetGen.tetgenio_numberofcorners_set
__swig_getmethods__["numberofcorners"] = _TetGen.tetgenio_numberofcorners_get
if _newclass:numberofcorners = property(_TetGen.tetgenio_numberofcorners_get, _TetGen.tetgenio_numberofcorners_set)
__swig_setmethods__["numberoftetrahedronattributes"] = _TetGen.tetgenio_numberoftetrahedronattributes_set
__swig_getmethods__["numberoftetrahedronattributes"] = _TetGen.tetgenio_numberoftetrahedronattributes_get
if _newclass:numberoftetrahedronattributes = property(_TetGen.tetgenio_numberoftetrahedronattributes_get, _TetGen.tetgenio_numberoftetrahedronattributes_set)
__swig_setmethods__["facetlist"] = _TetGen.tetgenio_facetlist_set
__swig_getmethods__["facetlist"] = _TetGen.tetgenio_facetlist_get
if _newclass:facetlist = property(_TetGen.tetgenio_facetlist_get, _TetGen.tetgenio_facetlist_set)
__swig_setmethods__["facetmarkerlist"] = _TetGen.tetgenio_facetmarkerlist_set
__swig_getmethods__["facetmarkerlist"] = _TetGen.tetgenio_facetmarkerlist_get
if _newclass:facetmarkerlist = property(_TetGen.tetgenio_facetmarkerlist_get, _TetGen.tetgenio_facetmarkerlist_set)
__swig_setmethods__["numberoffacets"] = _TetGen.tetgenio_numberoffacets_set
__swig_getmethods__["numberoffacets"] = _TetGen.tetgenio_numberoffacets_get
if _newclass:numberoffacets = property(_TetGen.tetgenio_numberoffacets_get, _TetGen.tetgenio_numberoffacets_set)
__swig_setmethods__["holelist"] = _TetGen.tetgenio_holelist_set
__swig_getmethods__["holelist"] = _TetGen.tetgenio_holelist_get
if _newclass:holelist = property(_TetGen.tetgenio_holelist_get, _TetGen.tetgenio_holelist_set)
__swig_setmethods__["numberofholes"] = _TetGen.tetgenio_numberofholes_set
__swig_getmethods__["numberofholes"] = _TetGen.tetgenio_numberofholes_get
if _newclass:numberofholes = property(_TetGen.tetgenio_numberofholes_get, _TetGen.tetgenio_numberofholes_set)
__swig_setmethods__["regionlist"] = _TetGen.tetgenio_regionlist_set
__swig_getmethods__["regionlist"] = _TetGen.tetgenio_regionlist_get
if _newclass:regionlist = property(_TetGen.tetgenio_regionlist_get, _TetGen.tetgenio_regionlist_set)
__swig_setmethods__["numberofregions"] = _TetGen.tetgenio_numberofregions_set
__swig_getmethods__["numberofregions"] = _TetGen.tetgenio_numberofregions_get
if _newclass:numberofregions = property(_TetGen.tetgenio_numberofregions_get, _TetGen.tetgenio_numberofregions_set)
__swig_setmethods__["facetconstraintlist"] = _TetGen.tetgenio_facetconstraintlist_set
__swig_getmethods__["facetconstraintlist"] = _TetGen.tetgenio_facetconstraintlist_get
if _newclass:facetconstraintlist = property(_TetGen.tetgenio_facetconstraintlist_get, _TetGen.tetgenio_facetconstraintlist_set)
__swig_setmethods__["numberoffacetconstraints"] = _TetGen.tetgenio_numberoffacetconstraints_set
__swig_getmethods__["numberoffacetconstraints"] = _TetGen.tetgenio_numberoffacetconstraints_get
if _newclass:numberoffacetconstraints = property(_TetGen.tetgenio_numberoffacetconstraints_get, _TetGen.tetgenio_numberoffacetconstraints_set)
__swig_setmethods__["segmentconstraintlist"] = _TetGen.tetgenio_segmentconstraintlist_set
__swig_getmethods__["segmentconstraintlist"] = _TetGen.tetgenio_segmentconstraintlist_get
if _newclass:segmentconstraintlist = property(_TetGen.tetgenio_segmentconstraintlist_get, _TetGen.tetgenio_segmentconstraintlist_set)
__swig_setmethods__["numberofsegmentconstraints"] = _TetGen.tetgenio_numberofsegmentconstraints_set
__swig_getmethods__["numberofsegmentconstraints"] = _TetGen.tetgenio_numberofsegmentconstraints_get
if _newclass:numberofsegmentconstraints = property(_TetGen.tetgenio_numberofsegmentconstraints_get, _TetGen.tetgenio_numberofsegmentconstraints_set)
__swig_setmethods__["nodeconstraintlist"] = _TetGen.tetgenio_nodeconstraintlist_set
__swig_getmethods__["nodeconstraintlist"] = _TetGen.tetgenio_nodeconstraintlist_get
if _newclass:nodeconstraintlist = property(_TetGen.tetgenio_nodeconstraintlist_get, _TetGen.tetgenio_nodeconstraintlist_set)
__swig_setmethods__["numberofnodeconstraints"] = _TetGen.tetgenio_numberofnodeconstraints_set
__swig_getmethods__["numberofnodeconstraints"] = _TetGen.tetgenio_numberofnodeconstraints_get
if _newclass:numberofnodeconstraints = property(_TetGen.tetgenio_numberofnodeconstraints_get, _TetGen.tetgenio_numberofnodeconstraints_set)
__swig_setmethods__["pbcgrouplist"] = _TetGen.tetgenio_pbcgrouplist_set
__swig_getmethods__["pbcgrouplist"] = _TetGen.tetgenio_pbcgrouplist_get
if _newclass:pbcgrouplist = property(_TetGen.tetgenio_pbcgrouplist_get, _TetGen.tetgenio_pbcgrouplist_set)
__swig_setmethods__["numberofpbcgroups"] = _TetGen.tetgenio_numberofpbcgroups_set
__swig_getmethods__["numberofpbcgroups"] = _TetGen.tetgenio_numberofpbcgroups_get
if _newclass:numberofpbcgroups = property(_TetGen.tetgenio_numberofpbcgroups_get, _TetGen.tetgenio_numberofpbcgroups_set)
__swig_setmethods__["trifacelist"] = _TetGen.tetgenio_trifacelist_set
__swig_getmethods__["trifacelist"] = _TetGen.tetgenio_trifacelist_get
if _newclass:trifacelist = property(_TetGen.tetgenio_trifacelist_get, _TetGen.tetgenio_trifacelist_set)
__swig_setmethods__["adjtetlist"] = _TetGen.tetgenio_adjtetlist_set
__swig_getmethods__["adjtetlist"] = _TetGen.tetgenio_adjtetlist_get
if _newclass:adjtetlist = property(_TetGen.tetgenio_adjtetlist_get, _TetGen.tetgenio_adjtetlist_set)
__swig_setmethods__["trifacemarkerlist"] = _TetGen.tetgenio_trifacemarkerlist_set
__swig_getmethods__["trifacemarkerlist"] = _TetGen.tetgenio_trifacemarkerlist_get
if _newclass:trifacemarkerlist = property(_TetGen.tetgenio_trifacemarkerlist_get, _TetGen.tetgenio_trifacemarkerlist_set)
__swig_setmethods__["numberoftrifaces"] = _TetGen.tetgenio_numberoftrifaces_set
__swig_getmethods__["numberoftrifaces"] = _TetGen.tetgenio_numberoftrifaces_get
if _newclass:numberoftrifaces = property(_TetGen.tetgenio_numberoftrifaces_get, _TetGen.tetgenio_numberoftrifaces_set)
__swig_setmethods__["edgelist"] = _TetGen.tetgenio_edgelist_set
__swig_getmethods__["edgelist"] = _TetGen.tetgenio_edgelist_get
if _newclass:edgelist = property(_TetGen.tetgenio_edgelist_get, _TetGen.tetgenio_edgelist_set)
__swig_setmethods__["edgemarkerlist"] = _TetGen.tetgenio_edgemarkerlist_set
__swig_getmethods__["edgemarkerlist"] = _TetGen.tetgenio_edgemarkerlist_get
if _newclass:edgemarkerlist = property(_TetGen.tetgenio_edgemarkerlist_get, _TetGen.tetgenio_edgemarkerlist_set)
__swig_setmethods__["numberofedges"] = _TetGen.tetgenio_numberofedges_set
__swig_getmethods__["numberofedges"] = _TetGen.tetgenio_numberofedges_get
if _newclass:numberofedges = property(_TetGen.tetgenio_numberofedges_get, _TetGen.tetgenio_numberofedges_set)
def initialize(*args): return _TetGen.tetgenio_initialize(*args)
def deinitialize(*args): return _TetGen.tetgenio_deinitialize(*args)
def load_node_call(*args): return _TetGen.tetgenio_load_node_call(*args)
def load_node(*args): return _TetGen.tetgenio_load_node(*args)
def load_addnodes(*args): return _TetGen.tetgenio_load_addnodes(*args)
def load_pbc(*args): return _TetGen.tetgenio_load_pbc(*args)
def load_var(*args): return _TetGen.tetgenio_load_var(*args)
def load_mtr(*args): return _TetGen.tetgenio_load_mtr(*args)
def load_poly(*args): return _TetGen.tetgenio_load_poly(*args)
def load_off(*args): return _TetGen.tetgenio_load_off(*args)
def load_ply(*args): return _TetGen.tetgenio_load_ply(*args)
def load_stl(*args): return _TetGen.tetgenio_load_stl(*args)
def load_medit(*args): return _TetGen.tetgenio_load_medit(*args)
def load_plc(*args): return _TetGen.tetgenio_load_plc(*args)
def load_tetmesh(*args): return _TetGen.tetgenio_load_tetmesh(*args)
def save_nodes(*args): return _TetGen.tetgenio_save_nodes(*args)
def save_elements(*args): return _TetGen.tetgenio_save_elements(*args)
def save_faces(*args): return _TetGen.tetgenio_save_faces(*args)
def save_edges(*args): return _TetGen.tetgenio_save_edges(*args)
def save_neighbors(*args): return _TetGen.tetgenio_save_neighbors(*args)
def save_poly(*args): return _TetGen.tetgenio_save_poly(*args)
def readline(*args): return _TetGen.tetgenio_readline(*args)
def findnextfield(*args): return _TetGen.tetgenio_findnextfield(*args)
def readnumberline(*args): return _TetGen.tetgenio_readnumberline(*args)
def findnextnumber(*args): return _TetGen.tetgenio_findnextnumber(*args)
def __init__(self, *args):
_swig_setattr(self, tetgenio, 'this', _TetGen.new_tetgenio(*args))
_swig_setattr(self, tetgenio, 'thisown', 1)
def __del__(self, destroy=_TetGen.delete_tetgenio):
try:
if self.thisown: destroy(self)
except: pass
class tetgenioPtr(tetgenio):
def __init__(self, this):
_swig_setattr(self, tetgenio, 'this', this)
if not hasattr(self,"thisown"): _swig_setattr(self, tetgenio, 'thisown', 0)
self.__class__ = tetgenio
_TetGen.tetgenio_swigregister(tetgenioPtr)
tetgenio_init = _TetGen.tetgenio_init
class polygon(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, polygon, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, polygon, name)
def __repr__(self):
return "<%s.%s; proxy of C++ tetgenio::polygon instance at %s>" % (self.__class__.__module__, self.__class__.__name__, self.this,)
def __init__(self, *args):
_swig_setattr(self, polygon, 'this', _TetGen.new_polygon(*args))
_swig_setattr(self, polygon, 'thisown', 1)
def __del__(self, destroy=_TetGen.delete_polygon):
try:
if self.thisown: destroy(self)
except: pass
class polygonPtr(polygon):
def __init__(self, this):
_swig_setattr(self, polygon, 'this', this)
if not hasattr(self,"thisown"): _swig_setattr(self, polygon, 'thisown', 0)
self.__class__ = polygon
_TetGen.polygon_swigregister(polygonPtr)
class facet(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, facet, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, facet, name)
def __repr__(self):
return "<%s.%s; proxy of C++ tetgenio::facet instance at %s>" % (self.__class__.__module__, self.__class__.__name__, self.this,)
def __init__(self, *args):
_swig_setattr(self, facet, 'this', _TetGen.new_facet(*args))
_swig_setattr(self, facet, 'thisown', 1)
def __del__(self, destroy=_TetGen.delete_facet):
try:
if self.thisown: destroy(self)
except: pass
class facetPtr(facet):
def __init__(self, this):
_swig_setattr(self, facet, 'this', this)
if not hasattr(self,"thisown"): _swig_setattr(self, facet, 'thisown', 0)
self.__class__ = facet
_TetGen.facet_swigregister(facetPtr)
class pbcgroup(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, pbcgroup, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, pbcgroup, name)
def __repr__(self):
return "<%s.%s; proxy of C++ tetgenio::pbcgroup instance at %s>" % (self.__class__.__module__, self.__class__.__name__, self.this,)
def __init__(self, *args):
_swig_setattr(self, pbcgroup, 'this', _TetGen.new_pbcgroup(*args))
_swig_setattr(self, pbcgroup, 'thisown', 1)
def __del__(self, destroy=_TetGen.delete_pbcgroup):
try:
if self.thisown: destroy(self)
except: pass
class pbcgroupPtr(pbcgroup):
def __init__(self, this):
_swig_setattr(self, pbcgroup, 'this', this)
if not hasattr(self,"thisown"): _swig_setattr(self, pbcgroup, 'thisown', 0)
self.__class__ = pbcgroup
_TetGen.pbcgroup_swigregister(pbcgroupPtr)
tetrahedralize = _TetGen.tetrahedralize
allocate_array = _TetGen.allocate_array
set_val = _TetGen.set_val
get_val = _TetGen.get_val
allocate_facet_array = _TetGen.allocate_facet_array
add_tri = _TetGen.add_tri
|
{
"content_hash": "a67424425fa42b8cddca01a0fc9ef320",
"timestamp": "",
"source": "github",
"line_count": 289,
"max_line_length": 161,
"avg_line_length": 63.799307958477506,
"alnum_prop": 0.7249701703004664,
"repo_name": "minesense/VisTrails",
"id": "8794b175eca9f43726287cedb8c26fc4b8e4b437",
"size": "19551",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "contrib/TetGenBridge/TetGen.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1129"
},
{
"name": "Makefile",
"bytes": "768"
},
{
"name": "Mako",
"bytes": "66613"
},
{
"name": "PHP",
"bytes": "49302"
},
{
"name": "Python",
"bytes": "21260847"
},
{
"name": "R",
"bytes": "782836"
},
{
"name": "Ruby",
"bytes": "875"
},
{
"name": "SQLPL",
"bytes": "2323"
},
{
"name": "Shell",
"bytes": "26542"
},
{
"name": "TeX",
"bytes": "147247"
},
{
"name": "XSLT",
"bytes": "1090"
}
],
"symlink_target": ""
}
|
from kivy.uix.boxlayout import BoxLayout
from kivy.properties import StringProperty, NumericProperty
class PotionWidget(BoxLayout):
name = StringProperty(None)
minimum = NumericProperty(1)
maximum = NumericProperty(5)
current = NumericProperty(1)
potion_id = NumericProperty(None)
current_duration = NumericProperty(0)
minimum_duration = NumericProperty(0)
maximum_duration = NumericProperty(32767)
def update_current(self, val):
self.current = max(min(self.current + val, self.maximum), self.minimum)
def remove_self(self):
self.parent.remove_widget(self)
def set_duration(self, duration):
self.ids.slider.set_value(duration)
|
{
"content_hash": "f3eb6a9256ad0c8b73bcae6af5467a48",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 79,
"avg_line_length": 33.285714285714285,
"alnum_prop": 0.7195994277539342,
"repo_name": "Kovak/KivyNBT",
"id": "6564c885b07678bcd31c2c033e8b96029a43a22e",
"size": "699",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "uix/potionwidget.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "199280"
}
],
"symlink_target": ""
}
|
import base64
import os.path
import string
import sys
import tempfile
import zipfile
DEBUG_ZIPPEY = False
NAME = 'Zippey'
ENCODING = 'UTF-8'
def debug(msg):
'''Print debug message'''
if DEBUG_ZIPPEY:
sys.stderr.write('{0}: debug: {1}\n'.format(NAME, msg))
def error(msg):
'''Print error message'''
sys.stderr.write('{0}: error: {1}\n'.format(NAME, msg))
def init():
'''Initialize writing; set binary mode for windows'''
debug("Running on {}".format(sys.platform))
if sys.platform.startswith('win'):
import msvcrt
debug("Enable Windows binary workaround")
msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
def encode(input, output):
'''Encode into special VCS friendly format from input to output'''
debug("ENCODE was called")
tfp = tempfile.TemporaryFile(mode='w+b')
tfp.write(input.read())
zfp = zipfile.ZipFile(tfp, "r")
for name in zfp.namelist():
data = zfp.read(name)
text_extensions = ['.txt', '.html', '.xml']
extension = os.path.splitext(name)[1][1:].strip().lower()
try:
# Check if text data
data.decode(ENCODING)
try:
strdata = map(chr, data)
except TypeError:
strdata = data
if extension not in text_extensions and not all(
c in string.printable for c in strdata
):
raise UnicodeDecodeError(
ENCODING, "".encode(ENCODING), 0, 1,
"Artificial exception")
# Encode
debug("Appending text file '{}'".format(name))
output.write("{}|{}|A|{}\n".format(
len(data), len(data), name).encode(ENCODING))
output.write(data)
output.write("\n".encode(ENCODING))
except UnicodeDecodeError:
# Binary data
debug("Appending binary file '{}'".format(name))
raw_len = len(data)
data = base64.b64encode(data)
output.write("{}|{}|B|{}\n".format(
len(data), raw_len, name).encode(ENCODING))
output.write(data)
output.write("\n".encode(ENCODING))
zfp.close()
tfp.close()
def decode(input, output):
'''Decode from special VCS friendly format from input to output'''
debug("DECODE was called")
tfp = tempfile.TemporaryFile(mode='w+b')
zfp = zipfile.ZipFile(tfp, "w", zipfile.ZIP_DEFLATED)
while True:
meta = input.readline().decode(ENCODING)
if not meta:
break
(data_len, raw_len, mode, name) = [
t(s) for (t, s) in zip((int, int, str, str), meta.split('|'))]
if mode == 'A':
debug("Appending text file '{}'".format(name))
zfp.writestr(name.rstrip(), input.read(data_len))
input.read(1) # Skip last '\n'
elif mode == 'B':
debug("Appending binary file '{}'".format(name.rstrip()))
zfp.writestr(name.rstrip(), base64.b64decode(input.read(data_len)))
input.read(1) # Skip last '\n'
else:
# Should never reach here
zfp.close()
tfp.close()
error('Illegal mode "{}"'.format(mode))
sys.exit(1)
# Flush all writes
zfp.close()
# Write output
tfp.seek(0)
output.write(tfp.read())
tfp.close()
|
{
"content_hash": "feae30cff0e014a88c98cd05ff66fcb6",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 79,
"avg_line_length": 31.26126126126126,
"alnum_prop": 0.5510086455331412,
"repo_name": "Empiria/matador",
"id": "50bca14f925eec791b0ae9d96d6a0ed16aed158c",
"size": "7260",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/matador/zippey.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "64662"
}
],
"symlink_target": ""
}
|
"""Exceptions related to publishing TestResults"""
class PublishFailure(Exception):
"""An exception thrown when sending a test result to a publisher fails.
:param publisher: The publisher that failed to publish.
:param result: The result that failed to publish.
"""
def __init__(self, publisher, result):
super(PublishFailure, self).__init__(
"Exception while publishing a TestResult.")
self._publisher = publisher
self._result = result
def result(self):
"""Returns the result that could not be published."""
return self._result
def publisher(self):
"""Returns the publisher that could not be published to."""
return self._publisher
def __str__(self):
return "Could not publish {0} to {1}".format(self._result,
self._publisher)
class EnrichmentFailure(Exception):
"""An exception thrown when the enrichment of a validation fails.
:param publisher: The publisher the validation was enriched for.
:param validation: The validation that failed to be enriched.
:param values: The values that the validation was enriched with.
"""
def __init__(self, publisher, validation, values):
super(EnrichmentFailure, self).__init__(
"Exception while enriching a Validation.")
self._publisher = publisher
self._validation = validation
self._values = values
try:
self._valid_values = self._values._enriched_data
except AttributeError:
self._valid_values = "Missing enriched data field on validation"
def validation(self):
"""Returns the validation that failed to enrich."""
return self._validation
def publisher(self):
"""Returns the publisher that the enrichment was for."""
return self._publisher
def values(self):
"""Returns the enrichment values."""
return self._values
def __str__(self):
return "Could not enrich {} with {} (for {}). Validation contained" +\
"these enriched fields at time of failure:" +\
"{}".format(self._validation,
self._values,
self._publisher,
self._valid_values)
|
{
"content_hash": "ff07848886764e805feba75b48108a7e",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 78,
"avg_line_length": 32.59722222222222,
"alnum_prop": 0.6011930123561994,
"repo_name": "curtisallen/Alarmageddon",
"id": "75e9d9190c18ecec4547c7c724fc7d8f783617bc",
"size": "2347",
"binary": false,
"copies": "1",
"ref": "refs/heads/kafka-validator",
"path": "alarmageddon/publishing/exceptions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "250922"
},
{
"name": "Shell",
"bytes": "2342"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0004_message_last_reply_date'),
]
operations = [
migrations.AlterUniqueTogether(
name='moduleasset',
unique_together=set([]),
),
migrations.RemoveField(
model_name='moduleasset',
name='module',
),
migrations.AlterUniqueTogether(
name='moduledata',
unique_together=set([]),
),
migrations.RemoveField(
model_name='moduledata',
name='module',
),
migrations.DeleteModel(
name='ModuleAsset',
),
migrations.DeleteModel(
name='ModuleData',
),
]
|
{
"content_hash": "4eec5c367a2449bb1b63f1fbc8a822c2",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 48,
"avg_line_length": 23.457142857142856,
"alnum_prop": 0.5286236297198539,
"repo_name": "patchew-project/patchew",
"id": "d012b12e2844be447c8cbdbbf18907a12d1b4aed",
"size": "893",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "api/migrations/0005_auto_20160707_1007.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "57256"
},
{
"name": "HTML",
"bytes": "37890"
},
{
"name": "JavaScript",
"bytes": "40258"
},
{
"name": "Jinja",
"bytes": "1238"
},
{
"name": "Python",
"bytes": "500595"
},
{
"name": "Shell",
"bytes": "5477"
}
],
"symlink_target": ""
}
|
"""
LICENCE
-------
Copyright 2013 by Kitware, Inc. All Rights Reserved. Please refer to
KITWARE_LICENSE.TXT for licensing information, or contact General Counsel,
Kitware, Inc., 28 Corporate Drive, Clifton Park, NY 12065.
"""
from .VCDStoreBackendInterface import VCDStoreBackendInterface
from .SQLiteVCDStoreBackend import SQLiteVCDStoreBackend
|
{
"content_hash": "29ce4a61599d65a729cb683db45cd1d7",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 74,
"avg_line_length": 31.727272727272727,
"alnum_prop": 0.7994269340974212,
"repo_name": "anguoyang/SMQTK",
"id": "17875c78436709ba91fc4a97fbc3668c38e48d2a",
"size": "349",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "OLD_ROOT/Backend/SMQTK_Backend/VCDStore/implementations/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "168608"
},
{
"name": "C++",
"bytes": "1555888"
},
{
"name": "CMake",
"bytes": "133035"
},
{
"name": "CSS",
"bytes": "173954"
},
{
"name": "Cuda",
"bytes": "138262"
},
{
"name": "HTML",
"bytes": "353039"
},
{
"name": "Java",
"bytes": "197569"
},
{
"name": "JavaScript",
"bytes": "1953035"
},
{
"name": "Makefile",
"bytes": "8196"
},
{
"name": "Matlab",
"bytes": "46934"
},
{
"name": "Perl",
"bytes": "3476237"
},
{
"name": "Perl6",
"bytes": "286157"
},
{
"name": "Python",
"bytes": "2120427"
},
{
"name": "Shell",
"bytes": "4944"
},
{
"name": "TeX",
"bytes": "149162"
}
],
"symlink_target": ""
}
|
"""Beam utilities."""
import apache_beam as beam
import numpy as np
from typing import List, Tuple
class MeanVarianceCombineFn(beam.CombineFn):
"""Class implementing a beam transformation that computes dataset statistics.
Implements methods required by beam.CombineFn interface to be used in a
pipeline. Called during the dataset construction process to provide mean and
variance of the primary inputs in the dataset.
"""
def create_accumulator(self) -> Tuple[float, float, int]:
return 0.0, 0.0, 0
def add_input(
self,
accumulator: Tuple[float, float, int],
values: np.ndarray # pylint: disable=redefined-builtin
) -> Tuple[float, float, int]:
"""Includes input components to the running mean and added_variance.
Implementation below follows Welford's algorithm described in more detail at
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
in the Online algorithm section. `added_variance` corresponds to M2.
Args:
accumulator: Accumulator of mean, aggregated variance and count.
values: New set of values to be added to the accumulator.
Returns:
Updated accumulator that includes values from the input.
"""
mean, added_variance, count = accumulator
for value in np.nditer(values):
count += 1
new_mean = mean + (value - mean) / count
new_added_variance = added_variance + (value - mean) * (value - new_mean)
mean = new_mean
added_variance = new_added_variance
return new_mean, new_added_variance, count
def merge_accumulators(
self,
accumulators: List[Tuple[float, float, int]]
) -> Tuple[float, float, int]:
"""Merges accumulators to estimate the combined mean and added_variance."""
means, added_variances, counts = zip(*accumulators)
total_count = np.sum(counts)
added_mean = np.sum([means[i] * counts[i] for i in range(len(counts))])
new_mean = added_mean / total_count
new_added_variance = np.sum(
[added_variances[i] + counts[i] * (means[i] - new_mean)**2
for i in range(len(counts))])
return new_mean, new_added_variance, total_count
def extract_output(
self,
accumulator: Tuple[float, float, int]) -> Tuple[float, float]:
"""Extracts mean and variance."""
mean, added_variance, count = accumulator
if count > 1:
return mean, added_variance / (count - 1)
else:
return mean, 0
|
{
"content_hash": "c1779cf529f083d01504cb6f94878d99",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 80,
"avg_line_length": 35.89705882352941,
"alnum_prop": 0.6796394920114707,
"repo_name": "google-research/data-driven-advection",
"id": "76342056f39cb9ce388c021796e096f384397cd7",
"size": "3107",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datadrivenpdes/pipelines/beamlib.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2388361"
},
{
"name": "Python",
"bytes": "230365"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.utils.translation import ugettext as _
from django.contrib import admin
from rest_framework import serializers
class ProjectStatus(models.Model):
title = models.CharField(verbose_name=_("Title"),
max_length=250,
blank=False,
null=False)
description = models.TextField(verbose_name=_("Text"),
blank=True,
null=True)
is_done = models.BooleanField(verbose_name=_("Status represents project is done"),)
class Meta:
app_label = "crm"
verbose_name = _('Project Status')
verbose_name_plural = _('Project Status')
def __str__(self):
return str(self.id) + " " + str(self.title)
class OptionProjectStatus(admin.ModelAdmin):
list_display = ('id',
'title',
'description',
'is_done')
fieldsets = (
(_('Project Status'), {
'fields': ('title',
'description',
'is_done')
}),
)
save_as = True
class ProjectStatusJSONSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = ProjectStatus
fields = ('id',
'title',
'description',)
|
{
"content_hash": "d26bd06af033255efb489fbcc5610bba",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 87,
"avg_line_length": 29.595744680851062,
"alnum_prop": 0.5111430625449317,
"repo_name": "dario61081/koalixcrm",
"id": "ccebbc4992d059cb499c812a27ff2b60e63e261c",
"size": "1416",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "koalixcrm/crm/reporting/project_status.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "527"
},
{
"name": "Python",
"bytes": "275768"
},
{
"name": "XSLT",
"bytes": "287211"
}
],
"symlink_target": ""
}
|
from .avro import FlinkAvroDecoder, FlinkAvroDatumReader
__all__ = [
"FlinkAvroDatumReader",
"FlinkAvroDecoder"
]
|
{
"content_hash": "abd7898a42a22da2aa402915ff57d807",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 56,
"avg_line_length": 20.5,
"alnum_prop": 0.7235772357723578,
"repo_name": "lincoln-lil/flink",
"id": "eb2fc50337d9b5bb6644e792fb5f6d225e1a64aa",
"size": "1081",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "flink-python/pyflink/fn_execution/formats/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "20596"
},
{
"name": "Batchfile",
"bytes": "1863"
},
{
"name": "C",
"bytes": "847"
},
{
"name": "Cython",
"bytes": "137975"
},
{
"name": "Dockerfile",
"bytes": "5579"
},
{
"name": "FreeMarker",
"bytes": "101034"
},
{
"name": "GAP",
"bytes": "139876"
},
{
"name": "HTML",
"bytes": "188268"
},
{
"name": "HiveQL",
"bytes": "215858"
},
{
"name": "Java",
"bytes": "96014255"
},
{
"name": "JavaScript",
"bytes": "7038"
},
{
"name": "Less",
"bytes": "84321"
},
{
"name": "Makefile",
"bytes": "5134"
},
{
"name": "Python",
"bytes": "3100138"
},
{
"name": "Scala",
"bytes": "10647786"
},
{
"name": "Shell",
"bytes": "513298"
},
{
"name": "TypeScript",
"bytes": "381893"
},
{
"name": "q",
"bytes": "16945"
}
],
"symlink_target": ""
}
|
"""
@author: Exile
@date: 05-07-2016
@place: Cartagena - Colombia
@licence: Creative Common
"""
from django.contrib import admin
from exile_ui.admin import admin_site
from import_export.formats import base_formats
from import_export.admin import ExportMixin, ImportExportModelAdmin
from import_export import resources, fields
from plugins.pdf.format import PDF
class PdfExportMixin(ExportMixin):
def get_export_formats(self,):
formats = super(PdfExportMixin, self).get_export_formats()
return [PDF, base_formats.CSV, base_formats.XLSX]
# end def
# end class
registry = {}
def register_export(model, resource_class):
registry[model] = resource_class
# end def
old_register = admin_site.register
def register(model, *args):
if model in registry:
if len(args):
modeladmin = args[0]
else:
modeladmin = admin.ModelAdmin
# end if
class newadmin(PdfExportMixin, modeladmin):
resource_class = registry[model]
# end class
return old_register(model, newadmin)
# end if
return old_register(model, *args)
# end def
admin_site.register = register
|
{
"content_hash": "7f1d9aa95e0e57a264f4617ca4eca81a",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 67,
"avg_line_length": 24.166666666666668,
"alnum_prop": 0.6922413793103448,
"repo_name": "exildev/Piscix",
"id": "3a396eb55eb17bcfd66ff310bf9c2ba8e4d34334",
"size": "1206",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "informes/reports.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "58048"
},
{
"name": "HTML",
"bytes": "34644"
},
{
"name": "JavaScript",
"bytes": "94296"
},
{
"name": "Python",
"bytes": "120220"
},
{
"name": "Shell",
"bytes": "205"
}
],
"symlink_target": ""
}
|
"""Hue sensor entities."""
from aiohue.sensors import TYPE_ZLL_LIGHTLEVEL, TYPE_ZLL_TEMPERATURE
from homeassistant.const import (
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_TEMPERATURE,
TEMP_CELSIUS,
)
from homeassistant.helpers.entity import Entity
from .const import DOMAIN as HUE_DOMAIN
from .sensor_base import SENSOR_CONFIG_MAP, GenericZLLSensor
LIGHT_LEVEL_NAME_FORMAT = "{} light level"
TEMPERATURE_NAME_FORMAT = "{} temperature"
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Defer sensor setup to the shared sensor module."""
await hass.data[HUE_DOMAIN][
config_entry.entry_id
].sensor_manager.async_register_component(False, async_add_entities)
class GenericHueGaugeSensorEntity(GenericZLLSensor, Entity):
"""Parent class for all 'gauge' Hue device sensors."""
async def _async_update_ha_state(self, *args, **kwargs):
await self.async_update_ha_state(self, *args, **kwargs)
class HueLightLevel(GenericHueGaugeSensorEntity):
"""The light level sensor entity for a Hue motion sensor device."""
device_class = DEVICE_CLASS_ILLUMINANCE
unit_of_measurement = "lx"
@property
def state(self):
"""Return the state of the device."""
if self.sensor.lightlevel is None:
return None
# https://developers.meethue.com/develop/hue-api/supported-devices/#clip_zll_lightlevel
# Light level in 10000 log10 (lux) +1 measured by sensor. Logarithm
# scale used because the human eye adjusts to light levels and small
# changes at low lux levels are more noticeable than at high lux
# levels.
return round(float(10 ** ((self.sensor.lightlevel - 1) / 10000)), 2)
@property
def device_state_attributes(self):
"""Return the device state attributes."""
attributes = super().device_state_attributes
attributes.update(
{
"lightlevel": self.sensor.lightlevel,
"daylight": self.sensor.daylight,
"dark": self.sensor.dark,
"threshold_dark": self.sensor.tholddark,
"threshold_offset": self.sensor.tholdoffset,
}
)
return attributes
class HueTemperature(GenericHueGaugeSensorEntity):
"""The temperature sensor entity for a Hue motion sensor device."""
device_class = DEVICE_CLASS_TEMPERATURE
unit_of_measurement = TEMP_CELSIUS
@property
def state(self):
"""Return the state of the device."""
if self.sensor.temperature is None:
return None
return self.sensor.temperature / 100
SENSOR_CONFIG_MAP.update(
{
TYPE_ZLL_LIGHTLEVEL: {
"binary": False,
"name_format": LIGHT_LEVEL_NAME_FORMAT,
"class": HueLightLevel,
},
TYPE_ZLL_TEMPERATURE: {
"binary": False,
"name_format": TEMPERATURE_NAME_FORMAT,
"class": HueTemperature,
},
}
)
|
{
"content_hash": "164e5f3df1c24a7601091b3e8e68e410",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 95,
"avg_line_length": 31.652631578947368,
"alnum_prop": 0.6451612903225806,
"repo_name": "postlund/home-assistant",
"id": "5fa2ed683895ba8021c8369b3d5cd21403077f88",
"size": "3007",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/hue/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20215859"
},
{
"name": "Shell",
"bytes": "6663"
}
],
"symlink_target": ""
}
|
import logging
from blox.modules.module import module
from blox.exceptions import *
from xml.dom.minidom import parseString
logger = logging.getLogger( 'blox' )
class xmlparse(module):
def output(self, input, params):
logger.debug('processing xml')
if not input:
raise ParseException( 'Invalid input' )
xml = parseString( input )
if not xml:
raise ParseException( 'Invalid xml' )
return xml
|
{
"content_hash": "c60942d4efdd1bede935a3b883b29458",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 45,
"avg_line_length": 28.4,
"alnum_prop": 0.7112676056338029,
"repo_name": "pollett/blox",
"id": "7b155522412701aaeab9b171d1e6bde7e90f5ba2",
"size": "426",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blox/modules/xmlparse.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8697"
},
{
"name": "Shell",
"bytes": "1573"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
setup(name='django-starter-box',
version='0.1.1',
license='MIT',
description='Django starter box',
long_description='Starter box',
author='Lucas Tan',
author_email='do-not-spam@gmail.com',
url='http://github.com/lucastan/django-starter-box',
packages=find_packages(exclude=('djdemo',)),
include_package_data=True,
classifiers=[
'Development Status :: 4 - Beta',
'Framework :: Django',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
],
zip_safe=False,
)
|
{
"content_hash": "799cbf148f3e2b00af8b738eb5f7bbb1",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 56,
"avg_line_length": 32.75,
"alnum_prop": 0.6170483460559797,
"repo_name": "lucastan/django-starter-box",
"id": "65c688c0a4a68a22de1cefe8504513e636f5fcf1",
"size": "786",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "99"
},
{
"name": "HTML",
"bytes": "2078"
},
{
"name": "JavaScript",
"bytes": "836"
},
{
"name": "Python",
"bytes": "101698"
},
{
"name": "Shell",
"bytes": "52"
}
],
"symlink_target": ""
}
|
from charmhelpers.core import hookenv
from charms.layer.apache_bigtop_base import get_layer_opts, get_package_version
from charms.layer.bigtop_hive import Hive
from charms.reactive import (
RelationBase,
is_state,
remove_state,
set_state,
when,
when_not,
)
from charms.reactive.helpers import data_changed
@when('bigtop.available')
def report_status():
hadoop_joined = is_state('hadoop.joined')
hadoop_ready = is_state('hadoop.ready')
hbase_joined = is_state('hbase.joined')
hbase_ready = is_state('hbase.ready')
database_joined = is_state('database.connected')
database_ready = is_state('database.available')
hive_installed = is_state('hive.installed')
if not hadoop_joined:
hookenv.status_set('blocked',
'waiting for relation to hadoop plugin')
elif not hadoop_ready:
hookenv.status_set('waiting',
'waiting for hadoop to become ready')
elif database_joined and not database_ready:
hookenv.status_set('waiting',
'waiting for database to become ready')
elif hbase_joined and not hbase_ready:
hookenv.status_set('waiting',
'waiting for hbase to become ready')
elif hive_installed and not database_ready:
hookenv.status_set('active',
'ready (local metastore)')
elif hive_installed and database_ready:
hookenv.status_set('active',
'ready (remote metastore)')
@when('bigtop.available', 'hadoop.ready')
def install_hive(hadoop):
'''
Anytime our dependencies are available, check to see if we have a valid
reason to (re)install. These include:
- initial install
- HBase has joined/departed
'''
# Hive cannot handle - in the metastore db name and
# mysql uses the service name to name the db
if "-" in hookenv.service_name():
hookenv.status_set('blocked', "application name may not contain '-'; "
"redeploy with a different name")
return
# Get hbase connection dict if it's available
if is_state('hbase.ready'):
hbase = RelationBase.from_state('hbase.ready')
hbserver = hbase.hbase_servers()[0]
else:
hbserver = None
# Get zookeeper connection dict if it's available
if is_state('zookeeper.ready'):
zk = RelationBase.from_state('zookeeper.ready')
zks = zk.zookeepers()
else:
zks = None
# Use this to determine if we need to reinstall
deployment_matrix = {
'hbase': hbserver,
'zookeepers': zks
}
# Handle nuances when installing versus re-installing
if not is_state('hive.installed'):
prefix = "installing"
# On initial install, prime our kv with the current deployment matrix.
# Subsequent calls will use this to determine if a reinstall is needed.
data_changed('deployment_matrix', deployment_matrix)
else:
prefix = "configuring"
# Return if our matrix has not changed
if not data_changed('deployment_matrix', deployment_matrix):
return
hookenv.status_set('maintenance', '{} hive'.format(prefix))
hookenv.log("{} hive with: {}".format(prefix, deployment_matrix))
hive = Hive()
hive.install(hbase=hbserver, zk_units=zks)
hive.restart()
hive.open_ports()
set_state('hive.installed')
report_status()
# set app version string for juju status output
hive_version = get_package_version('hive') or 'unknown'
hookenv.application_version_set(hive_version)
@when('hive.installed', 'config.changed.heap')
def config_changed():
hookenv.status_set('maintenance', 'configuring with new options')
hive = Hive()
hive.configure_hive()
hive.restart()
report_status()
@when('hive.installed', 'database.available')
@when_not('hive.db.configured')
def configure_with_remote_db(db):
hookenv.status_set('maintenance', 'configuring external database')
hive = Hive()
hive.configure_remote_db(db)
hive.restart()
set_state('hive.db.configured')
report_status()
@when('hive.installed', 'hive.db.configured')
@when_not('database.available')
def configure_with_local_db():
'''
Reconfigure Hive using a local metastore db.
The initial installation will configure Hive with a local metastore_db.
Once an external db becomes available, we reconfigure Hive to use it. If
that external db goes away, we'll use this method to set Hive back into
local mode.
'''
hookenv.status_set('maintenance', 'configuring local database')
hive = Hive()
hive.configure_local_db()
hive.restart()
remove_state('hive.db.configured')
report_status()
@when('hive.installed')
@when_not('hadoop.ready')
def stop_hive():
'''
Hive depends on Hadoop. If we are installed and hadoop goes away, shut down
services and remove our installed state.
'''
hive = Hive()
hive.close_ports()
hive.stop()
remove_state('hive.installed')
report_status()
@when('hive.installed', 'client.joined')
def serve_client(client):
'''
Inform clients when hive is ready to serve.
'''
port = get_layer_opts().port('hive-thrift')
client.send_port(port)
client.set_ready()
@when('client.joined')
@when_not('hive.installed')
def stop_serving_client(client):
'''
Inform connected clients that Hive is no longer ready. This can happen
if Hadoop goes away (the 'installed' state will be removed).
'''
client.clear_ready()
|
{
"content_hash": "a2ab0c40fb1f671b4442ef3abfc4ed36",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 79,
"avg_line_length": 31.43016759776536,
"alnum_prop": 0.6477070742979026,
"repo_name": "mbalassi/bigtop",
"id": "bf4ce2b82da2d225e136a41419c5da7201fb8588",
"size": "6408",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "bigtop-packages/src/charm/hive/layer-hive/reactive/hive.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4822"
},
{
"name": "Groff",
"bytes": "49282"
},
{
"name": "Groovy",
"bytes": "579842"
},
{
"name": "Java",
"bytes": "650838"
},
{
"name": "Makefile",
"bytes": "60209"
},
{
"name": "PigLatin",
"bytes": "15615"
},
{
"name": "Puppet",
"bytes": "147130"
},
{
"name": "Python",
"bytes": "41447"
},
{
"name": "Ruby",
"bytes": "26396"
},
{
"name": "Scala",
"bytes": "85334"
},
{
"name": "Shell",
"bytes": "598568"
},
{
"name": "XSLT",
"bytes": "1323"
}
],
"symlink_target": ""
}
|
'''
GUI written in QT5 to perform the Laplace Approximation
'''
from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QHBoxLayout, QPushButton, QComboBox, QLabel, QLineEdit, QMessageBox, QMainWindow, QShortcut, QGroupBox,QFormLayout,QSpinBox
from PyQt5.QtGui import QStandardItemModel, QStandardItem
from PyQt5.QtCore import Qt
import time
# Make sure that we are using QT5
import matplotlib
matplotlib.use('Qt5Agg')
import numpy as np
import sys
import biasd as b
from smd_loader import ui_loader
class laplace(QWidget):
def __init__(self,parent):
super(QWidget,self).__init__(parent=parent)
self.initialize()
def initialize(self):
vbox = QVBoxLayout()
bload = QPushButton("Select Trajectory")
self.brun = QPushButton("Run single")
self.bbatch = QPushButton("Batch all like this")
gb = QGroupBox('Options')
form = QFormLayout()
self.sb_nrestarts = QSpinBox()
self.sb_nrestarts.setRange(1,100)
self.sb_nrestarts.setValue(5)
form.addRow("Number of Restarts",self.sb_nrestarts)
gb.setLayout(form)
bload.clicked.connect(self.load_trajectory)
self.brun.clicked.connect(self.run)
self.bbatch.clicked.connect(self.batch)
[b.setEnabled(False) for b in [self.brun,self.bbatch]]
vbox.addWidget(bload)
vbox.addWidget(gb)
hbox = QHBoxLayout()
hbox.addStretch(1)
hbox.addWidget(self.brun)
hbox.addWidget(self.bbatch)
hbox.addStretch(1)
vbox.addLayout(hbox)
vbox.addStretch(1)
self.setLayout(vbox)
self.setFocus()
self.setWindowTitle('Laplace Approximation')
self.show()
def get_smd_filename(self):
return self.parent().parent().get_smd_filename()
def load_trajectory(self):
try:
self.loader.close()
except:
pass
self.loader = ui_loader(self,select=True)
self.loader.show()
def select_callback(self,location):
self.loader.close()
l = location.split('/')
if len(l) == 3:
if l[0].startswith('trajectory') and l[1] == 'data':
self.tloc = l[0]
self.dname = l[2]
self.get_tau()
self.parent().parent().parent().statusBar().showMessage("Chose %s"%(self.tloc))
[b.setEnabled(True) for b in [self.brun,self.bbatch]]
self.totaltraj = 1
self.currenttraj = 1
return
else:
QMessageBox.critical(self,"Couldn't load this dataset","Couldn't load this from the HDF5 file as a dataset. Make sure you are selecting a dataset.")
def get_tau(self):
try:
fn = self.get_smd_filename()
f = b.smd.load(fn)
if f[self.tloc+'/data'].keys().count('Time') > 0:
t = f[self.tloc+'/data/Time'].value
elif f[self.tloc+'/data'].keys().count('time') > 0:
t = f[self.tloc+'/data/time'].value
self.tau = t[1] - t[0]
f.close()
except:
try:
f.close()
except:
pass
self.tau = None
def run(self,prompt=True):
if not self.tloc is None:
priors = self.parent().parent().priors
nr = self.sb_nrestarts.value()
if prompt:
reply = QMessageBox.question(self,'Run?',"Are you sure you want to run this?")
else:
reply = QMessageBox.Yes
if reply == QMessageBox.Yes:
try:
self.parent().parent().parent().statusBar().showMessage("Running %d/%d...."%(self.currenttraj,self.totaltraj))
fn = self.get_smd_filename()
f = b.smd.load(fn)
g = f[self.tloc]
gname = 'Laplace Analysis '+time.ctime()
gg = g.create_group(gname)
b.smd.add.parameter_collection(gg,priors,label='Priors')
gg.attrs['completed'] = 'False'
gg.attrs['program'] = 'BIASD GUI'
traj = g['data/'+self.dname].value
traj = traj[np.isfinite(traj)]
b.smd.save(f)
result = b.laplace.laplace_approximation(traj, priors, self.tau, nrestarts=nr)
f = b.smd.load(fn)
g = f[self.tloc+'/'+gname]
b.smd.add.laplace_posterior(g,result,label='Posterior')
g.attrs['completed'] = 'True'
g.attrs['number of restarts'] = nr
b.smd.save(f)
except:
try:
f.close()
except:
pass
self.parent().parent().parent().statusBar().showMessage("Laplace Crashed...")
self.parent().parent().log.new('Crashed Laplace Approximation on %s/data/%s'%(self.tloc,self.dname))
self.parent().parent().parent().statusBar().showMessage("Completed Laplace on %s"%(self.tloc))
self.parent().parent().log.new('Completed Laplace Approximation on %s/data/%s'%(self.tloc,self.dname))
def batch(self):
reply = QMessageBox.question(self,'Run?',"Are you sure you want to process all of the trajectories like this?")
if reply == QMessageBox.Yes:
fn = self.get_smd_filename()
f = b.smd.load(fn)
tlist = [i for i in f.keys() if i.startswith('trajectory')]
f.close()
self.totaltraj = len(tlist)
t0 = time.time()
for t in tlist:
try:
self.tloc = t
self.get_tau()
self.run(prompt=False)
self.currenttraj += 1
except:
pass
t1 = time.time()
self.parent().parent().parent().statusBar().showMessage("Completed batch mode Laplace...")
QMessageBox.information(self,"Complete","Batch mode complete in %f seconds"%((t1-t0)))
def keyPressEvent(self,event):
if event.key() == Qt.Key_Escape:
self.parent().close()
if event.key() == Qt.Key_S:
self.load_trajectory()
class ui_laplace(QMainWindow):
def __init__(self,parent=None):
super(QMainWindow,self).__init__(parent)
self.ui = laplace(self)
self.setCentralWidget(self.ui)
self.show()
def closeEvent(self,event):
self.parent().activateWindow()
self.parent().raise_()
self.parent().setFocus()
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
w = ui_laplace()
sys.exit(app.exec_())
|
{
"content_hash": "7266ba83a979ecbba7b2d85329ceadc7",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 187,
"avg_line_length": 30.043243243243243,
"alnum_prop": 0.6658870097157251,
"repo_name": "ckinzthompson/biasd",
"id": "837c276db219ac046329fc5394838be2840f1db9",
"size": "5584",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "biasd/gui/laplace.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7730"
},
{
"name": "C",
"bytes": "15704"
},
{
"name": "Cuda",
"bytes": "7086"
},
{
"name": "HTML",
"bytes": "282"
},
{
"name": "Makefile",
"bytes": "8696"
},
{
"name": "Python",
"bytes": "152650"
}
],
"symlink_target": ""
}
|
from django.conf import settings
ADMIN_THUMB_HEIGHT = 140
IMAGE_CROPPING_RATIO = getattr(settings, 'GALLERIES_IMAGE_CROPPING_RATIO', '16x9')
# Find the ADMIN_THUMBNAIL_SIZE based on the given IMAGE_CROPPING_RATIO
width, height = [int(i) for i in IMAGE_CROPPING_RATIO.split("x")]
ADMIN_THUMBNAIL_SIZE = getattr(settings, 'IMAGE_CROPPING_THUMB_SIZE', tuple([int((ADMIN_THUMB_HEIGHT/height)*width), int((ADMIN_THUMB_HEIGHT/height)*height)]))
|
{
"content_hash": "91f07f4bd215fa98fa83297dbea445fe",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 159,
"avg_line_length": 49.111111111111114,
"alnum_prop": 0.7579185520361991,
"repo_name": "espenhogbakk/django-galleries",
"id": "a9df6fa0ee341975708a38bdb8f89b90471d1295",
"size": "442",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "galleries/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14532"
},
{
"name": "JavaScript",
"bytes": "70766"
},
{
"name": "Python",
"bytes": "18163"
}
],
"symlink_target": ""
}
|
import logging
import os
import pickle
from typing import List, Optional
import numpy as np
import torch
from anndata import AnnData, read
from scvi import _CONSTANTS
from scvi.data import transfer_anndata_setup
from scvi.core.models import BaseModelClass, VAEMixin
from scvi.core.modules import JVAE, Classifier
from scvi.core.trainers import JVAETrainer
from scvi.core.trainers.jvae_trainer import JvaeDataLoader
from scvi.model._utils import _get_var_names_from_setup_anndata
logger = logging.getLogger(__name__)
def _unpack_tensors(tensors):
x = tensors[_CONSTANTS.X_KEY].squeeze_(0)
local_l_mean = tensors[_CONSTANTS.LOCAL_L_MEAN_KEY].squeeze_(0)
local_l_var = tensors[_CONSTANTS.LOCAL_L_VAR_KEY].squeeze_(0)
batch_index = tensors[_CONSTANTS.BATCH_KEY].squeeze_(0)
y = tensors[_CONSTANTS.LABELS_KEY].squeeze_(0)
return x, local_l_mean, local_l_var, batch_index, y
class GIMVI(VAEMixin, BaseModelClass):
"""
Joint VAE for imputing missing genes in spatial data [Lopez19]_.
Parameters
----------
adata_seq
AnnData object that has been registered via :func:`~scvi.data.setup_anndata`
and contains RNA-seq data.
adata_spatial
AnnData object that has been registered via :func:`~scvi.data.setup_anndata`
and contains spatial data.
n_hidden
Number of nodes per hidden layer.
generative_distributions
List of generative distribution for adata_seq data and adata_spatial data.
model_library_size
List of bool of whether to model library size for adata_seq and adata_spatial.
n_latent
Dimensionality of the latent space.
use_cuda
Use the GPU or not.
**model_kwargs
Keyword args for :class:`~scvi.core.modules.JVAE`
Examples
--------
>>> adata_seq = anndata.read_h5ad(path_to_anndata_seq)
>>> adata_spatial = anndata.read_h5ad(path_to_anndata_spatial)
>>> scvi.data.setup_anndata(adata_seq)
>>> scvi.data.setup_anndata(adata_spatial)
>>> vae = scvi.model.GIMVI(adata_seq, adata_spatial)
>>> vae.train(n_epochs=400)
"""
def __init__(
self,
adata_seq: AnnData,
adata_spatial: AnnData,
generative_distributions: List = ["zinb", "nb"],
model_library_size: List = [True, False],
n_latent: int = 10,
use_cuda: bool = True,
**model_kwargs,
):
super(GIMVI, self).__init__(use_cuda=use_cuda)
self.use_cuda = use_cuda and torch.cuda.is_available()
self.adatas = [adata_seq, adata_spatial]
self.scvi_setup_dicts_ = {
"seq": adata_seq.uns["_scvi"],
"spatial": adata_spatial.uns["_scvi"],
}
seq_var_names = _get_var_names_from_setup_anndata(adata_seq)
spatial_var_names = _get_var_names_from_setup_anndata(adata_spatial)
if not set(spatial_var_names) <= set(seq_var_names):
raise ValueError("spatial genes needs to be subset of seq genes")
spatial_gene_loc = [
np.argwhere(seq_var_names == g)[0] for g in spatial_var_names
]
spatial_gene_loc = np.concatenate(spatial_gene_loc)
gene_mappings = [slice(None), spatial_gene_loc]
sum_stats = [d.uns["_scvi"]["summary_stats"] for d in self.adatas]
n_inputs = [s["n_vars"] for s in sum_stats]
total_genes = adata_seq.uns["_scvi"]["summary_stats"]["n_vars"]
# since we are combining datasets, we need to increment the batch_idx
# of one of the datasets
adata_seq_n_batches = adata_seq.uns["_scvi"]["summary_stats"]["n_batch"]
adata_spatial.obs["_scvi_batch"] += adata_seq_n_batches
n_batches = sum([s["n_batch"] for s in sum_stats])
self.model = JVAE(
n_inputs,
total_genes,
gene_mappings,
generative_distributions,
model_library_size,
n_batch=n_batches,
n_latent=n_latent,
**model_kwargs,
)
self._model_summary_string = (
"GimVI Model with the following params: \nn_latent: {}, n_inputs: {}, n_genes: {}, "
+ "n_batch: {}, generative distributions: {}"
).format(n_latent, n_inputs, total_genes, n_batches, generative_distributions)
self.init_params_ = self._get_init_params(locals())
@property
def _trainer_class(self):
return JVAETrainer
@property
def _scvi_dl_class(self):
return JvaeDataLoader
def train(
self,
n_epochs: Optional[int] = 200,
kappa: Optional[int] = 5,
discriminator: Optional[Classifier] = None,
train_size: float = 0.9,
frequency: int = 1,
n_epochs_kl_warmup: int = 400,
train_fun_kwargs: dict = {},
**kwargs,
):
"""
Train the model.
Parameters
----------
n_epochs
Number of passes through the dataset.
kappa
Scaling parameter for the discriminator loss.
discriminator
:class:`~scvi.core.modules.Classifier` object.
train_size
Size of training set in the range [0.0, 1.0].
frequency
Frequency with which metrics are computed on the data for train/test/val sets.
n_epochs_kl_warmup
Number of passes through dataset for scaling term on KL divergence to go from 0 to 1.
train_fun_kwargs
Keyword args for the train method of :class:`~scvi.core.trainers.trainer.Trainer`.
**kwargs
Other keyword args for :class:`~scvi.core.trainers.trainer.Trainer`.
"""
train_fun_kwargs = dict(train_fun_kwargs)
if discriminator is None:
discriminator = Classifier(self.model.n_latent, 32, 2, 3, logits=True)
self.trainer = JVAETrainer(
self.model,
discriminator,
self.adatas,
train_size,
frequency=frequency,
kappa=kappa,
n_epochs_kl_warmup=n_epochs_kl_warmup,
)
logger.info("Training for {} epochs.".format(n_epochs))
self.trainer.train(n_epochs=n_epochs, **train_fun_kwargs)
self.is_trained_ = True
self.history_ = self.trainer.history
def _make_scvi_dls(self, adatas: List[AnnData] = None, batch_size=128):
if adatas is None:
adatas = self.adatas
post_list = [
self._make_scvi_dl(adata, mode=i) for i, adata in enumerate(adatas)
]
return post_list
def get_latent_representation(
self,
adatas: List[AnnData] = None,
deterministic: bool = True,
batch_size: int = 128,
) -> List[np.ndarray]:
"""
Return the latent space embedding for each dataset.
Parameters
----------
adatas
List of adata seq and adata spatial.
deterministic
If true, use the mean of the encoder instead of a Gaussian sample.
batch_size
Minibatch size for data loading into model.
"""
if adatas is None:
adatas = self.adatas
scdls = self._make_scvi_dls(adatas, batch_size=batch_size)
self.model.eval()
latents = []
for mode, scdl in enumerate(scdls):
latent = []
for tensors in scdl:
(
sample_batch,
local_l_mean,
local_l_var,
batch_index,
label,
*_,
) = _unpack_tensors(tensors)
latent.append(
self.model.sample_from_posterior_z(
sample_batch, mode, deterministic=deterministic
)
)
latent = torch.cat(latent).cpu().detach().numpy()
latents.append(latent)
return latents
def get_imputed_values(
self,
adatas: List[AnnData] = None,
deterministic: bool = True,
normalized: bool = True,
decode_mode: Optional[int] = None,
batch_size: int = 128,
) -> List[np.ndarray]:
"""
Return imputed values for all genes for each dataset.
Parameters
----------
adatas
List of adata seq and adata spatial
deterministic
If true, use the mean of the encoder instead of a Gaussian sample for the latent vector.
normalized
Return imputed normalized values or not.
decode_mode
If a `decode_mode` is given, use the encoder specific to each dataset as usual but use
the decoder of the dataset of id `decode_mode` to impute values.
batch_size
Minibatch size for data loading into model.
"""
self.model.eval()
if adatas is None:
adatas = self.adatas
scdls = self._make_scvi_dls(adatas, batch_size=batch_size)
imputed_values = []
for mode, scdl in enumerate(scdls):
imputed_value = []
for tensors in scdl:
(
sample_batch,
local_l_mean,
local_l_var,
batch_index,
label,
*_,
) = _unpack_tensors(tensors)
if normalized:
imputed_value.append(
self.model.sample_scale(
sample_batch,
mode,
batch_index,
label,
deterministic=deterministic,
decode_mode=decode_mode,
)
)
else:
imputed_value.append(
self.model.sample_rate(
sample_batch,
mode,
batch_index,
label,
deterministic=deterministic,
decode_mode=decode_mode,
)
)
imputed_value = torch.cat(imputed_value).cpu().detach().numpy()
imputed_values.append(imputed_value)
return imputed_values
def save(
self,
dir_path: str,
overwrite: bool = False,
save_anndata: bool = False,
**anndata_write_kwargs,
):
"""
Save the state of the model.
Neither the trainer optimizer state nor the trainer history are saved.
Model files are not expected to be reproducibly saved and loaded across versions
until we reach version 1.0.
Parameters
----------
dir_path
Path to a directory.
overwrite
Overwrite existing data or not. If `False` and directory
already exists at `dir_path`, error will be raised.
save_anndata
If True, also saves the anndata
anndata_write_kwargs
Kwargs for anndata write function
"""
# get all the user attributes
user_attributes = self._get_user_attributes()
# only save the public attributes with _ at the very end
user_attributes = {a[0]: a[1] for a in user_attributes if a[0][-1] == "_"}
# save the model state dict and the trainer state dict only
if not os.path.exists(dir_path) or overwrite:
os.makedirs(dir_path, exist_ok=overwrite)
else:
raise ValueError(
"{} already exists. Please provide an unexisting directory for saving.".format(
dir_path
)
)
if save_anndata:
dataset_names = ["seq", "spatial"]
for i in range(len(self.adatas)):
save_path = os.path.join(
dir_path, "adata_{}.h5ad".format(dataset_names[i])
)
self.adatas[i].write(save_path)
varnames_save_path = os.path.join(
dir_path, "var_names_{}.csv".format(dataset_names[i])
)
var_names = self.adatas[i].var_names.astype(str)
var_names = var_names.to_numpy()
np.savetxt(varnames_save_path, var_names, fmt="%s")
model_save_path = os.path.join(dir_path, "model_params.pt")
attr_save_path = os.path.join(dir_path, "attr.pkl")
torch.save(self.model.state_dict(), model_save_path)
with open(attr_save_path, "wb") as f:
pickle.dump(user_attributes, f)
@classmethod
def load(
cls,
dir_path: str,
adata_seq: Optional[AnnData] = None,
adata_spatial: Optional[AnnData] = None,
use_cuda: bool = False,
):
"""
Instantiate a model from the saved output.
Parameters
----------
adata_seq
AnnData organized in the same way as data used to train model.
It is not necessary to run :func:`~scvi.data.setup_anndata`,
as AnnData is validated against the saved `scvi` setup dictionary.
AnnData must be registered via :func:`~scvi.data.setup_anndata`.
adata_spatial
AnnData organized in the same way as data used to train model.
If None, will check for and load anndata saved with the model.
dir_path
Path to saved outputs.
use_cuda
Whether to load model on GPU.
Returns
-------
Model with loaded state dictionaries.
Examples
--------
>>> vae = GIMVI.load(adata_seq, adata_spatial, save_path)
>>> vae.get_latent_representation()
"""
model_path = os.path.join(dir_path, "model_params.pt")
setup_dict_path = os.path.join(dir_path, "attr.pkl")
seq_data_path = os.path.join(dir_path, "adata_seq.h5ad")
spatial_data_path = os.path.join(dir_path, "adata_spatial.h5ad")
seq_var_names_path = os.path.join(dir_path, "var_names_seq.csv")
spatial_var_names_path = os.path.join(dir_path, "var_names_spatial.csv")
if adata_seq is None and os.path.exists(seq_data_path):
adata_seq = read(seq_data_path)
elif adata_seq is None and not os.path.exists(seq_data_path):
raise ValueError(
"Save path contains no saved anndata and no adata was passed."
)
if adata_spatial is None and os.path.exists(spatial_data_path):
adata_spatial = read(spatial_data_path)
elif adata_spatial is None and not os.path.exists(spatial_data_path):
raise ValueError(
"Save path contains no saved anndata and no adata was passed."
)
adatas = [adata_seq, adata_spatial]
seq_var_names = np.genfromtxt(seq_var_names_path, delimiter=",", dtype=str)
spatial_var_names = np.genfromtxt(
spatial_var_names_path, delimiter=",", dtype=str
)
var_names = [seq_var_names, spatial_var_names]
for i, adata in enumerate(adatas):
saved_var_names = var_names[i]
user_var_names = adata.var_names.astype(str)
if not np.array_equal(saved_var_names, user_var_names):
logger.warning(
"var_names for adata passed in does not match var_names of "
"adata used to train the model. For valid results, the vars "
"need to be the same and in the same order as the adata used to train the model."
)
with open(setup_dict_path, "rb") as handle:
attr_dict = pickle.load(handle)
scvi_setup_dicts = attr_dict.pop("scvi_setup_dicts_")
transfer_anndata_setup(scvi_setup_dicts["seq"], adata_seq)
transfer_anndata_setup(scvi_setup_dicts["spatial"], adata_spatial)
# get the parameters for the class init signiture
init_params = attr_dict.pop("init_params_")
# update use_cuda from the saved model
use_cuda = use_cuda and torch.cuda.is_available()
init_params["use_cuda"] = use_cuda
# grab all the parameters execept for kwargs (is a dict)
non_kwargs = {k: v for k, v in init_params.items() if not isinstance(v, dict)}
# expand out kwargs
kwargs = {k: v for k, v in init_params.items() if isinstance(v, dict)}
kwargs = {k: v for (i, j) in kwargs.items() for (k, v) in j.items()}
model = cls(adata_seq, adata_spatial, **non_kwargs, **kwargs)
for attr, val in attr_dict.items():
setattr(model, attr, val)
if use_cuda:
model.model.load_state_dict(torch.load(model_path))
model.model.cuda()
else:
device = torch.device("cpu")
model.model.load_state_dict(torch.load(model_path, map_location=device))
model.model.eval()
return model
|
{
"content_hash": "3b729cfae9afc8501a1a0b43402375f6",
"timestamp": "",
"source": "github",
"line_count": 471,
"max_line_length": 101,
"avg_line_length": 36.30785562632696,
"alnum_prop": 0.5579790655517222,
"repo_name": "YosefLab/scVI",
"id": "6b43b0862b4d667de1d29b3a760e9d928cdca95a",
"size": "17101",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scvi/model/gimvi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "226"
},
{
"name": "Python",
"bytes": "582001"
}
],
"symlink_target": ""
}
|
""" Setup script for PyPI """
import os
from setuptools import setup
from ConfigParser import SafeConfigParser
settings = SafeConfigParser()
settings.read(os.path.realpath('cumulus_ds/settings.conf'))
setup(
name='cumulus',
version=settings.get('general', 'version'),
license='Apache Software License',
description='Cumulus Deployment Suite for Amazon Web Services',
author='Sebastian Dahlgren',
author_email='sebastian.dahlgren@skymill.se',
url='http://www.skymillsolutions.com',
keywords="cumulus cloudformation amazon web services",
platforms=['Any'],
packages=['cumulus_ds'],
scripts=['cumulus'],
include_package_data=True,
zip_safe=False,
install_requires=[
'boto >= 2.12.0'
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python'
]
)
|
{
"content_hash": "e9acb36d09a27a601ab134818bb7d23e",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 67,
"avg_line_length": 29.558823529411764,
"alnum_prop": 0.663681592039801,
"repo_name": "skymill/cumulus",
"id": "d5d3b314ecdd6dbfdd761ca68e7bd4f50dad5b5d",
"size": "1005",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cumulus/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "87696"
}
],
"symlink_target": ""
}
|
"""
Model for (133,171) rate-compatible punctured convolutional (RCPC) code;
contains `RCPC` class.
Revision Info
=============
* $LastChangedBy: mandke $
* $LastChangedDate: 2011-04-28 14:37:55 -0500 (Thu, 28 Apr 2011) $
* $LastChangedRevision: 4954 $
:author: Ketan Mandke <kmandke@mail.utexas.edu>
:copyright:
Copyright 2009 The University of Texas at Austin
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
:var USE_A_D:
Constant specifying if a_d parameter should be used in computation of
convolutional coded error-rate (instead of c_d).
"""
__docformat__ = "restructuredtext en"
from wins.trace import Traceable
from wins.helper import nchoosek
from numpy import sqrt, ceil, array
from numpy import log2
from scipy.stats import erfc
from scapy.all import Packet
## FIXME:
# Using c_d instead of a_d seems to produce a more pessimistic result (i.e. a
# more conservative guess of the error-probability.
USE_A_D = 0
class RCPC(Traceable):
"""Model for (133,171) rate-compatible punctured convolutional (RCPC) code.
This RCPC code is commonly used in many wireless standards (i.e. IEEE
802.11a/g/n). See `Punctured convolutional codes`_ for more.
:CVariables:
* `params`: Dictionary containing parameters used for characterizing
bit-error (BER) performance of the (133,171) RCPC code.
Each dictionary key is a string representation of the code rate (e.g.
"1/2", "2/3", etc.), and each entry is another dictionary containing
three fields:
* 'a_d': The distance coefficients used in calculating BER.
* 'c_d': Complementary coefficients also used in calculating BER.
* 'dfree': `Free distance`_ of code rate.
* `rates`: Property to access code rates supported by this class.
* `rate`: Property to access/modify coding rate of `RCPC`.
* `coderate`: Property to alias `rate`.
:IVariables:
* `__rate`: Private variable to maintain coding rate.
.. _`Punctured convolutional codes`: http://en.wikipedia.org/wiki/Convolutional_code#Popular_convolutional_codes
.. _`Free distance`: http://en.wikipedia.org/wiki/Convolutional_code#Free_distance_and_error_distribution
"""
name = "rcpc code"
tracename = "RCPC"
params = {'1/2':{'dfree':10,
'a_d':[11,0,38,0,193,0,1331,0,7275,0,40406,0,234969,0,1337714,0,7594819,0,43375588,0],
'c_d':[36,0,211,0,1404,0,11633,0,77433,0,502690,0,3322763,0,21292910,0,134365911,0,843425871,0]},
'2/3':{'dfree':6,
'a_d':[1,16,48,158,642,2435,9174,34705,131585,499608],
'c_d':[3,70,285,1276,6160,27128,117019,498860,2103891,8784123]},
'3/4':{'dfree':5,
'a_d':[8,31,160,892,4512,23307,121077,625059,3234886,16753077],
'c_d':[42,201,1492,10469,62935,379644,2253373,13073811,75152755,428005675]},
'4/5':{'dfree': 4,
'a_d':[3,24,172,1158,7409,48729,319861,2097971,13765538,90315667],
'c_d':[12,188,1732,15256,121372,945645,7171532,53399130,392137968,2846810288]},
'5/6':{'dfree':4,
'a_d':[14,69,654,4996,39699,315371,2507890,19921920,158275483,1257455600],
'c_d':[92,528,8694,79453,792114,7375573,67884974,610875423,5427275376,47664215639]},
'6/7':{'dfree':3,
'a_d':[1,20,223,1961,18093,169175,1576108,14656816,136394365,1269388805,11812597193],
'c_d':[5,169,2725,32233,370861,4169788,45417406,483171499,5063039490,52394710081,536645404278]},
'7/8':{'dfree':3,
'a_d':[2,46,499,5291,56179,599557,6387194,68117821,726098696,7741086706],
'c_d':[9,500,7437,105707,1402743,17909268,222292299,2706822556,32434972565,383973015410]},
}
def __init__(self, rate=None, **kwargs):
"""Constructor.
:param rate: Coding rate [default='1/2']
If specified, `rate` **must** be supported by `params`. By default,
`rate` is set to the mother code rate (i.e. '1/2').
"""
self.__rate = None
if rate is None: rate = "1/2"
Traceable.__init__(self, **kwargs)
# set parameters
self.rate = rate
rate = property(fget=lambda self: self.__rate, \
fset=lambda self,r: self.set_rate(r) )
coderate = property(fget=lambda self: self.rate, \
fset=lambda self,r: setattr(self,'rate',r) )
rates = property(fget=lambda self: self.params.keys() )
def ber(self, uber):
"""Calculate bit-error rate of coded system (or coded BER) from BER of
uncoded system (or uncoded BER).
:param uber: BER of uncoded system.
:return: BER of coded system; in [0,1].
This method calls `calcber()` with local parameters to compute the
bit-error rate of the coded system.
:note: If `uber` is a `numpy` array, this method will return an array of
values corresponding to the specified parameters.
"""
return self.calcber(uber, coderate=self.rate)
def per(self, p, uber):
"""Calculate packet-error rate (PER); **overload as needed**.
:param p: Packet (packet length in bytes) to compute PER for.
:param uber: BER of uncoded system.
:return: Packet-error rate (PER) in [0,1].
This method calls `calcper()` with local parameters to compute the
packet-error rate.
**Overload this method to change how packet-error rate is computed.**
"""
return self.calcper(p, uber, coderate=self.rate)
def set_rate(self, r):
"""Set coding rate `r`."""
assert (r in self.params), "[RCPC]: Unsupported rate (%s)!"%(r)
self.__rate = r
@classmethod
def hard_decode(cls, uber, dfree):
"""Calculate bit-error probability for hard decision decoded RCPC.
:param uber: Probability of bit-error for uncoded system.
:param dfree: Free distance of convolutional code.
:return: BER for hard decision decoded RCPC coded system.
"""
pd, p = 0, uber
dmin = int(ceil((dfree+1.0)/2) )
q = 1 - p
for k in range(dmin, dfree+1):
pd += nchoosek(dfree,k)*(p**k)*(q**(dfree-k) )
if (int(dfree)%2<1):
pd += 0.5*nchoosek(dfree,int(dfree/2))*(p**(dfree/2))*(q**(dfree/2))
return pd
@classmethod
def calcber(cls, uber, coderate=None):
"""Calculate bit-error rate of coded system (or coded BER) from BER of
uncoded system (or uncoded BER).
:param uber: BER of uncoded system; in [0, 1].
:param coderate: Optional code rate; **must be supported by `params`**.
:return: BER of coded system; in [0,1].
If `coderate` is not specified this method will use the rate of the
mother code (i.e. "1/2") when calculating BER.
:note: If `uber` is a `numpy` array, this method will return an array of
values corresponding to the specified parameters.
:note: This method computes an upper bound on the probability of
bit-error for a coded system utilizing hard-decision decoding.
This is modeled by calling `hard_decode()`.
"""
global USE_A_D
if coderate is None: coderate = "1/2"
assert (coderate in cls.params), \
"[RCPC]: Unsupported code rate (%s) in calcber()!"%(coderate)
# get parameters
dfree = cls.params[coderate]['dfree']
a_d = cls.params[coderate]['a_d']
c_d = cls.params[coderate]['c_d']
numd = len(c_d)
if USE_A_D: numd = len(a_d)
# compute coded BER
cber = 0*uber
for k in range(numd):
if USE_A_D: cber += a_d[k]*cls.hard_decode(uber, dfree+k)
else: cber += c_d[k]*cls.hard_decode(uber, dfree+k)
# apply ber ceiling
blim = 0.5
try:
for k in range(len(uber)):
#blim = ber[k]
if cber[k]>blim: cber[k] = blim
except TypeError:
if cber>blim: cber = blim
return cber
@classmethod
def calcper(cls, p, *args, **kwargs):
"""Calculate packet-error rate of coded system; **overload as needed**.
:param p: Packet (packet length in bytes) to compute PER for.
:param args: Arguments passed to `calcber()`.
:param kwargs: Additional keywords also passed to `calcber()`.
:return: PER of coded system; in [0,1].
By default this method uses the following relationship to determine the
packet-error rate:
PER = 1 - (1 - BER)^Nbits
where BER is the bit-error rate (as determined by calling `calcber()`
with args and kwargs) and Nbits is the number of bits in packet `p`.
**Overload this method to change how packet-error rate is computed.**
"""
plen = p
ber = cls.calcber(*args, **kwargs)
if isinstance(p, Packet): plen = len(p)
nbits = 8*plen
per = 1 - (1-ber)**nbits
return per
|
{
"content_hash": "49f75d37fe9b38ef1d694586c049201d",
"timestamp": "",
"source": "github",
"line_count": 240,
"max_line_length": 118,
"avg_line_length": 40.6375,
"alnum_prop": 0.6060699272018866,
"repo_name": "reidlindsay/wins",
"id": "ede2c25f4c83a498137f9bbdb9ddfaa00089c0ac",
"size": "9778",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wins/digital/rcpc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5653"
},
{
"name": "C++",
"bytes": "51883"
},
{
"name": "Makefile",
"bytes": "2270"
},
{
"name": "Python",
"bytes": "1193050"
},
{
"name": "Shell",
"bytes": "665341"
}
],
"symlink_target": ""
}
|
from .stochastic_gradient import BaseSGDClassifier
from .stochastic_gradient import BaseSGDRegressor
from .stochastic_gradient import DEFAULT_EPSILON
class PassiveAggressiveClassifier(BaseSGDClassifier):
"""Passive Aggressive Classifier
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
fit_intercept: bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter: int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle: bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to False.
random_state: int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose: integer, optional
The verbosity level
n_jobs: integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
loss : string, optional
The loss function to be used:
hinge: equivalent to PA-I in the reference paper.
squared_hinge: equivalent to PA-II in the reference paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
SGDClassifier
Perceptron
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
K. Crammer, O. Dekel, J. Keshat, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
def __init__(self, C=1.0, fit_intercept=True,
n_iter=5, shuffle=False, verbose=0, loss="hinge",
n_jobs=1, random_state=None, warm_start=False):
BaseSGDClassifier.__init__(self,
penalty=None,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
eta0=1.0,
warm_start=warm_start,
n_jobs=n_jobs)
self.C = C
self.loss = loss
def partial_fit(self, X, y, classes=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of the training data
y : numpy array of shape [n_samples]
Subset of the target values
classes : array, shape = [n_classes]
Classes across all calls to partial_fit.
Can be obtained by via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr, n_iter=1,
classes=classes, sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_classes,n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [n_classes]
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr,
coef_init=coef_init, intercept_init=intercept_init)
class PassiveAggressiveRegressor(BaseSGDRegressor):
"""Passive Aggressive Regressor
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
epsilon: float
If the difference between the current prediction and the correct label
is below this threshold, the model is not updated.
fit_intercept: bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter: int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle: bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to False.
random_state: int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose: integer, optional
The verbosity level
loss : string, optional
The loss function to be used:
epsilon_insensitive: equivalent to PA-I in the reference paper.
squared_epsilon_insensitive: equivalent to PA-II in the reference
paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
SGDRegressor
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
K. Crammer, O. Dekel, J. Keshat, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=False,
verbose=0, loss="epsilon_insensitive",
epsilon=DEFAULT_EPSILON, random_state=None, class_weight=None,
warm_start=False):
BaseSGDRegressor.__init__(self,
penalty=None,
l1_ratio=0,
epsilon=epsilon,
eta0=1.0,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
warm_start=warm_start)
self.C = C
self.loss = loss
def partial_fit(self, X, y):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of training data
y : numpy array of shape [n_samples]
Subset of target values
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr, n_iter=1,
sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [1]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr,
coef_init=coef_init,
intercept_init=intercept_init)
|
{
"content_hash": "3d38241fe7ae6033da315764233233c8",
"timestamp": "",
"source": "github",
"line_count": 275,
"max_line_length": 79,
"avg_line_length": 35.32363636363636,
"alnum_prop": 0.5595017500514721,
"repo_name": "soulmachine/scikit-learn",
"id": "b06c5f3146aba34c693e2a7f5b23357694cbf007",
"size": "9778",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "sklearn/linear_model/passive_aggressive.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from tigergrader.initdb import create_user
import unittest
from testhelper import TestHelper
class LoginTestCases(TestHelper):
def test_invalid_login(self):
rv = self.login('nobody', 'sesame')
assert "Invalid Credentials" in rv.data
def test_no_login(self):
rv = self.app.get('/report/T1', follow_redirects=True)
assert "You need to log in" in rv.data
def test_admin_login(self):
create_user('admin', 'admin@example.com', 'admintiger')
rv = self.login('admin', 'admintiger')
assert "Invalid Credentials" not in rv.data
assert "You were successfully logged in" in rv.data
assert "Group admin" in rv.data
assert "Registration and Modules" in rv.data
def test_user_login(self):
create_user('user', 'user@example.com', 'usertiger')
rv = self.login('user', 'usertiger')
assert "Invalid Credentials" not in rv.data
assert "You were successfully logged in" in rv.data
assert "Group user" in rv.data
assert "(current grade : 0 / 20)" in rv.data
def test_login_logout(self):
create_user('user', 'user@example.com', 'usertiger')
rv = self.login('user', 'usertiger')
assert "Group user" in rv.data
rv = self.logout()
assert "Group user" not in rv.data
def test_registration_inactive(self):
rv = self.register("newuser", "secret", "newuser@example.com")
assert "Registration is currently closed" in rv.data
def test_no_admin_access(self):
create_user('user', 'user@example.com', 'usertiger')
self.login('user', 'usertiger')
rv = self.activate_registration()
assert "Administrative account required" in rv.data
def test_registration_active(self):
create_user('admin', 'admin@example.com', 'admintiger')
self.activate_registration()
rv = self.register("newuser", "secretsecret", "newuser@example.com")
assert "Account successfully created" in rv.data
def test_wrong_mail(self):
create_user('admin', 'admin@example.com', 'admintiger')
self.activate_registration()
rv = self.register("newuser", "secretsecret", "wrongmail")
assert "Emails addresses are not correct" in rv.data
def test_wrong_username(self):
create_user('admin', 'admin@example.com', 'admintiger')
self.activate_registration()
rv = self.register("newuser:)", "secretsecret", "newuser@example.com")
assert "can only contain letters" in rv.data
def test_no_mail(self):
create_user('admin', 'admin@example.com', 'admintiger')
self.activate_registration()
rv = self.register("newuser", "secretsecret", "")
assert "At least one email must be provided" in rv.data
def test_pass_too_short(self):
create_user('admin', 'admin@example.com', 'admintiger')
self.activate_registration()
rv = self.register("newuser", "secret", "newuser@example.com")
assert "Password must be at least 8 characters long" in rv.data
def test_user_too_short(self):
create_user('admin', 'admin@example.com', 'admintiger')
self.activate_registration()
rv = self.register("ne", "secret", "newuser@example.com")
assert "Username must be at least 3 characters long" in rv.data
def test_already_exists(self):
create_user('admin', 'admin@example.com', 'admintiger')
self.activate_registration()
self.register("newuser", "secretsecret", "newuser@example.com")
rv = self.register("newuser", "secretsecret", "newuser@example.com")
assert "Account already exists" in rv.data
rv = self.register("admin", "secretsecret", "newuser@example.com")
assert "Account already exists" in rv.data
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "6ab8edb7668d53890a3f590a75c82930",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 78,
"avg_line_length": 41.212765957446805,
"alnum_prop": 0.6404233350542076,
"repo_name": "pablooliveira/tigergrader",
"id": "640743600049f536f1e3a78d1782a7720957c920",
"size": "3874",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tigergrader/tests/test_login.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8140"
},
{
"name": "Python",
"bytes": "47660"
},
{
"name": "Ruby",
"bytes": "419"
},
{
"name": "Shell",
"bytes": "3029"
}
],
"symlink_target": ""
}
|
from azure.cli.command_modules.sf.config import SfConfigParser
from azure.cli.core.util import CLIError
from azure.servicefabric import ServiceFabricClientAPIs
from azure.cli.command_modules.sf.cluster_auth import ClientCertAuthentication
from azure.cli.core.commands.client_factory import configure_common_settings
def cf_sf_client(_):
sf_config = SfConfigParser()
endpoint = sf_config.connection_endpoint()
if not endpoint:
raise CLIError("Connection endpoint not specified, run 'az sf cluster select' first.")
cert = sf_config.cert_info()
ca_cert = sf_config.ca_cert_info()
no_verify = sf_config.no_verify_setting()
auth = ClientCertAuthentication(cert, ca_cert, no_verify)
client = ServiceFabricClientAPIs(auth, base_url=endpoint)
configure_common_settings(client)
return client
|
{
"content_hash": "89ce0fc3f7eb78744621226523c868fc",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 94,
"avg_line_length": 39.76190476190476,
"alnum_prop": 0.7592814371257485,
"repo_name": "samedder/azure-cli",
"id": "949dda9be0dfca5f4da6e8af2ae44bb17db1c7db",
"size": "1181",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/command_modules/azure-cli-sf/azure/cli/command_modules/sf/_factory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11279"
},
{
"name": "C++",
"bytes": "275"
},
{
"name": "JavaScript",
"bytes": "380"
},
{
"name": "Python",
"bytes": "5627973"
},
{
"name": "Shell",
"bytes": "25031"
}
],
"symlink_target": ""
}
|
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1NetworkPolicyIngressRule(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, _from=None, ports=None):
"""
V1beta1NetworkPolicyIngressRule - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'_from': 'list[V1beta1NetworkPolicyPeer]',
'ports': 'list[V1beta1NetworkPolicyPort]'
}
self.attribute_map = {
'_from': 'from',
'ports': 'ports'
}
self.__from = _from
self._ports = ports
@property
def _from(self):
"""
Gets the _from of this V1beta1NetworkPolicyIngressRule.
List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least on item, this rule allows traffic only if the traffic matches at least one item in the from list.
:return: The _from of this V1beta1NetworkPolicyIngressRule.
:rtype: list[V1beta1NetworkPolicyPeer]
"""
return self.__from
@_from.setter
def _from(self, _from):
"""
Sets the _from of this V1beta1NetworkPolicyIngressRule.
List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least on item, this rule allows traffic only if the traffic matches at least one item in the from list.
:param _from: The _from of this V1beta1NetworkPolicyIngressRule.
:type: list[V1beta1NetworkPolicyPeer]
"""
self.__from = _from
@property
def ports(self):
"""
Gets the ports of this V1beta1NetworkPolicyIngressRule.
List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.
:return: The ports of this V1beta1NetworkPolicyIngressRule.
:rtype: list[V1beta1NetworkPolicyPort]
"""
return self._ports
@ports.setter
def ports(self, ports):
"""
Sets the ports of this V1beta1NetworkPolicyIngressRule.
List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.
:param ports: The ports of this V1beta1NetworkPolicyIngressRule.
:type: list[V1beta1NetworkPolicyPort]
"""
self._ports = ports
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta1NetworkPolicyIngressRule):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
{
"content_hash": "e21328ebb9c0da5ee1f6e2dff2220204",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 395,
"avg_line_length": 37.829787234042556,
"alnum_prop": 0.6166104236970379,
"repo_name": "djkonro/client-python",
"id": "61ab58de9d4612b9ebc098da76d5a3c349eefe19",
"size": "5351",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "kubernetes/client/models/v1beta1_network_policy_ingress_rule.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6527154"
},
{
"name": "Shell",
"bytes": "16522"
}
],
"symlink_target": ""
}
|
from salesforce.testrunner.settings import *
# replace the test app
INSTALLED_APPS = tuple(x for x in INSTALLED_APPS if x != 'salesforce.testrunner.example')
INSTALLED_APPS += ('tests.inspectdb',)
|
{
"content_hash": "a6d3832c0d6b4fcd6f7870e39a63b4c5",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 89,
"avg_line_length": 39.6,
"alnum_prop": 0.7575757575757576,
"repo_name": "philchristensen/django-salesforce",
"id": "af36226193ed23f0a0484098b028de2c255b0731",
"size": "198",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/inspectdb/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1215"
},
{
"name": "Python",
"bytes": "202579"
},
{
"name": "Shell",
"bytes": "2666"
}
],
"symlink_target": ""
}
|
#
# Tables describing slots in the CPython type object
# and associated know-how.
#
import Naming
import PyrexTypes
import StringEncoding
invisible = ['__cinit__', '__dealloc__', '__richcmp__',
'__nonzero__', '__bool__']
class Signature(object):
# Method slot signature descriptor.
#
# has_dummy_arg boolean
# has_generic_args boolean
# fixed_arg_format string
# ret_format string
# error_value string
#
# The formats are strings made up of the following
# characters:
#
# 'O' Python object
# 'T' Python object of the type of 'self'
# 'v' void
# 'p' void *
# 'P' void **
# 'i' int
# 'b' bint
# 'I' int *
# 'l' long
# 'f' float
# 'd' double
# 'h' Py_hash_t
# 'z' Py_ssize_t
# 'Z' Py_ssize_t *
# 's' char *
# 'S' char **
# 'r' int used only to signal exception
# 'B' Py_buffer *
# '-' dummy 'self' argument (not used)
# '*' rest of args passed as generic Python
# arg tuple and kw dict (must be last
# char in format string)
format_map = {
'O': PyrexTypes.py_object_type,
'v': PyrexTypes.c_void_type,
'p': PyrexTypes.c_void_ptr_type,
'P': PyrexTypes.c_void_ptr_ptr_type,
'i': PyrexTypes.c_int_type,
'b': PyrexTypes.c_bint_type,
'I': PyrexTypes.c_int_ptr_type,
'l': PyrexTypes.c_long_type,
'f': PyrexTypes.c_float_type,
'd': PyrexTypes.c_double_type,
'h': PyrexTypes.c_py_hash_t_type,
'z': PyrexTypes.c_py_ssize_t_type,
'Z': PyrexTypes.c_py_ssize_t_ptr_type,
's': PyrexTypes.c_char_ptr_type,
'S': PyrexTypes.c_char_ptr_ptr_type,
'r': PyrexTypes.c_returncode_type,
'B': PyrexTypes.c_py_buffer_ptr_type,
# 'T', '-' and '*' are handled otherwise
# and are not looked up in here
}
type_to_format_map = dict([(type_, format_)
for format_, type_ in format_map.iteritems()])
error_value_map = {
'O': "NULL",
'T': "NULL",
'i': "-1",
'b': "-1",
'l': "-1",
'r': "-1",
'h': "-1",
'z': "-1",
}
def __init__(self, arg_format, ret_format):
self.has_dummy_arg = 0
self.has_generic_args = 0
if arg_format[:1] == '-':
self.has_dummy_arg = 1
arg_format = arg_format[1:]
if arg_format[-1:] == '*':
self.has_generic_args = 1
arg_format = arg_format[:-1]
self.fixed_arg_format = arg_format
self.ret_format = ret_format
self.error_value = self.error_value_map.get(ret_format, None)
self.is_staticmethod = False
def num_fixed_args(self):
return len(self.fixed_arg_format)
def is_self_arg(self, i):
# argument is 'self' for methods or 'class' for classmethods
return self.fixed_arg_format[i] == 'T'
def returns_self_type(self):
# return type is same as 'self' argument type
return self.ret_format == 'T'
def fixed_arg_type(self, i):
return self.format_map[self.fixed_arg_format[i]]
def return_type(self):
return self.format_map[self.ret_format]
def format_from_type(self, arg_type):
if arg_type.is_pyobject:
arg_type = PyrexTypes.py_object_type
return self.type_to_format_map[arg_type]
def exception_value(self):
return self.error_value_map.get(self.ret_format)
def function_type(self, self_arg_override=None):
# Construct a C function type descriptor for this signature
args = []
for i in xrange(self.num_fixed_args()):
if self_arg_override is not None and self.is_self_arg(i):
assert isinstance(self_arg_override, PyrexTypes.CFuncTypeArg)
args.append(self_arg_override)
else:
arg_type = self.fixed_arg_type(i)
args.append(PyrexTypes.CFuncTypeArg("", arg_type, None))
if self_arg_override is not None and self.returns_self_type():
ret_type = self_arg_override.type
else:
ret_type = self.return_type()
exc_value = self.exception_value()
return PyrexTypes.CFuncType(ret_type, args, exception_value = exc_value)
def method_flags(self):
if self.ret_format == "O":
full_args = self.fixed_arg_format
if self.has_dummy_arg:
full_args = "O" + full_args
if full_args in ["O", "T"]:
if self.has_generic_args:
return [method_varargs, method_keywords]
else:
return [method_noargs]
elif full_args in ["OO", "TO"] and not self.has_generic_args:
return [method_onearg]
if self.is_staticmethod:
return [method_varargs, method_keywords]
return None
class SlotDescriptor(object):
# Abstract base class for type slot descriptors.
#
# slot_name string Member name of the slot in the type object
# is_initialised_dynamically Is initialised by code in the module init function
# is_inherited Is inherited by subtypes (see PyType_Ready())
# py3 Indicates presence of slot in Python 3
# py2 Indicates presence of slot in Python 2
# ifdef Full #ifdef string that slot is wrapped in. Using this causes py3, py2 and flags to be ignored.)
def __init__(self, slot_name, dynamic=False, inherited=False,
py3=True, py2=True, ifdef=None):
self.slot_name = slot_name
self.is_initialised_dynamically = dynamic
self.is_inherited = inherited
self.ifdef = ifdef
self.py3 = py3
self.py2 = py2
def preprocessor_guard_code(self):
ifdef = self.ifdef
py2 = self.py2
py3 = self.py3
guard = None
if ifdef:
guard = ("#if %s" % ifdef)
elif not py3 or py3 == '<RESERVED>':
guard = ("#if PY_MAJOR_VERSION < 3")
elif not py2:
guard = ("#if PY_MAJOR_VERSION >= 3")
return guard
def generate(self, scope, code):
end_pypy_guard = False
if self.is_initialised_dynamically:
value = "0"
else:
value = self.slot_code(scope)
if value == "0" and self.is_inherited:
# PyPy currently has a broken PyType_Ready() that fails to
# inherit some slots. To work around this, we explicitly
# set inherited slots here, but only in PyPy since CPython
# handles this better than we do.
inherited_value = value
current_scope = scope
while (inherited_value == "0"
and current_scope.parent_type
and current_scope.parent_type.base_type
and current_scope.parent_type.base_type.scope):
current_scope = current_scope.parent_type.base_type.scope
inherited_value = self.slot_code(current_scope)
if inherited_value != "0":
code.putln("#if CYTHON_COMPILING_IN_PYPY")
code.putln("%s, /*%s*/" % (inherited_value, self.slot_name))
code.putln("#else")
end_pypy_guard = True
preprocessor_guard = self.preprocessor_guard_code()
if preprocessor_guard:
code.putln(preprocessor_guard)
code.putln("%s, /*%s*/" % (value, self.slot_name))
if self.py3 == '<RESERVED>':
code.putln("#else")
code.putln("0, /*reserved*/")
if preprocessor_guard:
code.putln("#endif")
if end_pypy_guard:
code.putln("#endif")
# Some C implementations have trouble statically
# initialising a global with a pointer to an extern
# function, so we initialise some of the type slots
# in the module init function instead.
def generate_dynamic_init_code(self, scope, code):
if self.is_initialised_dynamically:
value = self.slot_code(scope)
if value != "0":
code.putln("%s.%s = %s;" % (
scope.parent_type.typeobj_cname,
self.slot_name,
value
)
)
class FixedSlot(SlotDescriptor):
# Descriptor for a type slot with a fixed value.
#
# value string
def __init__(self, slot_name, value, py3=True, py2=True, ifdef=None):
SlotDescriptor.__init__(self, slot_name, py3=py3, py2=py2, ifdef=ifdef)
self.value = value
def slot_code(self, scope):
return self.value
class EmptySlot(FixedSlot):
# Descriptor for a type slot whose value is always 0.
def __init__(self, slot_name, py3=True, py2=True, ifdef=None):
FixedSlot.__init__(self, slot_name, "0", py3=py3, py2=py2, ifdef=ifdef)
class MethodSlot(SlotDescriptor):
# Type slot descriptor for a user-definable method.
#
# signature Signature
# method_name string The __xxx__ name of the method
# alternatives [string] Alternative list of __xxx__ names for the method
def __init__(self, signature, slot_name, method_name, fallback=None,
py3=True, py2=True, ifdef=None, inherited=True):
SlotDescriptor.__init__(self, slot_name, py3=py3, py2=py2,
ifdef=ifdef, inherited=inherited)
self.signature = signature
self.slot_name = slot_name
self.method_name = method_name
self.alternatives = []
method_name_to_slot[method_name] = self
#
if fallback:
self.alternatives.append(fallback)
for alt in (self.py2, self.py3):
if isinstance(alt, (tuple, list)):
slot_name, method_name = alt
self.alternatives.append(method_name)
method_name_to_slot[method_name] = self
def slot_code(self, scope):
entry = scope.lookup_here(self.method_name)
if entry and entry.func_cname:
return entry.func_cname
for method_name in self.alternatives:
entry = scope.lookup_here(method_name)
if entry and entry.func_cname:
return entry.func_cname
return "0"
class InternalMethodSlot(SlotDescriptor):
# Type slot descriptor for a method which is always
# synthesized by Cython.
#
# slot_name string Member name of the slot in the type object
def __init__(self, slot_name, **kargs):
SlotDescriptor.__init__(self, slot_name, **kargs)
def slot_code(self, scope):
return scope.mangle_internal(self.slot_name)
class GCDependentSlot(InternalMethodSlot):
# Descriptor for a slot whose value depends on whether
# the type participates in GC.
def __init__(self, slot_name, **kargs):
InternalMethodSlot.__init__(self, slot_name, **kargs)
def slot_code(self, scope):
if not scope.needs_gc():
return "0"
if not scope.has_pyobject_attrs:
# if the type does not have object attributes, it can
# delegate GC methods to its parent - iff the parent
# functions are defined in the same module
parent_type_scope = scope.parent_type.base_type.scope
if scope.parent_scope is parent_type_scope.parent_scope:
entry = scope.parent_scope.lookup_here(scope.parent_type.base_type.name)
if entry.visibility != 'extern':
return self.slot_code(parent_type_scope)
return InternalMethodSlot.slot_code(self, scope)
class ConstructorSlot(InternalMethodSlot):
# Descriptor for tp_new and tp_dealloc.
def __init__(self, slot_name, method, **kargs):
InternalMethodSlot.__init__(self, slot_name, **kargs)
self.method = method
def slot_code(self, scope):
if self.slot_name != 'tp_new' \
and scope.parent_type.base_type \
and not scope.has_pyobject_attrs \
and not scope.lookup_here(self.method):
# if the type does not have object attributes, it can
# delegate GC methods to its parent - iff the parent
# functions are defined in the same module
parent_type_scope = scope.parent_type.base_type.scope
if scope.parent_scope is parent_type_scope.parent_scope:
entry = scope.parent_scope.lookup_here(scope.parent_type.base_type.name)
if entry.visibility != 'extern':
return self.slot_code(parent_type_scope)
return InternalMethodSlot.slot_code(self, scope)
class SyntheticSlot(InternalMethodSlot):
# Type slot descriptor for a synthesized method which
# dispatches to one or more user-defined methods depending
# on its arguments. If none of the relevant methods are
# defined, the method will not be synthesized and an
# alternative default value will be placed in the type
# slot.
def __init__(self, slot_name, user_methods, default_value, **kargs):
InternalMethodSlot.__init__(self, slot_name, **kargs)
self.user_methods = user_methods
self.default_value = default_value
def slot_code(self, scope):
if scope.defines_any(self.user_methods):
return InternalMethodSlot.slot_code(self, scope)
else:
return self.default_value
class TypeFlagsSlot(SlotDescriptor):
# Descriptor for the type flags slot.
def slot_code(self, scope):
value = "Py_TPFLAGS_DEFAULT"
if scope.directives['type_version_tag']:
# it's not in 'Py_TPFLAGS_DEFAULT' in Py2
value += "|Py_TPFLAGS_HAVE_VERSION_TAG"
else:
# it's enabled in 'Py_TPFLAGS_DEFAULT' in Py3
value = "(%s&~Py_TPFLAGS_HAVE_VERSION_TAG)" % value
value += "|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER"
if not scope.parent_type.is_final_type:
value += "|Py_TPFLAGS_BASETYPE"
if scope.needs_gc():
value += "|Py_TPFLAGS_HAVE_GC"
return value
class DocStringSlot(SlotDescriptor):
# Descriptor for the docstring slot.
def slot_code(self, scope):
if scope.doc is not None:
if scope.doc.is_unicode:
doc = scope.doc.utf8encode()
else:
doc = scope.doc.byteencode()
return '__Pyx_DOCSTR("%s")' % StringEncoding.escape_byte_string(doc)
else:
return "0"
class SuiteSlot(SlotDescriptor):
# Descriptor for a substructure of the type object.
#
# sub_slots [SlotDescriptor]
def __init__(self, sub_slots, slot_type, slot_name):
SlotDescriptor.__init__(self, slot_name)
self.sub_slots = sub_slots
self.slot_type = slot_type
substructures.append(self)
def is_empty(self, scope):
for slot in self.sub_slots:
if slot.slot_code(scope) != "0":
return False
return True
def substructure_cname(self, scope):
return "%s%s_%s" % (Naming.pyrex_prefix, self.slot_name, scope.class_name)
def slot_code(self, scope):
if not self.is_empty(scope):
return "&%s" % self.substructure_cname(scope)
return "0"
def generate_substructure(self, scope, code):
if not self.is_empty(scope):
code.putln("")
code.putln(
"static %s %s = {" % (
self.slot_type,
self.substructure_cname(scope)))
for slot in self.sub_slots:
slot.generate(scope, code)
code.putln("};")
substructures = [] # List of all SuiteSlot instances
class MethodTableSlot(SlotDescriptor):
# Slot descriptor for the method table.
def slot_code(self, scope):
return scope.method_table_cname
class MemberTableSlot(SlotDescriptor):
# Slot descriptor for the table of Python-accessible attributes.
def slot_code(self, scope):
return "0"
class GetSetSlot(SlotDescriptor):
# Slot descriptor for the table of attribute get & set methods.
def slot_code(self, scope):
if scope.property_entries:
return scope.getset_table_cname
else:
return "0"
class BaseClassSlot(SlotDescriptor):
# Slot descriptor for the base class slot.
def __init__(self, name):
SlotDescriptor.__init__(self, name, dynamic = 1)
def generate_dynamic_init_code(self, scope, code):
base_type = scope.parent_type.base_type
if base_type:
code.putln("%s.%s = %s;" % (
scope.parent_type.typeobj_cname,
self.slot_name,
base_type.typeptr_cname))
# The following dictionary maps __xxx__ method names to slot descriptors.
method_name_to_slot = {}
## The following slots are (or could be) initialised with an
## extern function pointer.
#
#slots_initialised_from_extern = (
# "tp_free",
#)
#------------------------------------------------------------------------------------------
#
# Utility functions for accessing slot table data structures
#
#------------------------------------------------------------------------------------------
def get_special_method_signature(name):
# Given a method name, if it is a special method,
# return its signature, else return None.
slot = method_name_to_slot.get(name)
if slot:
return slot.signature
else:
return None
def get_property_accessor_signature(name):
# Return signature of accessor for an extension type
# property, else None.
return property_accessor_signatures.get(name)
def get_base_slot_function(scope, slot):
# Returns the function implementing this slot in the baseclass.
# This is useful for enabling the compiler to optimize calls
# that recursively climb the class hierarchy.
base_type = scope.parent_type.base_type
if scope.parent_scope is base_type.scope.parent_scope:
parent_slot = slot.slot_code(base_type.scope)
if parent_slot != '0':
entry = scope.parent_scope.lookup_here(scope.parent_type.base_type.name)
if entry.visibility != 'extern':
return parent_slot
return None
def get_slot_function(scope, slot):
# Returns the function implementing this slot in the baseclass.
# This is useful for enabling the compiler to optimize calls
# that recursively climb the class hierarchy.
slot_code = slot.slot_code(scope)
if slot_code != '0':
entry = scope.parent_scope.lookup_here(scope.parent_type.name)
if entry.visibility != 'extern':
return slot_code
return None
#------------------------------------------------------------------------------------------
#
# Signatures for generic Python functions and methods.
#
#------------------------------------------------------------------------------------------
pyfunction_signature = Signature("-*", "O")
pymethod_signature = Signature("T*", "O")
#------------------------------------------------------------------------------------------
#
# Signatures for simple Python functions.
#
#------------------------------------------------------------------------------------------
pyfunction_noargs = Signature("-", "O")
pyfunction_onearg = Signature("-O", "O")
#------------------------------------------------------------------------------------------
#
# Signatures for the various kinds of function that
# can appear in the type object and its substructures.
#
#------------------------------------------------------------------------------------------
unaryfunc = Signature("T", "O") # typedef PyObject * (*unaryfunc)(PyObject *);
binaryfunc = Signature("OO", "O") # typedef PyObject * (*binaryfunc)(PyObject *, PyObject *);
ibinaryfunc = Signature("TO", "O") # typedef PyObject * (*binaryfunc)(PyObject *, PyObject *);
ternaryfunc = Signature("OOO", "O") # typedef PyObject * (*ternaryfunc)(PyObject *, PyObject *, PyObject *);
iternaryfunc = Signature("TOO", "O") # typedef PyObject * (*ternaryfunc)(PyObject *, PyObject *, PyObject *);
callfunc = Signature("T*", "O") # typedef PyObject * (*ternaryfunc)(PyObject *, PyObject *, PyObject *);
inquiry = Signature("T", "i") # typedef int (*inquiry)(PyObject *);
lenfunc = Signature("T", "z") # typedef Py_ssize_t (*lenfunc)(PyObject *);
# typedef int (*coercion)(PyObject **, PyObject **);
intargfunc = Signature("Ti", "O") # typedef PyObject *(*intargfunc)(PyObject *, int);
ssizeargfunc = Signature("Tz", "O") # typedef PyObject *(*ssizeargfunc)(PyObject *, Py_ssize_t);
intintargfunc = Signature("Tii", "O") # typedef PyObject *(*intintargfunc)(PyObject *, int, int);
ssizessizeargfunc = Signature("Tzz", "O") # typedef PyObject *(*ssizessizeargfunc)(PyObject *, Py_ssize_t, Py_ssize_t);
intobjargproc = Signature("TiO", 'r') # typedef int(*intobjargproc)(PyObject *, int, PyObject *);
ssizeobjargproc = Signature("TzO", 'r') # typedef int(*ssizeobjargproc)(PyObject *, Py_ssize_t, PyObject *);
intintobjargproc = Signature("TiiO", 'r') # typedef int(*intintobjargproc)(PyObject *, int, int, PyObject *);
ssizessizeobjargproc = Signature("TzzO", 'r') # typedef int(*ssizessizeobjargproc)(PyObject *, Py_ssize_t, Py_ssize_t, PyObject *);
intintargproc = Signature("Tii", 'r')
ssizessizeargproc = Signature("Tzz", 'r')
objargfunc = Signature("TO", "O")
objobjargproc = Signature("TOO", 'r') # typedef int (*objobjargproc)(PyObject *, PyObject *, PyObject *);
readbufferproc = Signature("TzP", "z") # typedef Py_ssize_t (*readbufferproc)(PyObject *, Py_ssize_t, void **);
writebufferproc = Signature("TzP", "z") # typedef Py_ssize_t (*writebufferproc)(PyObject *, Py_ssize_t, void **);
segcountproc = Signature("TZ", "z") # typedef Py_ssize_t (*segcountproc)(PyObject *, Py_ssize_t *);
charbufferproc = Signature("TzS", "z") # typedef Py_ssize_t (*charbufferproc)(PyObject *, Py_ssize_t, char **);
objargproc = Signature("TO", 'r') # typedef int (*objobjproc)(PyObject *, PyObject *);
# typedef int (*visitproc)(PyObject *, void *);
# typedef int (*traverseproc)(PyObject *, visitproc, void *);
destructor = Signature("T", "v") # typedef void (*destructor)(PyObject *);
# printfunc = Signature("TFi", 'r') # typedef int (*printfunc)(PyObject *, FILE *, int);
# typedef PyObject *(*getattrfunc)(PyObject *, char *);
getattrofunc = Signature("TO", "O") # typedef PyObject *(*getattrofunc)(PyObject *, PyObject *);
# typedef int (*setattrfunc)(PyObject *, char *, PyObject *);
setattrofunc = Signature("TOO", 'r') # typedef int (*setattrofunc)(PyObject *, PyObject *, PyObject *);
delattrofunc = Signature("TO", 'r')
cmpfunc = Signature("TO", "i") # typedef int (*cmpfunc)(PyObject *, PyObject *);
reprfunc = Signature("T", "O") # typedef PyObject *(*reprfunc)(PyObject *);
hashfunc = Signature("T", "h") # typedef Py_hash_t (*hashfunc)(PyObject *);
# typedef PyObject *(*richcmpfunc) (PyObject *, PyObject *, int);
richcmpfunc = Signature("OOi", "O") # typedef PyObject *(*richcmpfunc) (PyObject *, PyObject *, int);
getiterfunc = Signature("T", "O") # typedef PyObject *(*getiterfunc) (PyObject *);
iternextfunc = Signature("T", "O") # typedef PyObject *(*iternextfunc) (PyObject *);
descrgetfunc = Signature("TOO", "O") # typedef PyObject *(*descrgetfunc) (PyObject *, PyObject *, PyObject *);
descrsetfunc = Signature("TOO", 'r') # typedef int (*descrsetfunc) (PyObject *, PyObject *, PyObject *);
descrdelfunc = Signature("TO", 'r')
initproc = Signature("T*", 'r') # typedef int (*initproc)(PyObject *, PyObject *, PyObject *);
# typedef PyObject *(*newfunc)(struct _typeobject *, PyObject *, PyObject *);
# typedef PyObject *(*allocfunc)(struct _typeobject *, int);
getbufferproc = Signature("TBi", "r") # typedef int (*getbufferproc)(PyObject *, Py_buffer *, int);
releasebufferproc = Signature("TB", "v") # typedef void (*releasebufferproc)(PyObject *, Py_buffer *);
#------------------------------------------------------------------------------------------
#
# Signatures for accessor methods of properties.
#
#------------------------------------------------------------------------------------------
property_accessor_signatures = {
'__get__': Signature("T", "O"),
'__set__': Signature("TO", 'r'),
'__del__': Signature("T", 'r')
}
#------------------------------------------------------------------------------------------
#
# Descriptor tables for the slots of the various type object
# substructures, in the order they appear in the structure.
#
#------------------------------------------------------------------------------------------
PyNumberMethods = (
MethodSlot(binaryfunc, "nb_add", "__add__"),
MethodSlot(binaryfunc, "nb_subtract", "__sub__"),
MethodSlot(binaryfunc, "nb_multiply", "__mul__"),
MethodSlot(binaryfunc, "nb_divide", "__div__", py3 = False),
MethodSlot(binaryfunc, "nb_remainder", "__mod__"),
MethodSlot(binaryfunc, "nb_divmod", "__divmod__"),
MethodSlot(ternaryfunc, "nb_power", "__pow__"),
MethodSlot(unaryfunc, "nb_negative", "__neg__"),
MethodSlot(unaryfunc, "nb_positive", "__pos__"),
MethodSlot(unaryfunc, "nb_absolute", "__abs__"),
MethodSlot(inquiry, "nb_nonzero", "__nonzero__", py3 = ("nb_bool", "__bool__")),
MethodSlot(unaryfunc, "nb_invert", "__invert__"),
MethodSlot(binaryfunc, "nb_lshift", "__lshift__"),
MethodSlot(binaryfunc, "nb_rshift", "__rshift__"),
MethodSlot(binaryfunc, "nb_and", "__and__"),
MethodSlot(binaryfunc, "nb_xor", "__xor__"),
MethodSlot(binaryfunc, "nb_or", "__or__"),
EmptySlot("nb_coerce", py3 = False),
MethodSlot(unaryfunc, "nb_int", "__int__", fallback="__long__"),
MethodSlot(unaryfunc, "nb_long", "__long__", fallback="__int__", py3 = "<RESERVED>"),
MethodSlot(unaryfunc, "nb_float", "__float__"),
MethodSlot(unaryfunc, "nb_oct", "__oct__", py3 = False),
MethodSlot(unaryfunc, "nb_hex", "__hex__", py3 = False),
# Added in release 2.0
MethodSlot(ibinaryfunc, "nb_inplace_add", "__iadd__"),
MethodSlot(ibinaryfunc, "nb_inplace_subtract", "__isub__"),
MethodSlot(ibinaryfunc, "nb_inplace_multiply", "__imul__"),
MethodSlot(ibinaryfunc, "nb_inplace_divide", "__idiv__", py3 = False),
MethodSlot(ibinaryfunc, "nb_inplace_remainder", "__imod__"),
MethodSlot(ibinaryfunc, "nb_inplace_power", "__ipow__"), # actually ternaryfunc!!!
MethodSlot(ibinaryfunc, "nb_inplace_lshift", "__ilshift__"),
MethodSlot(ibinaryfunc, "nb_inplace_rshift", "__irshift__"),
MethodSlot(ibinaryfunc, "nb_inplace_and", "__iand__"),
MethodSlot(ibinaryfunc, "nb_inplace_xor", "__ixor__"),
MethodSlot(ibinaryfunc, "nb_inplace_or", "__ior__"),
# Added in release 2.2
# The following require the Py_TPFLAGS_HAVE_CLASS flag
MethodSlot(binaryfunc, "nb_floor_divide", "__floordiv__"),
MethodSlot(binaryfunc, "nb_true_divide", "__truediv__"),
MethodSlot(ibinaryfunc, "nb_inplace_floor_divide", "__ifloordiv__"),
MethodSlot(ibinaryfunc, "nb_inplace_true_divide", "__itruediv__"),
# Added in release 2.5
MethodSlot(unaryfunc, "nb_index", "__index__", ifdef = "PY_VERSION_HEX >= 0x02050000")
)
PySequenceMethods = (
MethodSlot(lenfunc, "sq_length", "__len__"),
EmptySlot("sq_concat"), # nb_add used instead
EmptySlot("sq_repeat"), # nb_multiply used instead
SyntheticSlot("sq_item", ["__getitem__"], "0"), #EmptySlot("sq_item"), # mp_subscript used instead
MethodSlot(ssizessizeargfunc, "sq_slice", "__getslice__"),
EmptySlot("sq_ass_item"), # mp_ass_subscript used instead
SyntheticSlot("sq_ass_slice", ["__setslice__", "__delslice__"], "0"),
MethodSlot(cmpfunc, "sq_contains", "__contains__"),
EmptySlot("sq_inplace_concat"), # nb_inplace_add used instead
EmptySlot("sq_inplace_repeat"), # nb_inplace_multiply used instead
)
PyMappingMethods = (
MethodSlot(lenfunc, "mp_length", "__len__"),
MethodSlot(objargfunc, "mp_subscript", "__getitem__"),
SyntheticSlot("mp_ass_subscript", ["__setitem__", "__delitem__"], "0"),
)
PyBufferProcs = (
MethodSlot(readbufferproc, "bf_getreadbuffer", "__getreadbuffer__", py3 = False),
MethodSlot(writebufferproc, "bf_getwritebuffer", "__getwritebuffer__", py3 = False),
MethodSlot(segcountproc, "bf_getsegcount", "__getsegcount__", py3 = False),
MethodSlot(charbufferproc, "bf_getcharbuffer", "__getcharbuffer__", py3 = False),
MethodSlot(getbufferproc, "bf_getbuffer", "__getbuffer__", ifdef = "PY_VERSION_HEX >= 0x02060000"),
MethodSlot(releasebufferproc, "bf_releasebuffer", "__releasebuffer__", ifdef = "PY_VERSION_HEX >= 0x02060000")
)
#------------------------------------------------------------------------------------------
#
# The main slot table. This table contains descriptors for all the
# top-level type slots, beginning with tp_dealloc, in the order they
# appear in the type object.
#
#------------------------------------------------------------------------------------------
slot_table = (
ConstructorSlot("tp_dealloc", '__dealloc__'),
EmptySlot("tp_print"), #MethodSlot(printfunc, "tp_print", "__print__"),
EmptySlot("tp_getattr"),
EmptySlot("tp_setattr"),
MethodSlot(cmpfunc, "tp_compare", "__cmp__", py3 = '<RESERVED>'),
MethodSlot(reprfunc, "tp_repr", "__repr__"),
SuiteSlot(PyNumberMethods, "PyNumberMethods", "tp_as_number"),
SuiteSlot(PySequenceMethods, "PySequenceMethods", "tp_as_sequence"),
SuiteSlot(PyMappingMethods, "PyMappingMethods", "tp_as_mapping"),
MethodSlot(hashfunc, "tp_hash", "__hash__", inherited=False), # Py3 checks for __richcmp__
MethodSlot(callfunc, "tp_call", "__call__"),
MethodSlot(reprfunc, "tp_str", "__str__"),
SyntheticSlot("tp_getattro", ["__getattr__","__getattribute__"], "0"), #"PyObject_GenericGetAttr"),
SyntheticSlot("tp_setattro", ["__setattr__", "__delattr__"], "0"), #"PyObject_GenericSetAttr"),
SuiteSlot(PyBufferProcs, "PyBufferProcs", "tp_as_buffer"),
TypeFlagsSlot("tp_flags"),
DocStringSlot("tp_doc"),
GCDependentSlot("tp_traverse"),
GCDependentSlot("tp_clear"),
# Later -- synthesize a method to split into separate ops?
MethodSlot(richcmpfunc, "tp_richcompare", "__richcmp__", inherited=False), # Py3 checks for __hash__
EmptySlot("tp_weaklistoffset"),
MethodSlot(getiterfunc, "tp_iter", "__iter__"),
MethodSlot(iternextfunc, "tp_iternext", "__next__"),
MethodTableSlot("tp_methods"),
MemberTableSlot("tp_members"),
GetSetSlot("tp_getset"),
BaseClassSlot("tp_base"), #EmptySlot("tp_base"),
EmptySlot("tp_dict"),
SyntheticSlot("tp_descr_get", ["__get__"], "0"),
SyntheticSlot("tp_descr_set", ["__set__", "__delete__"], "0"),
EmptySlot("tp_dictoffset"),
MethodSlot(initproc, "tp_init", "__init__"),
EmptySlot("tp_alloc"), #FixedSlot("tp_alloc", "PyType_GenericAlloc"),
InternalMethodSlot("tp_new"),
EmptySlot("tp_free"),
EmptySlot("tp_is_gc"),
EmptySlot("tp_bases"),
EmptySlot("tp_mro"),
EmptySlot("tp_cache"),
EmptySlot("tp_subclasses"),
EmptySlot("tp_weaklist"),
EmptySlot("tp_del"),
EmptySlot("tp_version_tag", ifdef="PY_VERSION_HEX >= 0x02060000"),
EmptySlot("tp_finalize", ifdef="PY_VERSION_HEX >= 0x030400a1 && defined(Py_TPFLAGS_HAVE_FINALIZE)"),
)
#------------------------------------------------------------------------------------------
#
# Descriptors for special methods which don't appear directly
# in the type object or its substructures. These methods are
# called from slot functions synthesized by Cython.
#
#------------------------------------------------------------------------------------------
MethodSlot(initproc, "", "__cinit__")
MethodSlot(destructor, "", "__dealloc__")
MethodSlot(objobjargproc, "", "__setitem__")
MethodSlot(objargproc, "", "__delitem__")
MethodSlot(ssizessizeobjargproc, "", "__setslice__")
MethodSlot(ssizessizeargproc, "", "__delslice__")
MethodSlot(getattrofunc, "", "__getattr__")
MethodSlot(setattrofunc, "", "__setattr__")
MethodSlot(delattrofunc, "", "__delattr__")
MethodSlot(descrgetfunc, "", "__get__")
MethodSlot(descrsetfunc, "", "__set__")
MethodSlot(descrdelfunc, "", "__delete__")
# Method flags for python-exposed methods.
method_noargs = "METH_NOARGS"
method_onearg = "METH_O"
method_varargs = "METH_VARARGS"
method_keywords = "METH_KEYWORDS"
method_coexist = "METH_COEXIST"
|
{
"content_hash": "0da973c3dc7377217ea3956ee9f03613",
"timestamp": "",
"source": "github",
"line_count": 822,
"max_line_length": 133,
"avg_line_length": 40.77250608272506,
"alnum_prop": 0.5716246456810383,
"repo_name": "genome-vendor/cython",
"id": "a66a1d33488a939243c70985366ec96de9a440aa",
"size": "33515",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "Cython/Compiler/TypeSlots.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "10912001"
},
{
"name": "C++",
"bytes": "13793"
},
{
"name": "Emacs Lisp",
"bytes": "2482"
},
{
"name": "Python",
"bytes": "7313301"
},
{
"name": "Shell",
"bytes": "256"
},
{
"name": "Smalltalk",
"bytes": "618"
}
],
"symlink_target": ""
}
|
"""Train and evaluate the model."""
import logging
import tensorflow as tf
import tensorflow_transform as tft
from tfx_taxifare_tips.model_training import model_input
from tfx_taxifare_tips.model_training import model
def train(
data_accessor,
train_data_dir,
eval_data_dir,
tft_output_dir,
log_dir,
hyperparameters,
):
"""
Args:
data_accessor:
train_data_dir:
eval_data_dir:
tft_output_dir:
log_dir:
hyperparameters:
Returns:
classifer:
"""
logging.info("Loading tft output from %s", tft_output_dir)
tft_output = tft.TFTransformOutput(tft_output_dir)
schema = tft_output.transformed_metadata.schema
train_dataset = model_input.get_dataset(
file_pattern=train_data_dir,
data_accessor=data_accessor,
schema=schema,
batch_size=hyperparameters["batch_size"],
)
eval_dataset = model_input.get_dataset(
file_pattern=eval_data_dir,
data_accessor=data_accessor,
schema=schema,
batch_size=hyperparameters["batch_size"],
)
classifier = model.build_binary_classifier(
hyperparameters=hyperparameters, tft_output=tft_output
)
optimizer = tf.keras.optimizers.Adam(learning_rate=hyperparameters["learning_rate"])
loss = tf.keras.losses.BinaryCrossentropy(from_logits=True)
metrics = [tf.keras.metrics.BinaryAccuracy(name="accuracy")]
classifier.compile(optimizer=optimizer, loss=loss, metrics=metrics)
classifier.summary(print_fn=logging.info)
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
early_stopping_callback = tf.keras.callbacks.EarlyStopping(
monitor="val_loss", patience=2, restore_best_weights=True
)
logging.info("Model training started...")
classifier.fit(
train_dataset,
epochs=hyperparameters["num_epochs"],
validation_data=eval_dataset,
callbacks=[tensorboard_callback, early_stopping_callback],
)
logging.info("Model training completed.")
return classifier
def evaluate(classifier, data_accessor, eval_data_dir, tft_output_dir, hyperparameters):
"""
Args:
classifier:
data_accessor:
eval_data_dir:
tft_output_dir:
hyperparameters:
Returns:
evaluation_metrics:
"""
logging.info("Loading tft output from %s", tft_output_dir)
tft_output = tft.TFTransformOutput(tft_output_dir)
schema = tft_output.transformed_metadata.schema
logging.info("Model evaluation started...")
eval_dataset = model_input.get_dataset(
file_pattern=eval_data_dir,
data_accessor=data_accessor,
schema=schema,
batch_size=hyperparameters["batch_size"],
)
evaluation_metrics = classifier.evaluate(eval_dataset)
logging.info("Model evaluation completed.")
return evaluation_metrics
|
{
"content_hash": "3b5cb479f755df5362cd2c17eff24a26",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 88,
"avg_line_length": 27.83653846153846,
"alnum_prop": 0.6766839378238342,
"repo_name": "GoogleCloudPlatform/training-data-analyst",
"id": "41a8363756350617a90825c5f29e0b9e8e457810",
"size": "2895",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "self-paced-labs/vertex-ai/vertex-pipelines/tfx/tfx_taxifare_tips/model_training/model_trainer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "39536"
},
{
"name": "C#",
"bytes": "23445"
},
{
"name": "C++",
"bytes": "30926"
},
{
"name": "CSS",
"bytes": "53087"
},
{
"name": "Dockerfile",
"bytes": "90856"
},
{
"name": "Go",
"bytes": "93755"
},
{
"name": "HCL",
"bytes": "73891"
},
{
"name": "HTML",
"bytes": "2342167"
},
{
"name": "Java",
"bytes": "2441030"
},
{
"name": "JavaScript",
"bytes": "3957504"
},
{
"name": "Jinja",
"bytes": "257585"
},
{
"name": "Jsonnet",
"bytes": "5696"
},
{
"name": "Jupyter Notebook",
"bytes": "242016061"
},
{
"name": "Makefile",
"bytes": "12642"
},
{
"name": "PigLatin",
"bytes": "11558"
},
{
"name": "Pug",
"bytes": "457977"
},
{
"name": "Python",
"bytes": "18543833"
},
{
"name": "R",
"bytes": "68"
},
{
"name": "Scala",
"bytes": "27161"
},
{
"name": "Shell",
"bytes": "763259"
},
{
"name": "TypeScript",
"bytes": "66858"
}
],
"symlink_target": ""
}
|
import frappe
from frappe.defaults import *
from frappe.tests.utils import FrappeTestCase
class TestDefaults(FrappeTestCase):
def test_global(self):
clear_user_default("key1")
set_global_default("key1", "value1")
self.assertEqual(get_global_default("key1"), "value1")
set_global_default("key1", "value2")
self.assertEqual(get_global_default("key1"), "value2")
add_global_default("key1", "value3")
self.assertEqual(get_global_default("key1"), "value2")
self.assertEqual(get_defaults()["key1"], ["value2", "value3"])
self.assertEqual(get_user_default_as_list("key1"), ["value2", "value3"])
def test_user(self):
set_user_default("key1", "2value1")
self.assertEqual(get_user_default_as_list("key1"), ["2value1"])
set_user_default("key1", "2value2")
self.assertEqual(get_user_default("key1"), "2value2")
add_user_default("key1", "3value3")
self.assertEqual(get_user_default("key1"), "2value2")
self.assertEqual(get_user_default_as_list("key1"), ["2value2", "3value3"])
def test_global_if_not_user(self):
set_global_default("key4", "value4")
self.assertEqual(get_user_default("key4"), "value4")
def test_clear(self):
set_user_default("key5", "value5")
self.assertEqual(get_user_default("key5"), "value5")
clear_user_default("key5")
self.assertEqual(get_user_default("key5"), None)
def test_clear_global(self):
set_global_default("key6", "value6")
self.assertEqual(get_user_default("key6"), "value6")
clear_default("key6", value="value6")
self.assertEqual(get_user_default("key6"), None)
def test_user_permission_on_defaults(self):
self.assertEqual(get_global_default("language"), "en")
self.assertEqual(get_user_default("language"), "en")
self.assertEqual(get_user_default_as_list("language"), ["en"])
old_user = frappe.session.user
user = "test@example.com"
frappe.set_user(user)
perm_doc = frappe.get_doc(
dict(
doctype="User Permission",
user=frappe.session.user,
allow="Language",
for_value="en-GB",
)
).insert(ignore_permissions=True)
self.assertEqual(get_global_default("language"), None)
self.assertEqual(get_user_default("language"), None)
self.assertEqual(get_user_default_as_list("language"), [])
frappe.delete_doc("User Permission", perm_doc.name)
frappe.set_user(old_user)
|
{
"content_hash": "1793b91d4c27fa01104c36d01509b401",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 76,
"avg_line_length": 32.40845070422535,
"alnum_prop": 0.6988265971316818,
"repo_name": "frappe/frappe",
"id": "3c04f16ec85838d3657dde7188929b43aeb0b314",
"size": "2398",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "frappe/tests/test_defaults.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "65093"
},
{
"name": "HTML",
"bytes": "250850"
},
{
"name": "JavaScript",
"bytes": "2523337"
},
{
"name": "Less",
"bytes": "10921"
},
{
"name": "Python",
"bytes": "3618097"
},
{
"name": "SCSS",
"bytes": "261690"
},
{
"name": "Vue",
"bytes": "98456"
}
],
"symlink_target": ""
}
|
"""
Run RSA with py4java distribution and discovery module
:author: Scott Lewis
:copyright: Copyright 2020, Scott Lewis
:license: Apache License 2.0
:version: 1.0.1
..
Copyright 2020 Scott Lewis
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import pelix.framework as pelix
# ------------------------------------------------------------------------------
# Module version
__version_info__ = (1, 0, 1)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# Logger
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
# ------- Main constants for the sample
HTTP_HOSTNAME = "127.0.0.1"
HTTP_PORT = 8181
# ------------------------------------------------------------------------------
def main():
# Set the initial bundles
bundles = (
"pelix.ipopo.core",
"pelix.shell.core",
"pelix.shell.ipopo",
"pelix.shell.console",
# RSA implementation
"pelix.rsa.remoteserviceadmin",
# Basic topology manager (opt)
"pelix.rsa.topologymanagers.basic",
# RSA shell commands (opt)
"pelix.rsa.shell",
"pelix.rsa.providers.distribution.py4j",
"samples.rsa.helloconsumer",
# Example helloconsumer. Only uses remote proxies
"samples.rsa.pbhelloconsumer",
)
# Use the utility method to create, run and delete the framework
framework = pelix.create_framework(
bundles, {"ecf.py4j.javaport": 25333, "ecf.py4j.pythonport": 25334}
)
framework.start()
try:
framework.wait_for_stop()
except KeyboardInterrupt:
framework.stop()
if __name__ == "__main__":
main()
|
{
"content_hash": "0cf1bc04ece1d78f809f6da15e140b17",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 80,
"avg_line_length": 28.073170731707318,
"alnum_prop": 0.5925282363162467,
"repo_name": "tcalmant/ipopo",
"id": "98bb589cab0b7443d7eb122d09a530b16baebc01",
"size": "2352",
"binary": false,
"copies": "1",
"ref": "refs/heads/v1",
"path": "samples/run_rsa_py4java.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2183067"
}
],
"symlink_target": ""
}
|
from oslo_serialization import jsonutils
from oslo_utils import versionutils
from nova import db
from nova import exception
from nova.objects import base
from nova.objects import fields as obj_fields
from nova.virt import hardware
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class InstanceNUMACell(base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: Add pagesize field
# Version 1.2: Add cpu_pinning_raw and topology fields
# Version 1.3: Add cpu_policy and cpu_thread_policy fields
# Version 1.4: Add cpuset_reserved field
VERSION = '1.4'
def obj_make_compatible(self, primitive, target_version):
super(InstanceNUMACell, self).obj_make_compatible(primitive,
target_version)
target_version = versionutils.convert_version_to_tuple(target_version)
if target_version < (1, 4):
primitive.pop('cpuset_reserved', None)
if target_version < (1, 3):
primitive.pop('cpu_policy', None)
primitive.pop('cpu_thread_policy', None)
fields = {
'id': obj_fields.IntegerField(),
'cpuset': obj_fields.SetOfIntegersField(),
'memory': obj_fields.IntegerField(),
'pagesize': obj_fields.IntegerField(nullable=True),
'cpu_topology': obj_fields.ObjectField('VirtCPUTopology',
nullable=True),
'cpu_pinning_raw': obj_fields.DictOfIntegersField(nullable=True),
'cpu_policy': obj_fields.CPUAllocationPolicyField(nullable=True),
'cpu_thread_policy': obj_fields.CPUThreadAllocationPolicyField(
nullable=True),
# These physical CPUs are reserved for use by the hypervisor
'cpuset_reserved': obj_fields.SetOfIntegersField(nullable=True),
}
cpu_pinning = obj_fields.DictProxyField('cpu_pinning_raw')
def __init__(self, **kwargs):
super(InstanceNUMACell, self).__init__(**kwargs)
if 'pagesize' not in kwargs:
self.pagesize = None
self.obj_reset_changes(['pagesize'])
if 'cpu_pinning' not in kwargs:
self.cpu_pinning = None
self.obj_reset_changes(['cpu_pinning_raw'])
if 'cpu_policy' not in kwargs:
self.cpu_policy = None
self.obj_reset_changes(['cpu_policy'])
if 'cpu_thread_policy' not in kwargs:
self.cpu_thread_policy = None
self.obj_reset_changes(['cpu_thread_policy'])
if 'cpuset_reserved' not in kwargs:
self.cpuset_reserved = None
self.obj_reset_changes(['cpuset_reserved'])
def __len__(self):
return len(self.cpuset)
def _to_dict(self):
# NOTE(sahid): Used as legacy, could be renamed in
# _legacy_to_dict_ to the future to avoid confusing.
return {'cpus': hardware.format_cpu_spec(self.cpuset,
allow_ranges=False),
'mem': {'total': self.memory},
'id': self.id,
'pagesize': self.pagesize}
@classmethod
def _from_dict(cls, data_dict):
# NOTE(sahid): Used as legacy, could be renamed in
# _legacy_from_dict_ to the future to avoid confusing.
cpuset = hardware.parse_cpu_spec(data_dict.get('cpus', ''))
memory = data_dict.get('mem', {}).get('total', 0)
cell_id = data_dict.get('id')
pagesize = data_dict.get('pagesize')
return cls(id=cell_id, cpuset=cpuset,
memory=memory, pagesize=pagesize)
@property
def siblings(self):
cpu_list = sorted(list(self.cpuset))
threads = 0
if ('cpu_topology' in self) and self.cpu_topology:
threads = self.cpu_topology.threads
if threads == 1:
threads = 0
return list(map(set, zip(*[iter(cpu_list)] * threads)))
@property
def cpu_pinning_requested(self):
return self.cpu_policy == obj_fields.CPUAllocationPolicy.DEDICATED
def pin(self, vcpu, pcpu):
if vcpu not in self.cpuset:
return
pinning_dict = self.cpu_pinning or {}
pinning_dict[vcpu] = pcpu
self.cpu_pinning = pinning_dict
def pin_vcpus(self, *cpu_pairs):
for vcpu, pcpu in cpu_pairs:
self.pin(vcpu, pcpu)
def clear_host_pinning(self):
"""Clear any data related to how this cell is pinned to the host.
Needed for aborting claims as we do not want to keep stale data around.
"""
self.id = -1
self.cpu_pinning = {}
return self
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class InstanceNUMATopology(base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: Takes into account pagesize
# Version 1.2: InstanceNUMACell 1.2
# Version 1.3: Add emulator threads policy
VERSION = '1.3'
def obj_make_compatible(self, primitive, target_version):
super(InstanceNUMATopology, self).obj_make_compatible(primitive,
target_version)
target_version = versionutils.convert_version_to_tuple(target_version)
if target_version < (1, 3):
primitive.pop('emulator_threads_policy', None)
fields = {
# NOTE(danms): The 'id' field is no longer used and should be
# removed in the future when convenient
'id': obj_fields.IntegerField(),
'instance_uuid': obj_fields.UUIDField(),
'cells': obj_fields.ListOfObjectsField('InstanceNUMACell'),
'emulator_threads_policy': (
obj_fields.CPUEmulatorThreadsPolicyField(nullable=True)),
}
@classmethod
def obj_from_primitive(cls, primitive, context=None):
if 'nova_object.name' in primitive:
obj_topology = super(InstanceNUMATopology, cls).obj_from_primitive(
primitive, context=None)
else:
# NOTE(sahid): This compatibility code needs to stay until we can
# guarantee that there are no cases of the old format stored in
# the database (or forever, if we can never guarantee that).
obj_topology = InstanceNUMATopology._from_dict(primitive)
obj_topology.id = 0
return obj_topology
@classmethod
def obj_from_db_obj(cls, instance_uuid, db_obj):
primitive = jsonutils.loads(db_obj)
obj_topology = cls.obj_from_primitive(primitive)
if 'nova_object.name' not in db_obj:
obj_topology.instance_uuid = instance_uuid
# No benefit to store a list of changed fields
obj_topology.obj_reset_changes()
return obj_topology
# TODO(ndipanov) Remove this method on the major version bump to 2.0
@base.remotable
def create(self):
values = {'numa_topology': self._to_json()}
db.instance_extra_update_by_uuid(self._context, self.instance_uuid,
values)
self.obj_reset_changes()
@base.remotable_classmethod
def get_by_instance_uuid(cls, context, instance_uuid):
db_extra = db.instance_extra_get_by_instance_uuid(
context, instance_uuid, columns=['numa_topology'])
if not db_extra:
raise exception.NumaTopologyNotFound(instance_uuid=instance_uuid)
if db_extra['numa_topology'] is None:
return None
return cls.obj_from_db_obj(instance_uuid, db_extra['numa_topology'])
def _to_json(self):
return jsonutils.dumps(self.obj_to_primitive())
def __len__(self):
"""Defined so that boolean testing works the same as for lists."""
return len(self.cells)
def _to_dict(self):
# NOTE(sahid): Used as legacy, could be renamed in _legacy_to_dict_
# in the future to avoid confusing.
return {'cells': [cell._to_dict() for cell in self.cells]}
@classmethod
def _from_dict(cls, data_dict):
# NOTE(sahid): Used as legacy, could be renamed in _legacy_from_dict_
# in the future to avoid confusing.
return cls(cells=[
InstanceNUMACell._from_dict(cell_dict)
for cell_dict in data_dict.get('cells', [])])
@property
def cpu_pinning_requested(self):
return all(cell.cpu_pinning_requested for cell in self.cells)
def clear_host_pinning(self):
"""Clear any data related to how instance is pinned to the host.
Needed for aborting claims as we do not want to keep stale data around.
"""
for cell in self.cells:
cell.clear_host_pinning()
return self
@property
def emulator_threads_isolated(self):
"""Determines whether emulator threads should be isolated"""
return (self.obj_attr_is_set('emulator_threads_policy')
and (self.emulator_threads_policy
== obj_fields.CPUEmulatorThreadsPolicy.ISOLATE))
|
{
"content_hash": "f34ddaa9cf45fcd588f5f79f71e0a5c6",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 79,
"avg_line_length": 38.39495798319328,
"alnum_prop": 0.6116217990807616,
"repo_name": "jianghuaw/nova",
"id": "3872d69945416527614a7a7897ed33b072869759",
"size": "9746",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "nova/objects/instance_numa_topology.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1435"
},
{
"name": "PHP",
"bytes": "32515"
},
{
"name": "Python",
"bytes": "19932348"
},
{
"name": "Shell",
"bytes": "28290"
},
{
"name": "Smarty",
"bytes": "339635"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from mords_api.models import Word, Book, Learner, Note, Entry, LearningWord
admin.site.register(Word)
admin.site.register(Book)
admin.site.register(Learner)
admin.site.register(Note)
admin.site.register(Entry)
admin.site.register(LearningWord)
|
{
"content_hash": "ae55f34797cbf6cffd6d757284ccf6be",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 75,
"avg_line_length": 27.9,
"alnum_prop": 0.8100358422939068,
"repo_name": "cheng10/M-ords",
"id": "e0327fb729273831281cd1676e9a702cc10dbf75",
"size": "279",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mords_backend/mords/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "13520"
},
{
"name": "HTML",
"bytes": "62829"
},
{
"name": "JavaScript",
"bytes": "46511"
},
{
"name": "Python",
"bytes": "61695"
},
{
"name": "Shell",
"bytes": "5245"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from doit.task import clean_targets
def simple():
print("ok")
def task_poo():
return {
'actions': ['touch poo'],
'targets': ['poo'],
'clean': [clean_targets, simple],
}
|
{
"content_hash": "616753e7291b9a5c286a1d0f3ccc5bf4",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 41,
"avg_line_length": 20.75,
"alnum_prop": 0.5582329317269076,
"repo_name": "gh0std4ncer/doit",
"id": "d5317d5bb504d5960156824ec0a9f729f47c4e95",
"size": "249",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "doc/tutorial/clean_mix.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "491553"
}
],
"symlink_target": ""
}
|
import sublime
import sublime_plugin
import fnmatch
import os
import shutil
import sys
import threading
import time
try:
from . import tools
except ValueError:
from package_sync_helpers import tools
class Queue(object):
current = None
pool = []
def __init__(self):
pass
def start(self):
# Clear old thread
if self.current and self.current["thread"].is_alive():
sublime.set_timeout(lambda: self.start(), 500)
else:
# Reset current thread, since it ended
self.current = None
# Check for elements in pool
if self.pool:
self.current = self.pool.pop(0)
self.current["thread"].start()
# Attemp a new start of the thread
sublime.set_timeout(lambda: self.start(), 500)
def has(self, key):
pool = self.pool + [self.current] if self.current else []
return any([item for item in pool if item["key"] == key])
def add(self, thread, key=None):
self.pool += [{"key": key if key else thread.name, "thread": thread}]
self.start()
class Sync(threading.Thread):
def __init__(self, mode=["pull", "push"], override=False, item=None):
psync_settings = tools.get_psync_settings()
self.psync_settings = psync_settings
self.mode = mode
self.item = item
self.override = override
threading.Thread.__init__(self)
def run(self):
sync_interval = self.psync_settings.get("online_sync_interval", 1)
# Stop watcher and wait for the poll
tools.pause_watcher(
local="pull" in self.mode, remote="push" in self.mode)
# If no item pull and push all
if not self.item:
tools.log("PackageSync: Complete sync started.", force=True)
# Fetch all items from the remote location
if "pull" in self.mode:
self.pull_all()
# Push all items to the remote location
if "push" in self.mode:
self.push_all()
tools.log("PackageSync: Complete sync done.", force=True)
else:
# Pull the selected item
if "pull" in self.mode:
self.pull(self.item)
# Push the selected item
if "push" in self.mode:
self.push(self.item)
# Restart watcher again
tools.pause_watcher(
False, local="pull" in self.mode, remote="push" in self.mode)
def find_files(self, path):
tools.log("PackageSync: find_files started for %s" % path)
include_files = self.psync_settings["include_files"]
ignore_files = self.psync_settings["ignore_files"]
ignore_dirs = self.psync_settings["ignore_dirs"]
# tools.log("PackageSync: path %s" % path)
# tools.log("PackageSync: include_files %s" % include_files)
# tools.log("PackageSync: ignore_files %s" % ignore_files)
# tools.log("PackageSync: ignore_dirs %s" % ignore_dirs)
resources = {}
for root, dirs, files in os.walk(path):
[dirs.remove(dir)
for dir in dirs if dir in ignore_dirs]
for file in files:
absolute_path = os.path.join(root, file)
relative_path = os.path.relpath(absolute_path, path)
include_matches = [
fnmatch.fnmatch(relative_path, p) for p in include_files]
ignore_matches = [
fnmatch.fnmatch(relative_path, p) for p in ignore_files]
if any(ignore_matches) or not any(include_matches):
continue
resources[relative_path] = {"version": os.path.getmtime(
absolute_path), "path": absolute_path, "dir": os.path.dirname(relative_path)}
return resources
def pull_all(self):
tools.log("PackageSync: pull_all started with override = %s" %
self.override)
local_dir = os.path.join(sublime.packages_path(), "User")
remote_dir = self.psync_settings["online_sync_folder"]
local_data = self.find_files(local_dir)
remote_data = self.find_files(remote_dir)
# Get data of last sync
last_run_data = tools.load_last_run_data()
last_run_data_local = last_run_data.get("last_run_data_local", {})
last_run_data_remote = last_run_data.get("last_run_data_remote", {})
deleted_local_data = [
key for key in last_run_data_local if key not in local_data]
deleted_remote_data = [
key for key in last_run_data_remote if key not in remote_data]
# tools.log("PackageSync: local_data: %s" % local_data)
# tools.log("PackageSync: remote_data: %s" % remote_data)
# tools.log("PackageSync: deleted_local_data: %s" % deleted_local_data)
# tools.log("PackageSync: deleted_remote_data: %s" % deleted_remote_data)
diff = [{"type": "d", "key": key}
for key in last_run_data_remote if key not in remote_data]
for key, value in remote_data.items():
if key in deleted_local_data:
pass
elif key not in local_data:
diff += [dict({"type": "c", "key": key}, **value)]
elif int(value["version"]) > int(local_data[key]["version"]) or self.override:
diff += [dict({"type": "m", "key": key}, **value)]
for item in diff:
self.pull(item)
# Set data for next last sync
tools.save_last_run_data(
last_run_data_local=self.find_files(local_dir),
last_run_data_remote=self.find_files(remote_dir))
def pull(self, item):
tools.log("PackageSync: pull started for %s" % item)
local_dir = os.path.join(sublime.packages_path(), "User")
remote_dir = self.psync_settings.get("sync_folder")
# Get data of last sync
last_run_data = tools.load_last_run_data()
last_run_data_local = last_run_data.get("last_run_data_local", {})
last_run_data_remote = last_run_data.get("last_run_data_remote", {})
# Make target file path and directory
target = os.path.join(local_dir, item["key"])
target_dir = os.path.dirname(target)
# TODO -- Added for error mitigation but theoretically this was not needed
# Verify why the error is occuring for these variables
try:
previous_installed_packages
installed_packages
except NameError:
previous_installed_packages = []
installed_packages = []
# Skip if file was just pushed
try:
if item["type"] == "c" or item["type"] == "m":
# Check for an updated Package Control setting file and backup
# old file
if item["key"] == "Package Control.sublime-settings":
previous_installed_packages = tools.load_installed_packages(
target)
installed_packages = tools.load_installed_packages(
item["path"])
# Check if the watcher detects a file again
if last_run_data_local[item["key"]]["version"] == item["version"]:
# tools.log("PackageSync: Already pulled")
return
except:
pass
# If a file was created
if item["type"] == "c":
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
shutil.copy2(item["path"], target)
tools.log("PackageSync: Created %s" % target)
#
last_run_data_local[item["key"]] = {
"path": target, "dir": item["dir"], "version": item["version"]}
last_run_data_remote[item["key"]] = {
"path": item["path"], "dir": item["dir"], "version": item["version"]}
# If a file was delated
elif item["type"] == "d":
if os.path.isfile(target):
os.remove(target)
tools.log("PackageSync: Deleted %s" % target)
try:
del last_run_data_local[item["key"]]
del last_run_data_remote[item["key"]]
except:
pass
# Check if directory is empty and remove it if, just cosmetic issue
if os.path.isdir(target_dir) and not os.listdir(target_dir):
os.rmdir(target_dir)
# If a file was modified
elif item["type"] == "m":
if not os.path.isdir(target_dir):
os.mkdir(target_dir)
shutil.copy2(item["path"], target)
tools.log("PackageSync: Updated %s" % target)
#
last_run_data_local[item["key"]] = {
"path": target, "dir": item["dir"], "version": item["version"]}
last_run_data_remote[item["key"]] = {
"path": item["path"], "dir": item["dir"], "version": item["version"]}
# Set data for next last sync
tools.save_last_run_data(
last_run_data_local=last_run_data_local,
last_run_data_remote=last_run_data_remote)
if item["type"] != "d" and item["key"] == "Package Control.sublime-settings":
# Handle Package Control
self.pull_package_control(
last_run_data, previous_installed_packages, installed_packages)
def pull_package_control(self, last_run_data, previous_installed_packages, installed_packages):
# Save items to remove
to_install = [
item for item in installed_packages if item not in previous_installed_packages]
to_remove = [
item for item in previous_installed_packages if item not in installed_packages]
tools.log("PackageSync: install: %s", to_install)
tools.log("PackageSync: remove: %s", to_remove)
# Check for old remove_packages
packages_to_remove = last_run_data.get("packages_to_remove", [])
packages_to_remove += [item for item in to_remove if item !=
"Package Control" and item not in packages_to_remove]
tools.log("PackageSync: packages_to_remove %s", packages_to_remove)
if packages_to_remove:
removed_packages = tools.remove_packages(packages_to_remove)
else:
removed_packages = []
# Check if new packages are available and run package cleanup to
# install missing packages
if to_install:
sublime.set_timeout(tools.install_new_packages(), 1000)
tools.save_last_run_data(
packages_to_remove=[item for item in packages_to_remove if item not in removed_packages])
def push_all(self):
tools.log("PackageSync: push_all started with override = %s" %
self.override)
local_dir = os.path.join(sublime.packages_path(), "User")
remote_dir = self.psync_settings.get("online_sync_folder")
local_data = self.find_files(local_dir)
remote_data = self.find_files(remote_dir)
# Get data of last sync
last_run_data = tools.load_last_run_data()
last_run_data_local = last_run_data.get("last_run_data_local", {})
last_run_data_remote = last_run_data.get("last_run_data_remote", {})
deleted_local_data = [
key for key in last_run_data_local if key not in local_data]
deleted_remote_data = [
key for key in last_run_data_remote if key not in remote_data]
# tools.log("PackageSync: local_data: %s" % local_data)
# tools.log("PackageSync: remote_data: %s" % remote_data)
# tools.log("PackageSync: deleted_local_data: %s" % deleted_local_data)
# tools.log("PackageSync: deleted_remote_data: %s" % deleted_remote_data)
diff = [{"type": "d", "key": key}
for key in last_run_data_local if key not in local_data]
for key, value in local_data.items():
if key in deleted_remote_data:
pass
elif key not in remote_data:
diff += [dict({"type": "c", "key": key}, **value)]
elif int(value["version"]) > int(remote_data[key]["version"]) or self.override:
diff += [dict({"type": "m", "key": key}, **value)]
for item in diff:
self.push(item)
# Set data for next last sync
tools.save_last_run_data(
last_run_data_local=self.find_files(local_dir),
last_run_data_remote=self.find_files(remote_dir))
def push(self, item):
tools.log("PackageSync: push started for %s" % item)
local_dir = os.path.join(sublime.packages_path(), "User")
remote_dir = self.psync_settings.get("online_sync_folder")
# Get data of last sync
last_run_data = tools.load_last_run_data()
last_run_data_local = last_run_data.get("last_run_data_local", {})
last_run_data_remote = last_run_data.get("last_run_data_remote", {})
# Skip if file was just copied
try:
if item["type"] == "c" or item["type"] == "m":
if last_run_data_remote[item["key"]]["version"] == item["version"]:
tools.log("PackageSync: Already pushed")
return
except:
pass
# Make target file path and dir
target = os.path.join(remote_dir, item["key"])
target_dir = os.path.dirname(target)
if item["type"] == "c":
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
shutil.copy2(item["path"], target)
tools.log("PackageSync: Created %s" % target)
#
last_run_data_local[item["key"]] = {
"path": item["path"], "dir": item["dir"], "version": item["version"]}
last_run_data_remote[item["key"]] = {
"path": target, "dir": item["dir"], "version": item["version"]}
elif item["type"] == "d":
if os.path.isfile(target):
os.remove(target)
tools.log("PackageSync: Deleted %s" % target)
try:
del last_run_data_local[item["key"]]
del last_run_data_remote[item["key"]]
except:
pass
# Check if dir is empty and remove it if
if os.path.isdir(target_dir) and not os.listdir(target_dir):
os.rmdir(target_dir)
elif item["type"] == "m":
if not os.path.isdir(target_dir):
os.mkdir(target_dir)
shutil.copy2(item["path"], target)
tools.log("PackageSync: Updated %s" % target)
#
last_run_data_local[item["key"]] = {
"path": item["path"], "dir": item["dir"], "version": item["version"]}
last_run_data_remote[item["key"]] = {
"path": target, "dir": item["dir"], "version": item["version"]}
# Set data for next last sync
tools.save_last_run_data(
last_run_data_local=last_run_data_local,
last_run_data_remote=last_run_data_remote)
|
{
"content_hash": "08a995ca948e1abf09ca7ead1c9e4fef",
"timestamp": "",
"source": "github",
"line_count": 407,
"max_line_length": 101,
"avg_line_length": 37.51105651105651,
"alnum_prop": 0.5545948778410952,
"repo_name": "utkarsh9891/PackageSync",
"id": "0b0d113f25a06318c92825a1cc884b466127133e",
"size": "15267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "package_sync_helpers/online.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62968"
}
],
"symlink_target": ""
}
|
import os
from datetime import date
from unittest import skipUnless
from django.conf import settings
from django.contrib.sitemaps import Sitemap
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured
from django.test import modify_settings, override_settings
from django.utils.formats import localize
from django.utils.translation import activate, deactivate
from .base import SitemapTestsBase
from .models import TestModel
class HTTPSitemapTests(SitemapTestsBase):
use_sitemap_err_msg = (
'To use sitemaps, either enable the sites framework or pass a '
'Site/RequestSite object in your view.'
)
def test_simple_sitemap_index(self):
"A simple sitemap index can be rendered"
response = self.client.get('/simple/index.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url
self.assertXMLEqual(response.content.decode(), expected_content)
def test_sitemap_not_callable(self):
"""A sitemap may not be callable."""
response = self.client.get('/simple-not-callable/index.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url
self.assertXMLEqual(response.content.decode(), expected_content)
def test_paged_sitemap(self):
"""A sitemap may have multiple pages."""
response = self.client.get('/simple-paged/index.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>{0}/simple/sitemap-simple.xml</loc></sitemap><sitemap><loc>{0}/simple/sitemap-simple.xml?p=2</loc></sitemap>
</sitemapindex>
""".format(self.base_url)
self.assertXMLEqual(response.content.decode(), expected_content)
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(os.path.dirname(__file__), 'templates')],
}])
def test_simple_sitemap_custom_index(self):
"A simple sitemap index can be rendered with a custom template"
response = self.client.get('/simple/custom-index.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<!-- This is a customised template -->
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url
self.assertXMLEqual(response.content.decode(), expected_content)
def test_simple_sitemap_section(self):
"A simple sitemap section can be rendered"
response = self.client.get('/simple/sitemap-simple.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today())
self.assertXMLEqual(response.content.decode(), expected_content)
def test_no_section(self):
response = self.client.get('/simple/sitemap-simple2.xml')
self.assertEqual(str(response.context['exception']), "No sitemap available for section: 'simple2'")
self.assertEqual(response.status_code, 404)
def test_empty_page(self):
response = self.client.get('/simple/sitemap-simple.xml?p=0')
self.assertEqual(str(response.context['exception']), 'Page 0 empty')
self.assertEqual(response.status_code, 404)
def test_page_not_int(self):
response = self.client.get('/simple/sitemap-simple.xml?p=test')
self.assertEqual(str(response.context['exception']), "No page 'test'")
self.assertEqual(response.status_code, 404)
def test_simple_sitemap(self):
"A simple sitemap can be rendered"
response = self.client.get('/simple/sitemap.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today())
self.assertXMLEqual(response.content.decode(), expected_content)
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(os.path.dirname(__file__), 'templates')],
}])
def test_simple_custom_sitemap(self):
"A simple sitemap can be rendered with a custom template"
response = self.client.get('/simple/custom-sitemap.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<!-- This is a customised template -->
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today())
self.assertXMLEqual(response.content.decode(), expected_content)
def test_sitemap_last_modified(self):
"Last-Modified header is set correctly"
response = self.client.get('/lastmod/sitemap.xml')
self.assertEqual(response['Last-Modified'], 'Wed, 13 Mar 2013 10:00:00 GMT')
def test_sitemap_last_modified_date(self):
"""
The Last-Modified header should be support dates (without time).
"""
response = self.client.get('/lastmod/date-sitemap.xml')
self.assertEqual(response['Last-Modified'], 'Wed, 13 Mar 2013 00:00:00 GMT')
def test_sitemap_last_modified_tz(self):
"""
The Last-Modified header should be converted from timezone aware dates
to GMT.
"""
response = self.client.get('/lastmod/tz-sitemap.xml')
self.assertEqual(response['Last-Modified'], 'Wed, 13 Mar 2013 15:00:00 GMT')
def test_sitemap_last_modified_missing(self):
"Last-Modified header is missing when sitemap has no lastmod"
response = self.client.get('/generic/sitemap.xml')
self.assertFalse(response.has_header('Last-Modified'))
def test_sitemap_last_modified_mixed(self):
"Last-Modified header is omitted when lastmod not on all items"
response = self.client.get('/lastmod-mixed/sitemap.xml')
self.assertFalse(response.has_header('Last-Modified'))
def test_sitemaps_lastmod_mixed_ascending_last_modified_missing(self):
"""
The Last-Modified header is omitted when lastmod isn't found in all
sitemaps. Test sitemaps are sorted by lastmod in ascending order.
"""
response = self.client.get('/lastmod-sitemaps/mixed-ascending.xml')
self.assertFalse(response.has_header('Last-Modified'))
def test_sitemaps_lastmod_mixed_descending_last_modified_missing(self):
"""
The Last-Modified header is omitted when lastmod isn't found in all
sitemaps. Test sitemaps are sorted by lastmod in descending order.
"""
response = self.client.get('/lastmod-sitemaps/mixed-descending.xml')
self.assertFalse(response.has_header('Last-Modified'))
def test_sitemaps_lastmod_ascending(self):
"""
The Last-Modified header is set to the most recent sitemap lastmod.
Test sitemaps are sorted by lastmod in ascending order.
"""
response = self.client.get('/lastmod-sitemaps/ascending.xml')
self.assertEqual(response['Last-Modified'], 'Sat, 20 Apr 2013 05:00:00 GMT')
def test_sitemaps_lastmod_descending(self):
"""
The Last-Modified header is set to the most recent sitemap lastmod.
Test sitemaps are sorted by lastmod in descending order.
"""
response = self.client.get('/lastmod-sitemaps/descending.xml')
self.assertEqual(response['Last-Modified'], 'Sat, 20 Apr 2013 05:00:00 GMT')
@skipUnless(settings.USE_I18N, "Internationalization is not enabled")
@override_settings(USE_L10N=True)
def test_localized_priority(self):
"The priority value should not be localized (Refs #14164)"
activate('fr')
self.assertEqual('0,3', localize(0.3))
# Priorities haven't been rendered in localized format.
response = self.client.get('/simple/sitemap.xml')
self.assertContains(response, '<priority>0.5</priority>')
self.assertContains(response, '<lastmod>%s</lastmod>' % date.today())
deactivate()
@modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'})
def test_requestsite_sitemap(self):
# Hitting the flatpages sitemap without the sites framework installed
# doesn't raise an exception.
response = self.client.get('/simple/sitemap.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>http://testserver/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % date.today()
self.assertXMLEqual(response.content.decode(), expected_content)
def test_sitemap_get_urls_no_site_1(self):
"""
Check we get ImproperlyConfigured if we don't pass a site object to
Sitemap.get_urls and no Site objects exist
"""
Site.objects.all().delete()
with self.assertRaisesMessage(ImproperlyConfigured, self.use_sitemap_err_msg):
Sitemap().get_urls()
@modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'})
def test_sitemap_get_urls_no_site_2(self):
"""
Check we get ImproperlyConfigured when we don't pass a site object to
Sitemap.get_urls if Site objects exists, but the sites framework is not
actually installed.
"""
with self.assertRaisesMessage(ImproperlyConfigured, self.use_sitemap_err_msg):
Sitemap().get_urls()
def test_sitemap_item(self):
"""
Check to make sure that the raw item is included with each
Sitemap.get_url() url result.
"""
test_sitemap = Sitemap()
test_sitemap.items = TestModel.objects.order_by('pk').all
def is_testmodel(url):
return isinstance(url['item'], TestModel)
item_in_url_info = all(map(is_testmodel, test_sitemap.get_urls()))
self.assertTrue(item_in_url_info)
def test_cached_sitemap_index(self):
"""
A cached sitemap index can be rendered (#2713).
"""
response = self.client.get('/cached/index.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/cached/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url
self.assertXMLEqual(response.content.decode(), expected_content)
def test_x_robots_sitemap(self):
response = self.client.get('/simple/index.xml')
self.assertEqual(response['X-Robots-Tag'], 'noindex, noodp, noarchive')
response = self.client.get('/simple/sitemap.xml')
self.assertEqual(response['X-Robots-Tag'], 'noindex, noodp, noarchive')
def test_empty_sitemap(self):
response = self.client.get('/empty/sitemap.xml')
self.assertEqual(response.status_code, 200)
@override_settings(LANGUAGES=(('en', 'English'), ('pt', 'Portuguese')))
def test_simple_i18nsitemap_index(self):
"A simple i18n sitemap index can be rendered"
response = self.client.get('/simple/i18n.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>{0}/en/i18n/testmodel/{1}/</loc><changefreq>never</changefreq><priority>0.5</priority></url><url><loc>{0}/pt/i18n/testmodel/{1}/</loc><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""".format(self.base_url, self.i18n_model.pk)
self.assertXMLEqual(response.content.decode(), expected_content)
def test_sitemap_without_entries(self):
response = self.client.get('/sitemap-without-entries/sitemap.xml')
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
</urlset>"""
self.assertXMLEqual(response.content.decode(), expected_content)
|
{
"content_hash": "67292ce9abe773a62943be5a7eb52e7e",
"timestamp": "",
"source": "github",
"line_count": 277,
"max_line_length": 204,
"avg_line_length": 45.57761732851986,
"alnum_prop": 0.6707326732673268,
"repo_name": "timgraham/django",
"id": "e757170241d7e27f00141fb8aa4b3095c571ec8e",
"size": "12625",
"binary": false,
"copies": "42",
"ref": "refs/heads/master",
"path": "tests/sitemaps_tests/test_http.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "84974"
},
{
"name": "HTML",
"bytes": "224563"
},
{
"name": "JavaScript",
"bytes": "257097"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "12931531"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
"""
To install MySQL Connector/Python:
shell> python ./setup.py install
"""
from distutils.core import setup
from distutils.command.install import INSTALL_SCHEMES
# Make sure that data files are actually installed in the package directory
for install_scheme in INSTALL_SCHEMES.values():
install_scheme['data'] = install_scheme['purelib']
import setupinfo
try:
from cpyint import metasetupinfo
setupinfo.command_classes.update(metasetupinfo.command_classes)
except (ImportError, AttributeError):
# python-internal not available
pass
setup(
name=setupinfo.name,
version=setupinfo.version,
description=setupinfo.description,
long_description=setupinfo.long_description,
author=setupinfo.author,
author_email=setupinfo.author_email,
license=setupinfo.cpy_gpl_license,
keywords=setupinfo.keywords,
url=setupinfo.url,
download_url=setupinfo.download_url,
package_dir=setupinfo.package_dir,
packages=setupinfo.packages,
classifiers=setupinfo.classifiers,
cmdclass=setupinfo.command_classes,
ext_modules=setupinfo.extensions,
)
|
{
"content_hash": "e512e8178f6021c7dd2fb1f0e4d51dba",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 75,
"avg_line_length": 27.121951219512194,
"alnum_prop": 0.7571942446043165,
"repo_name": "loicbaron/nutrition",
"id": "d83022b8da2a735d0bd877bcb772bff9bec3767d",
"size": "2288",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "import_data/mysql-connector-python-2.1.6/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Blade",
"bytes": "40124"
},
{
"name": "C",
"bytes": "113252"
},
{
"name": "C++",
"bytes": "1447"
},
{
"name": "Dockerfile",
"bytes": "1652"
},
{
"name": "HTML",
"bytes": "1617"
},
{
"name": "JavaScript",
"bytes": "175722"
},
{
"name": "PHP",
"bytes": "21025"
},
{
"name": "Python",
"bytes": "1765772"
},
{
"name": "Shell",
"bytes": "2890"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.